]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9.1-3.4.2-201206122153.patch
Auto commit, 1 new patch.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9.1-3.4.2-201206122153.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index b4a898f..830febf 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9 +*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13 +*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 @@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -48,9 +51,11 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *.xz
32 *_MODULES
33 +*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 @@ -69,6 +74,7 @@ Image
38 Module.markers
39 Module.symvers
40 PENDING
41 +PERF*
42 SCCS
43 System.map*
44 TAGS
45 @@ -80,6 +86,7 @@ aic7*seq.h*
46 aicasm
47 aicdb.h*
48 altivec*.c
49 +ashldi3.S
50 asm-offsets.h
51 asm_offsets.h
52 autoconf.h*
53 @@ -92,19 +99,24 @@ bounds.h
54 bsetup
55 btfixupprep
56 build
57 +builtin-policy.h
58 bvmlinux
59 bzImage*
60 capability_names.h
61 capflags.c
62 classlist.h*
63 +clut_vga16.c
64 +common-cmds.h
65 comp*.log
66 compile.h*
67 conf
68 config
69 config-*
70 config_data.h*
71 +config.c
72 config.mak
73 config.mak.autogen
74 +config.tmp
75 conmakehash
76 consolemap_deftbl.c*
77 cpustr.h
78 @@ -115,9 +127,11 @@ devlist.h*
79 dnotify_test
80 docproc
81 dslm
82 +dtc-lexer.lex.c
83 elf2ecoff
84 elfconfig.h*
85 evergreen_reg_safe.h
86 +exception_policy.conf
87 fixdep
88 flask.h
89 fore200e_mkfirm
90 @@ -125,12 +139,15 @@ fore200e_pca_fw.c*
91 gconf
92 gconf.glade.h
93 gen-devlist
94 +gen-kdb_cmds.c
95 gen_crc32table
96 gen_init_cpio
97 generated
98 genheaders
99 genksyms
100 *_gray256.c
101 +hash
102 +hid-example
103 hpet_example
104 hugepage-mmap
105 hugepage-shm
106 @@ -145,7 +162,7 @@ int32.c
107 int4.c
108 int8.c
109 kallsyms
110 -kconfig
111 +kern_constants.h
112 keywords.c
113 ksym.c*
114 ksym.h*
115 @@ -153,7 +170,7 @@ kxgettext
116 lkc_defs.h
117 lex.c
118 lex.*.c
119 -linux
120 +lib1funcs.S
121 logo_*.c
122 logo_*_clut224.c
123 logo_*_mono.c
124 @@ -164,14 +181,15 @@ machtypes.h
125 map
126 map_hugetlb
127 maui_boot.h
128 -media
129 mconf
130 +mdp
131 miboot*
132 mk_elfconfig
133 mkboot
134 mkbugboot
135 mkcpustr
136 mkdep
137 +mkpiggy
138 mkprep
139 mkregtable
140 mktables
141 @@ -188,6 +206,7 @@ oui.c*
142 page-types
143 parse.c
144 parse.h
145 +parse-events*
146 patches*
147 pca200e.bin
148 pca200e_ecd.bin2
149 @@ -197,6 +216,7 @@ perf-archive
150 piggyback
151 piggy.gzip
152 piggy.S
153 +pmu-*
154 pnmtologo
155 ppc_defs.h*
156 pss_boot.h
157 @@ -207,6 +227,7 @@ r300_reg_safe.h
158 r420_reg_safe.h
159 r600_reg_safe.h
160 recordmcount
161 +regdb.c
162 relocs
163 rlim_names.h
164 rn50_reg_safe.h
165 @@ -217,6 +238,7 @@ setup
166 setup.bin
167 setup.elf
168 sImage
169 +slabinfo
170 sm_tbl*
171 split-include
172 syscalltab.h
173 @@ -227,6 +249,7 @@ tftpboot.img
174 timeconst.h
175 times.h*
176 trix_boot.h
177 +user_constants.h
178 utsrelease.h*
179 vdso-syms.lds
180 vdso.lds
181 @@ -238,13 +261,17 @@ vdso32.lds
182 vdso32.so.dbg
183 vdso64.lds
184 vdso64.so.dbg
185 +vdsox32.lds
186 +vdsox32-syms.lds
187 version.h*
188 vmImage
189 vmlinux
190 vmlinux-*
191 vmlinux.aout
192 vmlinux.bin.all
193 +vmlinux.bin.bz2
194 vmlinux.lds
195 +vmlinux.relocs
196 vmlinuz
197 voffset.h
198 vsyscall.lds
199 @@ -252,9 +279,11 @@ vsyscall_32.lds
200 wanxlfw.inc
201 uImage
202 unifdef
203 +utsrelease.h
204 wakeup.bin
205 wakeup.elf
206 wakeup.lds
207 zImage*
208 zconf.hash.c
209 +zconf.lex.c
210 zoffset.h
211 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
212 index c1601e5..08557ce 100644
213 --- a/Documentation/kernel-parameters.txt
214 +++ b/Documentation/kernel-parameters.txt
215 @@ -2021,6 +2021,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
216 the specified number of seconds. This is to be used if
217 your oopses keep scrolling off the screen.
218
219 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
220 + virtualization environments that don't cope well with the
221 + expand down segment used by UDEREF on X86-32 or the frequent
222 + page table updates on X86-64.
223 +
224 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
225 +
226 pcbit= [HW,ISDN]
227
228 pcd. [PARIDE]
229 diff --git a/Makefile b/Makefile
230 index 901a955..8277cb4 100644
231 --- a/Makefile
232 +++ b/Makefile
233 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
234
235 HOSTCC = gcc
236 HOSTCXX = g++
237 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
238 -HOSTCXXFLAGS = -O2
239 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
240 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
241 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
242
243 # Decide whether to build built-in, modular, or both.
244 # Normally, just do built-in.
245 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
246 # Rules shared between *config targets and build targets
247
248 # Basic helpers built in scripts/
249 -PHONY += scripts_basic
250 -scripts_basic:
251 +PHONY += scripts_basic gcc-plugins
252 +scripts_basic: gcc-plugins
253 $(Q)$(MAKE) $(build)=scripts/basic
254 $(Q)rm -f .tmp_quiet_recordmcount
255
256 @@ -564,6 +565,55 @@ else
257 KBUILD_CFLAGS += -O2
258 endif
259
260 +ifndef DISABLE_PAX_PLUGINS
261 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
262 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
263 +ifndef CONFIG_UML
264 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
265 +endif
266 +endif
267 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
268 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
269 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
270 +endif
271 +ifdef CONFIG_KALLOCSTAT_PLUGIN
272 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
273 +endif
274 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
275 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
276 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
277 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
278 +endif
279 +ifdef CONFIG_CHECKER_PLUGIN
280 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
281 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
282 +endif
283 +endif
284 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
285 +ifdef CONFIG_PAX_SIZE_OVERFLOW
286 +SIZE_OVERFLOW_PLUGIN := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
287 +endif
288 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
289 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN)
290 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
291 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
292 +ifeq ($(KBUILD_EXTMOD),)
293 +gcc-plugins:
294 + $(Q)$(MAKE) $(build)=tools/gcc
295 +else
296 +gcc-plugins: ;
297 +endif
298 +else
299 +gcc-plugins:
300 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
301 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
302 +else
303 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
304 +endif
305 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
306 +endif
307 +endif
308 +
309 include $(srctree)/arch/$(SRCARCH)/Makefile
310
311 ifneq ($(CONFIG_FRAME_WARN),0)
312 @@ -708,7 +758,7 @@ export mod_strip_cmd
313
314
315 ifeq ($(KBUILD_EXTMOD),)
316 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
317 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
318
319 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
320 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
321 @@ -932,6 +982,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
322
323 # The actual objects are generated when descending,
324 # make sure no implicit rule kicks in
325 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
326 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
327 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
328
329 # Handle descending into subdirectories listed in $(vmlinux-dirs)
330 @@ -941,7 +993,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
331 # Error messages still appears in the original language
332
333 PHONY += $(vmlinux-dirs)
334 -$(vmlinux-dirs): prepare scripts
335 +$(vmlinux-dirs): gcc-plugins prepare scripts
336 $(Q)$(MAKE) $(build)=$@
337
338 # Store (new) KERNELRELASE string in include/config/kernel.release
339 @@ -985,6 +1037,7 @@ prepare0: archprepare FORCE
340 $(Q)$(MAKE) $(build)=.
341
342 # All the preparing..
343 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
344 prepare: prepare0
345
346 # Generate some files
347 @@ -1092,6 +1145,8 @@ all: modules
348 # using awk while concatenating to the final file.
349
350 PHONY += modules
351 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
352 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
353 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
354 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
355 @$(kecho) ' Building modules, stage 2.';
356 @@ -1107,7 +1162,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
357
358 # Target to prepare building external modules
359 PHONY += modules_prepare
360 -modules_prepare: prepare scripts
361 +modules_prepare: gcc-plugins prepare scripts
362
363 # Target to install modules
364 PHONY += modules_install
365 @@ -1204,6 +1259,7 @@ distclean: mrproper
366 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
367 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
368 -o -name '.*.rej' \
369 + -o -name '*.so' \
370 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
371 -type f -print | xargs rm -f
372
373 @@ -1364,6 +1420,8 @@ PHONY += $(module-dirs) modules
374 $(module-dirs): crmodverdir $(objtree)/Module.symvers
375 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
376
377 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
378 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
379 modules: $(module-dirs)
380 @$(kecho) ' Building modules, stage 2.';
381 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
382 @@ -1490,17 +1548,21 @@ else
383 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
384 endif
385
386 -%.s: %.c prepare scripts FORCE
387 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
388 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
389 +%.s: %.c gcc-plugins prepare scripts FORCE
390 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
391 %.i: %.c prepare scripts FORCE
392 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
393 -%.o: %.c prepare scripts FORCE
394 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
395 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
396 +%.o: %.c gcc-plugins prepare scripts FORCE
397 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
398 %.lst: %.c prepare scripts FORCE
399 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
400 -%.s: %.S prepare scripts FORCE
401 +%.s: %.S gcc-plugins prepare scripts FORCE
402 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
403 -%.o: %.S prepare scripts FORCE
404 +%.o: %.S gcc-plugins prepare scripts FORCE
405 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
406 %.symtypes: %.c prepare scripts FORCE
407 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
408 @@ -1510,11 +1572,15 @@ endif
409 $(cmd_crmodverdir)
410 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
411 $(build)=$(build-dir)
412 -%/: prepare scripts FORCE
413 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
414 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
415 +%/: gcc-plugins prepare scripts FORCE
416 $(cmd_crmodverdir)
417 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
418 $(build)=$(build-dir)
419 -%.ko: prepare scripts FORCE
420 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
421 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
422 +%.ko: gcc-plugins prepare scripts FORCE
423 $(cmd_crmodverdir)
424 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
425 $(build)=$(build-dir) $(@:.ko=.o)
426 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
427 index 3bb7ffe..347a54c 100644
428 --- a/arch/alpha/include/asm/atomic.h
429 +++ b/arch/alpha/include/asm/atomic.h
430 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
431 #define atomic_dec(v) atomic_sub(1,(v))
432 #define atomic64_dec(v) atomic64_sub(1,(v))
433
434 +#define atomic64_read_unchecked(v) atomic64_read(v)
435 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
436 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
437 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
438 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
439 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
440 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
441 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
442 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
443 +
444 #define smp_mb__before_atomic_dec() smp_mb()
445 #define smp_mb__after_atomic_dec() smp_mb()
446 #define smp_mb__before_atomic_inc() smp_mb()
447 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
448 index ad368a9..fbe0f25 100644
449 --- a/arch/alpha/include/asm/cache.h
450 +++ b/arch/alpha/include/asm/cache.h
451 @@ -4,19 +4,19 @@
452 #ifndef __ARCH_ALPHA_CACHE_H
453 #define __ARCH_ALPHA_CACHE_H
454
455 +#include <linux/const.h>
456
457 /* Bytes per L1 (data) cache line. */
458 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
459 -# define L1_CACHE_BYTES 64
460 # define L1_CACHE_SHIFT 6
461 #else
462 /* Both EV4 and EV5 are write-through, read-allocate,
463 direct-mapped, physical.
464 */
465 -# define L1_CACHE_BYTES 32
466 # define L1_CACHE_SHIFT 5
467 #endif
468
469 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
470 #define SMP_CACHE_BYTES L1_CACHE_BYTES
471
472 #endif
473 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
474 index 968d999..d36b2df 100644
475 --- a/arch/alpha/include/asm/elf.h
476 +++ b/arch/alpha/include/asm/elf.h
477 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
478
479 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
480
481 +#ifdef CONFIG_PAX_ASLR
482 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
483 +
484 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
485 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
486 +#endif
487 +
488 /* $0 is set by ld.so to a pointer to a function which might be
489 registered using atexit. This provides a mean for the dynamic
490 linker to call DT_FINI functions for shared libraries that have
491 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
492 index bc2a0da..8ad11ee 100644
493 --- a/arch/alpha/include/asm/pgalloc.h
494 +++ b/arch/alpha/include/asm/pgalloc.h
495 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
496 pgd_set(pgd, pmd);
497 }
498
499 +static inline void
500 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
501 +{
502 + pgd_populate(mm, pgd, pmd);
503 +}
504 +
505 extern pgd_t *pgd_alloc(struct mm_struct *mm);
506
507 static inline void
508 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
509 index 81a4342..348b927 100644
510 --- a/arch/alpha/include/asm/pgtable.h
511 +++ b/arch/alpha/include/asm/pgtable.h
512 @@ -102,6 +102,17 @@ struct vm_area_struct;
513 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
514 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
515 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
516 +
517 +#ifdef CONFIG_PAX_PAGEEXEC
518 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
519 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
520 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
521 +#else
522 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
523 +# define PAGE_COPY_NOEXEC PAGE_COPY
524 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
525 +#endif
526 +
527 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
528
529 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
530 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
531 index 2fd00b7..cfd5069 100644
532 --- a/arch/alpha/kernel/module.c
533 +++ b/arch/alpha/kernel/module.c
534 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
535
536 /* The small sections were sorted to the end of the segment.
537 The following should definitely cover them. */
538 - gp = (u64)me->module_core + me->core_size - 0x8000;
539 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
540 got = sechdrs[me->arch.gotsecindex].sh_addr;
541
542 for (i = 0; i < n; i++) {
543 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
544 index 49ee319..9ee7d14 100644
545 --- a/arch/alpha/kernel/osf_sys.c
546 +++ b/arch/alpha/kernel/osf_sys.c
547 @@ -1146,7 +1146,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
548 /* At this point: (!vma || addr < vma->vm_end). */
549 if (limit - len < addr)
550 return -ENOMEM;
551 - if (!vma || addr + len <= vma->vm_start)
552 + if (check_heap_stack_gap(vma, addr, len))
553 return addr;
554 addr = vma->vm_end;
555 vma = vma->vm_next;
556 @@ -1182,6 +1182,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
557 merely specific addresses, but regions of memory -- perhaps
558 this feature should be incorporated into all ports? */
559
560 +#ifdef CONFIG_PAX_RANDMMAP
561 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
562 +#endif
563 +
564 if (addr) {
565 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
566 if (addr != (unsigned long) -ENOMEM)
567 @@ -1189,8 +1193,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
568 }
569
570 /* Next, try allocating at TASK_UNMAPPED_BASE. */
571 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
572 - len, limit);
573 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
574 +
575 if (addr != (unsigned long) -ENOMEM)
576 return addr;
577
578 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
579 index 5eecab1..609abc0 100644
580 --- a/arch/alpha/mm/fault.c
581 +++ b/arch/alpha/mm/fault.c
582 @@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
583 __reload_thread(pcb);
584 }
585
586 +#ifdef CONFIG_PAX_PAGEEXEC
587 +/*
588 + * PaX: decide what to do with offenders (regs->pc = fault address)
589 + *
590 + * returns 1 when task should be killed
591 + * 2 when patched PLT trampoline was detected
592 + * 3 when unpatched PLT trampoline was detected
593 + */
594 +static int pax_handle_fetch_fault(struct pt_regs *regs)
595 +{
596 +
597 +#ifdef CONFIG_PAX_EMUPLT
598 + int err;
599 +
600 + do { /* PaX: patched PLT emulation #1 */
601 + unsigned int ldah, ldq, jmp;
602 +
603 + err = get_user(ldah, (unsigned int *)regs->pc);
604 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
605 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
606 +
607 + if (err)
608 + break;
609 +
610 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
611 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
612 + jmp == 0x6BFB0000U)
613 + {
614 + unsigned long r27, addr;
615 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
616 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
617 +
618 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
619 + err = get_user(r27, (unsigned long *)addr);
620 + if (err)
621 + break;
622 +
623 + regs->r27 = r27;
624 + regs->pc = r27;
625 + return 2;
626 + }
627 + } while (0);
628 +
629 + do { /* PaX: patched PLT emulation #2 */
630 + unsigned int ldah, lda, br;
631 +
632 + err = get_user(ldah, (unsigned int *)regs->pc);
633 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
634 + err |= get_user(br, (unsigned int *)(regs->pc+8));
635 +
636 + if (err)
637 + break;
638 +
639 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
640 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
641 + (br & 0xFFE00000U) == 0xC3E00000U)
642 + {
643 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
644 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
645 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
646 +
647 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
648 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
649 + return 2;
650 + }
651 + } while (0);
652 +
653 + do { /* PaX: unpatched PLT emulation */
654 + unsigned int br;
655 +
656 + err = get_user(br, (unsigned int *)regs->pc);
657 +
658 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
659 + unsigned int br2, ldq, nop, jmp;
660 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
661 +
662 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
663 + err = get_user(br2, (unsigned int *)addr);
664 + err |= get_user(ldq, (unsigned int *)(addr+4));
665 + err |= get_user(nop, (unsigned int *)(addr+8));
666 + err |= get_user(jmp, (unsigned int *)(addr+12));
667 + err |= get_user(resolver, (unsigned long *)(addr+16));
668 +
669 + if (err)
670 + break;
671 +
672 + if (br2 == 0xC3600000U &&
673 + ldq == 0xA77B000CU &&
674 + nop == 0x47FF041FU &&
675 + jmp == 0x6B7B0000U)
676 + {
677 + regs->r28 = regs->pc+4;
678 + regs->r27 = addr+16;
679 + regs->pc = resolver;
680 + return 3;
681 + }
682 + }
683 + } while (0);
684 +#endif
685 +
686 + return 1;
687 +}
688 +
689 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
690 +{
691 + unsigned long i;
692 +
693 + printk(KERN_ERR "PAX: bytes at PC: ");
694 + for (i = 0; i < 5; i++) {
695 + unsigned int c;
696 + if (get_user(c, (unsigned int *)pc+i))
697 + printk(KERN_CONT "???????? ");
698 + else
699 + printk(KERN_CONT "%08x ", c);
700 + }
701 + printk("\n");
702 +}
703 +#endif
704
705 /*
706 * This routine handles page faults. It determines the address,
707 @@ -130,8 +248,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
708 good_area:
709 si_code = SEGV_ACCERR;
710 if (cause < 0) {
711 - if (!(vma->vm_flags & VM_EXEC))
712 + if (!(vma->vm_flags & VM_EXEC)) {
713 +
714 +#ifdef CONFIG_PAX_PAGEEXEC
715 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
716 + goto bad_area;
717 +
718 + up_read(&mm->mmap_sem);
719 + switch (pax_handle_fetch_fault(regs)) {
720 +
721 +#ifdef CONFIG_PAX_EMUPLT
722 + case 2:
723 + case 3:
724 + return;
725 +#endif
726 +
727 + }
728 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
729 + do_group_exit(SIGKILL);
730 +#else
731 goto bad_area;
732 +#endif
733 +
734 + }
735 } else if (!cause) {
736 /* Allow reads even for write-only mappings */
737 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
738 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
739 index 68374ba..cff7196 100644
740 --- a/arch/arm/include/asm/atomic.h
741 +++ b/arch/arm/include/asm/atomic.h
742 @@ -17,17 +17,35 @@
743 #include <asm/barrier.h>
744 #include <asm/cmpxchg.h>
745
746 +#ifdef CONFIG_GENERIC_ATOMIC64
747 +#include <asm-generic/atomic64.h>
748 +#endif
749 +
750 #define ATOMIC_INIT(i) { (i) }
751
752 #ifdef __KERNEL__
753
754 +#define _ASM_EXTABLE(from, to) \
755 +" .pushsection __ex_table,\"a\"\n"\
756 +" .align 3\n" \
757 +" .long " #from ", " #to"\n" \
758 +" .popsection"
759 +
760 /*
761 * On ARM, ordinary assignment (str instruction) doesn't clear the local
762 * strex/ldrex monitor on some implementations. The reason we can use it for
763 * atomic_set() is the clrex or dummy strex done on every exception return.
764 */
765 #define atomic_read(v) (*(volatile int *)&(v)->counter)
766 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
767 +{
768 + return v->counter;
769 +}
770 #define atomic_set(v,i) (((v)->counter) = (i))
771 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
772 +{
773 + v->counter = i;
774 +}
775
776 #if __LINUX_ARM_ARCH__ >= 6
777
778 @@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
779 int result;
780
781 __asm__ __volatile__("@ atomic_add\n"
782 +"1: ldrex %1, [%3]\n"
783 +" adds %0, %1, %4\n"
784 +
785 +#ifdef CONFIG_PAX_REFCOUNT
786 +" bvc 3f\n"
787 +"2: bkpt 0xf103\n"
788 +"3:\n"
789 +#endif
790 +
791 +" strex %1, %0, [%3]\n"
792 +" teq %1, #0\n"
793 +" bne 1b"
794 +
795 +#ifdef CONFIG_PAX_REFCOUNT
796 +"\n4:\n"
797 + _ASM_EXTABLE(2b, 4b)
798 +#endif
799 +
800 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
801 + : "r" (&v->counter), "Ir" (i)
802 + : "cc");
803 +}
804 +
805 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
806 +{
807 + unsigned long tmp;
808 + int result;
809 +
810 + __asm__ __volatile__("@ atomic_add_unchecked\n"
811 "1: ldrex %0, [%3]\n"
812 " add %0, %0, %4\n"
813 " strex %1, %0, [%3]\n"
814 @@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
815 smp_mb();
816
817 __asm__ __volatile__("@ atomic_add_return\n"
818 +"1: ldrex %1, [%3]\n"
819 +" adds %0, %1, %4\n"
820 +
821 +#ifdef CONFIG_PAX_REFCOUNT
822 +" bvc 3f\n"
823 +" mov %0, %1\n"
824 +"2: bkpt 0xf103\n"
825 +"3:\n"
826 +#endif
827 +
828 +" strex %1, %0, [%3]\n"
829 +" teq %1, #0\n"
830 +" bne 1b"
831 +
832 +#ifdef CONFIG_PAX_REFCOUNT
833 +"\n4:\n"
834 + _ASM_EXTABLE(2b, 4b)
835 +#endif
836 +
837 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
838 + : "r" (&v->counter), "Ir" (i)
839 + : "cc");
840 +
841 + smp_mb();
842 +
843 + return result;
844 +}
845 +
846 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
847 +{
848 + unsigned long tmp;
849 + int result;
850 +
851 + smp_mb();
852 +
853 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
854 "1: ldrex %0, [%3]\n"
855 " add %0, %0, %4\n"
856 " strex %1, %0, [%3]\n"
857 @@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
858 int result;
859
860 __asm__ __volatile__("@ atomic_sub\n"
861 +"1: ldrex %1, [%3]\n"
862 +" subs %0, %1, %4\n"
863 +
864 +#ifdef CONFIG_PAX_REFCOUNT
865 +" bvc 3f\n"
866 +"2: bkpt 0xf103\n"
867 +"3:\n"
868 +#endif
869 +
870 +" strex %1, %0, [%3]\n"
871 +" teq %1, #0\n"
872 +" bne 1b"
873 +
874 +#ifdef CONFIG_PAX_REFCOUNT
875 +"\n4:\n"
876 + _ASM_EXTABLE(2b, 4b)
877 +#endif
878 +
879 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
880 + : "r" (&v->counter), "Ir" (i)
881 + : "cc");
882 +}
883 +
884 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
885 +{
886 + unsigned long tmp;
887 + int result;
888 +
889 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
890 "1: ldrex %0, [%3]\n"
891 " sub %0, %0, %4\n"
892 " strex %1, %0, [%3]\n"
893 @@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
894 smp_mb();
895
896 __asm__ __volatile__("@ atomic_sub_return\n"
897 -"1: ldrex %0, [%3]\n"
898 -" sub %0, %0, %4\n"
899 +"1: ldrex %1, [%3]\n"
900 +" sub %0, %1, %4\n"
901 +
902 +#ifdef CONFIG_PAX_REFCOUNT
903 +" bvc 3f\n"
904 +" mov %0, %1\n"
905 +"2: bkpt 0xf103\n"
906 +"3:\n"
907 +#endif
908 +
909 " strex %1, %0, [%3]\n"
910 " teq %1, #0\n"
911 " bne 1b"
912 +
913 +#ifdef CONFIG_PAX_REFCOUNT
914 +"\n4:\n"
915 + _ASM_EXTABLE(2b, 4b)
916 +#endif
917 +
918 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
919 : "r" (&v->counter), "Ir" (i)
920 : "cc");
921 @@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
922 return oldval;
923 }
924
925 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
926 +{
927 + unsigned long oldval, res;
928 +
929 + smp_mb();
930 +
931 + do {
932 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
933 + "ldrex %1, [%3]\n"
934 + "mov %0, #0\n"
935 + "teq %1, %4\n"
936 + "strexeq %0, %5, [%3]\n"
937 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
938 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
939 + : "cc");
940 + } while (res);
941 +
942 + smp_mb();
943 +
944 + return oldval;
945 +}
946 +
947 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
948 {
949 unsigned long tmp, tmp2;
950 @@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
951
952 return val;
953 }
954 +
955 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
956 +{
957 + return atomic_add_return(i, v);
958 +}
959 +
960 #define atomic_add(i, v) (void) atomic_add_return(i, v)
961 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
962 +{
963 + (void) atomic_add_return(i, v);
964 +}
965
966 static inline int atomic_sub_return(int i, atomic_t *v)
967 {
968 @@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
969 return val;
970 }
971 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
972 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
973 +{
974 + (void) atomic_sub_return(i, v);
975 +}
976
977 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
978 {
979 @@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
980 return ret;
981 }
982
983 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
984 +{
985 + return atomic_cmpxchg(v, old, new);
986 +}
987 +
988 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
989 {
990 unsigned long flags;
991 @@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
992 #endif /* __LINUX_ARM_ARCH__ */
993
994 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
995 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
996 +{
997 + return xchg(&v->counter, new);
998 +}
999
1000 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1001 {
1002 @@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1003 }
1004
1005 #define atomic_inc(v) atomic_add(1, v)
1006 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1007 +{
1008 + atomic_add_unchecked(1, v);
1009 +}
1010 #define atomic_dec(v) atomic_sub(1, v)
1011 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1012 +{
1013 + atomic_sub_unchecked(1, v);
1014 +}
1015
1016 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1017 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1018 +{
1019 + return atomic_add_return_unchecked(1, v) == 0;
1020 +}
1021 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1022 #define atomic_inc_return(v) (atomic_add_return(1, v))
1023 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1024 +{
1025 + return atomic_add_return_unchecked(1, v);
1026 +}
1027 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1028 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1029
1030 @@ -241,6 +428,14 @@ typedef struct {
1031 u64 __aligned(8) counter;
1032 } atomic64_t;
1033
1034 +#ifdef CONFIG_PAX_REFCOUNT
1035 +typedef struct {
1036 + u64 __aligned(8) counter;
1037 +} atomic64_unchecked_t;
1038 +#else
1039 +typedef atomic64_t atomic64_unchecked_t;
1040 +#endif
1041 +
1042 #define ATOMIC64_INIT(i) { (i) }
1043
1044 static inline u64 atomic64_read(atomic64_t *v)
1045 @@ -256,6 +451,19 @@ static inline u64 atomic64_read(atomic64_t *v)
1046 return result;
1047 }
1048
1049 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1050 +{
1051 + u64 result;
1052 +
1053 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1054 +" ldrexd %0, %H0, [%1]"
1055 + : "=&r" (result)
1056 + : "r" (&v->counter), "Qo" (v->counter)
1057 + );
1058 +
1059 + return result;
1060 +}
1061 +
1062 static inline void atomic64_set(atomic64_t *v, u64 i)
1063 {
1064 u64 tmp;
1065 @@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1066 : "cc");
1067 }
1068
1069 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1070 +{
1071 + u64 tmp;
1072 +
1073 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1074 +"1: ldrexd %0, %H0, [%2]\n"
1075 +" strexd %0, %3, %H3, [%2]\n"
1076 +" teq %0, #0\n"
1077 +" bne 1b"
1078 + : "=&r" (tmp), "=Qo" (v->counter)
1079 + : "r" (&v->counter), "r" (i)
1080 + : "cc");
1081 +}
1082 +
1083 static inline void atomic64_add(u64 i, atomic64_t *v)
1084 {
1085 u64 result;
1086 @@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1087 __asm__ __volatile__("@ atomic64_add\n"
1088 "1: ldrexd %0, %H0, [%3]\n"
1089 " adds %0, %0, %4\n"
1090 +" adcs %H0, %H0, %H4\n"
1091 +
1092 +#ifdef CONFIG_PAX_REFCOUNT
1093 +" bvc 3f\n"
1094 +"2: bkpt 0xf103\n"
1095 +"3:\n"
1096 +#endif
1097 +
1098 +" strexd %1, %0, %H0, [%3]\n"
1099 +" teq %1, #0\n"
1100 +" bne 1b"
1101 +
1102 +#ifdef CONFIG_PAX_REFCOUNT
1103 +"\n4:\n"
1104 + _ASM_EXTABLE(2b, 4b)
1105 +#endif
1106 +
1107 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1108 + : "r" (&v->counter), "r" (i)
1109 + : "cc");
1110 +}
1111 +
1112 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1113 +{
1114 + u64 result;
1115 + unsigned long tmp;
1116 +
1117 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1118 +"1: ldrexd %0, %H0, [%3]\n"
1119 +" adds %0, %0, %4\n"
1120 " adc %H0, %H0, %H4\n"
1121 " strexd %1, %0, %H0, [%3]\n"
1122 " teq %1, #0\n"
1123 @@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1124
1125 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1126 {
1127 - u64 result;
1128 - unsigned long tmp;
1129 + u64 result, tmp;
1130
1131 smp_mb();
1132
1133 __asm__ __volatile__("@ atomic64_add_return\n"
1134 +"1: ldrexd %1, %H1, [%3]\n"
1135 +" adds %0, %1, %4\n"
1136 +" adcs %H0, %H1, %H4\n"
1137 +
1138 +#ifdef CONFIG_PAX_REFCOUNT
1139 +" bvc 3f\n"
1140 +" mov %0, %1\n"
1141 +" mov %H0, %H1\n"
1142 +"2: bkpt 0xf103\n"
1143 +"3:\n"
1144 +#endif
1145 +
1146 +" strexd %1, %0, %H0, [%3]\n"
1147 +" teq %1, #0\n"
1148 +" bne 1b"
1149 +
1150 +#ifdef CONFIG_PAX_REFCOUNT
1151 +"\n4:\n"
1152 + _ASM_EXTABLE(2b, 4b)
1153 +#endif
1154 +
1155 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1156 + : "r" (&v->counter), "r" (i)
1157 + : "cc");
1158 +
1159 + smp_mb();
1160 +
1161 + return result;
1162 +}
1163 +
1164 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1165 +{
1166 + u64 result;
1167 + unsigned long tmp;
1168 +
1169 + smp_mb();
1170 +
1171 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1172 "1: ldrexd %0, %H0, [%3]\n"
1173 " adds %0, %0, %4\n"
1174 " adc %H0, %H0, %H4\n"
1175 @@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1176 __asm__ __volatile__("@ atomic64_sub\n"
1177 "1: ldrexd %0, %H0, [%3]\n"
1178 " subs %0, %0, %4\n"
1179 +" sbcs %H0, %H0, %H4\n"
1180 +
1181 +#ifdef CONFIG_PAX_REFCOUNT
1182 +" bvc 3f\n"
1183 +"2: bkpt 0xf103\n"
1184 +"3:\n"
1185 +#endif
1186 +
1187 +" strexd %1, %0, %H0, [%3]\n"
1188 +" teq %1, #0\n"
1189 +" bne 1b"
1190 +
1191 +#ifdef CONFIG_PAX_REFCOUNT
1192 +"\n4:\n"
1193 + _ASM_EXTABLE(2b, 4b)
1194 +#endif
1195 +
1196 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1197 + : "r" (&v->counter), "r" (i)
1198 + : "cc");
1199 +}
1200 +
1201 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1202 +{
1203 + u64 result;
1204 + unsigned long tmp;
1205 +
1206 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1207 +"1: ldrexd %0, %H0, [%3]\n"
1208 +" subs %0, %0, %4\n"
1209 " sbc %H0, %H0, %H4\n"
1210 " strexd %1, %0, %H0, [%3]\n"
1211 " teq %1, #0\n"
1212 @@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1213
1214 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1215 {
1216 - u64 result;
1217 - unsigned long tmp;
1218 + u64 result, tmp;
1219
1220 smp_mb();
1221
1222 __asm__ __volatile__("@ atomic64_sub_return\n"
1223 -"1: ldrexd %0, %H0, [%3]\n"
1224 -" subs %0, %0, %4\n"
1225 -" sbc %H0, %H0, %H4\n"
1226 +"1: ldrexd %1, %H1, [%3]\n"
1227 +" subs %0, %1, %4\n"
1228 +" sbc %H0, %H1, %H4\n"
1229 +
1230 +#ifdef CONFIG_PAX_REFCOUNT
1231 +" bvc 3f\n"
1232 +" mov %0, %1\n"
1233 +" mov %H0, %H1\n"
1234 +"2: bkpt 0xf103\n"
1235 +"3:\n"
1236 +#endif
1237 +
1238 " strexd %1, %0, %H0, [%3]\n"
1239 " teq %1, #0\n"
1240 " bne 1b"
1241 +
1242 +#ifdef CONFIG_PAX_REFCOUNT
1243 +"\n4:\n"
1244 + _ASM_EXTABLE(2b, 4b)
1245 +#endif
1246 +
1247 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1248 : "r" (&v->counter), "r" (i)
1249 : "cc");
1250 @@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1251 return oldval;
1252 }
1253
1254 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1255 +{
1256 + u64 oldval;
1257 + unsigned long res;
1258 +
1259 + smp_mb();
1260 +
1261 + do {
1262 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1263 + "ldrexd %1, %H1, [%3]\n"
1264 + "mov %0, #0\n"
1265 + "teq %1, %4\n"
1266 + "teqeq %H1, %H4\n"
1267 + "strexdeq %0, %5, %H5, [%3]"
1268 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1269 + : "r" (&ptr->counter), "r" (old), "r" (new)
1270 + : "cc");
1271 + } while (res);
1272 +
1273 + smp_mb();
1274 +
1275 + return oldval;
1276 +}
1277 +
1278 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1279 {
1280 u64 result;
1281 @@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1282
1283 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1284 {
1285 - u64 result;
1286 - unsigned long tmp;
1287 + u64 result, tmp;
1288
1289 smp_mb();
1290
1291 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1292 -"1: ldrexd %0, %H0, [%3]\n"
1293 -" subs %0, %0, #1\n"
1294 -" sbc %H0, %H0, #0\n"
1295 +"1: ldrexd %1, %H1, [%3]\n"
1296 +" subs %0, %1, #1\n"
1297 +" sbc %H0, %H1, #0\n"
1298 +
1299 +#ifdef CONFIG_PAX_REFCOUNT
1300 +" bvc 3f\n"
1301 +" mov %0, %1\n"
1302 +" mov %H0, %H1\n"
1303 +"2: bkpt 0xf103\n"
1304 +"3:\n"
1305 +#endif
1306 +
1307 " teq %H0, #0\n"
1308 -" bmi 2f\n"
1309 +" bmi 4f\n"
1310 " strexd %1, %0, %H0, [%3]\n"
1311 " teq %1, #0\n"
1312 " bne 1b\n"
1313 -"2:"
1314 +"4:\n"
1315 +
1316 +#ifdef CONFIG_PAX_REFCOUNT
1317 + _ASM_EXTABLE(2b, 4b)
1318 +#endif
1319 +
1320 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1321 : "r" (&v->counter)
1322 : "cc");
1323 @@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1324 " teq %0, %5\n"
1325 " teqeq %H0, %H5\n"
1326 " moveq %1, #0\n"
1327 -" beq 2f\n"
1328 +" beq 4f\n"
1329 " adds %0, %0, %6\n"
1330 " adc %H0, %H0, %H6\n"
1331 +
1332 +#ifdef CONFIG_PAX_REFCOUNT
1333 +" bvc 3f\n"
1334 +"2: bkpt 0xf103\n"
1335 +"3:\n"
1336 +#endif
1337 +
1338 " strexd %2, %0, %H0, [%4]\n"
1339 " teq %2, #0\n"
1340 " bne 1b\n"
1341 -"2:"
1342 +"4:\n"
1343 +
1344 +#ifdef CONFIG_PAX_REFCOUNT
1345 + _ASM_EXTABLE(2b, 4b)
1346 +#endif
1347 +
1348 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1349 : "r" (&v->counter), "r" (u), "r" (a)
1350 : "cc");
1351 @@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1352
1353 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1354 #define atomic64_inc(v) atomic64_add(1LL, (v))
1355 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1356 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1357 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1358 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1359 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1360 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1361 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1362 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1363 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1364 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1365 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1366 index 75fe66b..2255c86 100644
1367 --- a/arch/arm/include/asm/cache.h
1368 +++ b/arch/arm/include/asm/cache.h
1369 @@ -4,8 +4,10 @@
1370 #ifndef __ASMARM_CACHE_H
1371 #define __ASMARM_CACHE_H
1372
1373 +#include <linux/const.h>
1374 +
1375 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1376 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1377 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1378
1379 /*
1380 * Memory returned by kmalloc() may be used for DMA, so we must make
1381 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1382 index 1252a26..9dc17b5 100644
1383 --- a/arch/arm/include/asm/cacheflush.h
1384 +++ b/arch/arm/include/asm/cacheflush.h
1385 @@ -108,7 +108,7 @@ struct cpu_cache_fns {
1386 void (*dma_unmap_area)(const void *, size_t, int);
1387
1388 void (*dma_flush_range)(const void *, const void *);
1389 -};
1390 +} __no_const;
1391
1392 /*
1393 * Select the calling method
1394 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1395 index d41d7cb..9bea5e0 100644
1396 --- a/arch/arm/include/asm/cmpxchg.h
1397 +++ b/arch/arm/include/asm/cmpxchg.h
1398 @@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1399
1400 #define xchg(ptr,x) \
1401 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1402 +#define xchg_unchecked(ptr,x) \
1403 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1404
1405 #include <asm-generic/cmpxchg-local.h>
1406
1407 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1408 index 38050b1..9d90e8b 100644
1409 --- a/arch/arm/include/asm/elf.h
1410 +++ b/arch/arm/include/asm/elf.h
1411 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1412 the loader. We need to make sure that it is out of the way of the program
1413 that it will "exec", and that there is sufficient room for the brk. */
1414
1415 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1416 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1417 +
1418 +#ifdef CONFIG_PAX_ASLR
1419 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1420 +
1421 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1422 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1423 +#endif
1424
1425 /* When the program starts, a1 contains a pointer to a function to be
1426 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1427 @@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1428 extern void elf_set_personality(const struct elf32_hdr *);
1429 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1430
1431 -struct mm_struct;
1432 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1433 -#define arch_randomize_brk arch_randomize_brk
1434 -
1435 #endif
1436 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1437 index e51b1e8..32a3113 100644
1438 --- a/arch/arm/include/asm/kmap_types.h
1439 +++ b/arch/arm/include/asm/kmap_types.h
1440 @@ -21,6 +21,7 @@ enum km_type {
1441 KM_L1_CACHE,
1442 KM_L2_CACHE,
1443 KM_KDB,
1444 + KM_CLEARPAGE,
1445 KM_TYPE_NR
1446 };
1447
1448 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1449 index 53426c6..c7baff3 100644
1450 --- a/arch/arm/include/asm/outercache.h
1451 +++ b/arch/arm/include/asm/outercache.h
1452 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1453 #endif
1454 void (*set_debug)(unsigned long);
1455 void (*resume)(void);
1456 -};
1457 +} __no_const;
1458
1459 #ifdef CONFIG_OUTER_CACHE
1460
1461 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1462 index 5838361..da6e813 100644
1463 --- a/arch/arm/include/asm/page.h
1464 +++ b/arch/arm/include/asm/page.h
1465 @@ -123,7 +123,7 @@ struct cpu_user_fns {
1466 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1467 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1468 unsigned long vaddr, struct vm_area_struct *vma);
1469 -};
1470 +} __no_const;
1471
1472 #ifdef MULTI_USER
1473 extern struct cpu_user_fns cpu_user;
1474 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1475 index 943504f..bf8d667 100644
1476 --- a/arch/arm/include/asm/pgalloc.h
1477 +++ b/arch/arm/include/asm/pgalloc.h
1478 @@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1479 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1480 }
1481
1482 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1483 +{
1484 + pud_populate(mm, pud, pmd);
1485 +}
1486 +
1487 #else /* !CONFIG_ARM_LPAE */
1488
1489 /*
1490 @@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1491 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1492 #define pmd_free(mm, pmd) do { } while (0)
1493 #define pud_populate(mm,pmd,pte) BUG()
1494 +#define pud_populate_kernel(mm,pmd,pte) BUG()
1495
1496 #endif /* CONFIG_ARM_LPAE */
1497
1498 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1499 index 0f04d84..2be5648 100644
1500 --- a/arch/arm/include/asm/thread_info.h
1501 +++ b/arch/arm/include/asm/thread_info.h
1502 @@ -148,6 +148,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1503 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
1504 #define TIF_SYSCALL_TRACE 8
1505 #define TIF_SYSCALL_AUDIT 9
1506 +
1507 +/* within 8 bits of TIF_SYSCALL_TRACE
1508 + to meet flexible second operand requirements
1509 +*/
1510 +#define TIF_GRSEC_SETXID 10
1511 +
1512 #define TIF_POLLING_NRFLAG 16
1513 #define TIF_USING_IWMMXT 17
1514 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1515 @@ -163,9 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1516 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1517 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
1518 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1519 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1520
1521 /* Checks for any syscall work in entry-common.S */
1522 -#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
1523 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1524 + _TIF_GRSEC_SETXID)
1525
1526 /*
1527 * Change these and you break ASM code in entry-common.S
1528 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1529 index 71f6536..602f279 100644
1530 --- a/arch/arm/include/asm/uaccess.h
1531 +++ b/arch/arm/include/asm/uaccess.h
1532 @@ -22,6 +22,8 @@
1533 #define VERIFY_READ 0
1534 #define VERIFY_WRITE 1
1535
1536 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1537 +
1538 /*
1539 * The exception table consists of pairs of addresses: the first is the
1540 * address of an instruction that is allowed to fault, and the second is
1541 @@ -387,8 +389,23 @@ do { \
1542
1543
1544 #ifdef CONFIG_MMU
1545 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1546 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1547 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1548 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1549 +
1550 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1551 +{
1552 + if (!__builtin_constant_p(n))
1553 + check_object_size(to, n, false);
1554 + return ___copy_from_user(to, from, n);
1555 +}
1556 +
1557 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1558 +{
1559 + if (!__builtin_constant_p(n))
1560 + check_object_size(from, n, true);
1561 + return ___copy_to_user(to, from, n);
1562 +}
1563 +
1564 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1565 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1566 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1567 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1568
1569 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1570 {
1571 + if ((long)n < 0)
1572 + return n;
1573 +
1574 if (access_ok(VERIFY_READ, from, n))
1575 n = __copy_from_user(to, from, n);
1576 else /* security hole - plug it */
1577 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1578
1579 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1580 {
1581 + if ((long)n < 0)
1582 + return n;
1583 +
1584 if (access_ok(VERIFY_WRITE, to, n))
1585 n = __copy_to_user(to, from, n);
1586 return n;
1587 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1588 index b57c75e..ed2d6b2 100644
1589 --- a/arch/arm/kernel/armksyms.c
1590 +++ b/arch/arm/kernel/armksyms.c
1591 @@ -94,8 +94,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1592 #ifdef CONFIG_MMU
1593 EXPORT_SYMBOL(copy_page);
1594
1595 -EXPORT_SYMBOL(__copy_from_user);
1596 -EXPORT_SYMBOL(__copy_to_user);
1597 +EXPORT_SYMBOL(___copy_from_user);
1598 +EXPORT_SYMBOL(___copy_to_user);
1599 EXPORT_SYMBOL(__clear_user);
1600
1601 EXPORT_SYMBOL(__get_user_1);
1602 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1603 index 2b7b017..c380fa2 100644
1604 --- a/arch/arm/kernel/process.c
1605 +++ b/arch/arm/kernel/process.c
1606 @@ -28,7 +28,6 @@
1607 #include <linux/tick.h>
1608 #include <linux/utsname.h>
1609 #include <linux/uaccess.h>
1610 -#include <linux/random.h>
1611 #include <linux/hw_breakpoint.h>
1612 #include <linux/cpuidle.h>
1613
1614 @@ -275,9 +274,10 @@ void machine_power_off(void)
1615 machine_shutdown();
1616 if (pm_power_off)
1617 pm_power_off();
1618 + BUG();
1619 }
1620
1621 -void machine_restart(char *cmd)
1622 +__noreturn void machine_restart(char *cmd)
1623 {
1624 machine_shutdown();
1625
1626 @@ -519,12 +519,6 @@ unsigned long get_wchan(struct task_struct *p)
1627 return 0;
1628 }
1629
1630 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1631 -{
1632 - unsigned long range_end = mm->brk + 0x02000000;
1633 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1634 -}
1635 -
1636 #ifdef CONFIG_MMU
1637 /*
1638 * The vectors page is always readable from user space for the
1639 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
1640 index 9650c14..ae30cdd 100644
1641 --- a/arch/arm/kernel/ptrace.c
1642 +++ b/arch/arm/kernel/ptrace.c
1643 @@ -906,10 +906,19 @@ long arch_ptrace(struct task_struct *child, long request,
1644 return ret;
1645 }
1646
1647 +#ifdef CONFIG_GRKERNSEC_SETXID
1648 +extern void gr_delayed_cred_worker(void);
1649 +#endif
1650 +
1651 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
1652 {
1653 unsigned long ip;
1654
1655 +#ifdef CONFIG_GRKERNSEC_SETXID
1656 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
1657 + gr_delayed_cred_worker();
1658 +#endif
1659 +
1660 if (why)
1661 audit_syscall_exit(regs);
1662 else
1663 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1664 index ebfac78..cbea9c0 100644
1665 --- a/arch/arm/kernel/setup.c
1666 +++ b/arch/arm/kernel/setup.c
1667 @@ -111,13 +111,13 @@ struct processor processor __read_mostly;
1668 struct cpu_tlb_fns cpu_tlb __read_mostly;
1669 #endif
1670 #ifdef MULTI_USER
1671 -struct cpu_user_fns cpu_user __read_mostly;
1672 +struct cpu_user_fns cpu_user __read_only;
1673 #endif
1674 #ifdef MULTI_CACHE
1675 -struct cpu_cache_fns cpu_cache __read_mostly;
1676 +struct cpu_cache_fns cpu_cache __read_only;
1677 #endif
1678 #ifdef CONFIG_OUTER_CACHE
1679 -struct outer_cache_fns outer_cache __read_mostly;
1680 +struct outer_cache_fns outer_cache __read_only;
1681 EXPORT_SYMBOL(outer_cache);
1682 #endif
1683
1684 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1685 index 63d402f..db1d714 100644
1686 --- a/arch/arm/kernel/traps.c
1687 +++ b/arch/arm/kernel/traps.c
1688 @@ -264,6 +264,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1689
1690 static DEFINE_RAW_SPINLOCK(die_lock);
1691
1692 +extern void gr_handle_kernel_exploit(void);
1693 +
1694 /*
1695 * This function is protected against re-entrancy.
1696 */
1697 @@ -296,6 +298,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1698 panic("Fatal exception in interrupt");
1699 if (panic_on_oops)
1700 panic("Fatal exception");
1701 +
1702 + gr_handle_kernel_exploit();
1703 +
1704 if (ret != NOTIFY_STOP)
1705 do_exit(SIGSEGV);
1706 }
1707 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1708 index 66a477a..bee61d3 100644
1709 --- a/arch/arm/lib/copy_from_user.S
1710 +++ b/arch/arm/lib/copy_from_user.S
1711 @@ -16,7 +16,7 @@
1712 /*
1713 * Prototype:
1714 *
1715 - * size_t __copy_from_user(void *to, const void *from, size_t n)
1716 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
1717 *
1718 * Purpose:
1719 *
1720 @@ -84,11 +84,11 @@
1721
1722 .text
1723
1724 -ENTRY(__copy_from_user)
1725 +ENTRY(___copy_from_user)
1726
1727 #include "copy_template.S"
1728
1729 -ENDPROC(__copy_from_user)
1730 +ENDPROC(___copy_from_user)
1731
1732 .pushsection .fixup,"ax"
1733 .align 0
1734 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1735 index 6ee2f67..d1cce76 100644
1736 --- a/arch/arm/lib/copy_page.S
1737 +++ b/arch/arm/lib/copy_page.S
1738 @@ -10,6 +10,7 @@
1739 * ASM optimised string functions
1740 */
1741 #include <linux/linkage.h>
1742 +#include <linux/const.h>
1743 #include <asm/assembler.h>
1744 #include <asm/asm-offsets.h>
1745 #include <asm/cache.h>
1746 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1747 index d066df6..df28194 100644
1748 --- a/arch/arm/lib/copy_to_user.S
1749 +++ b/arch/arm/lib/copy_to_user.S
1750 @@ -16,7 +16,7 @@
1751 /*
1752 * Prototype:
1753 *
1754 - * size_t __copy_to_user(void *to, const void *from, size_t n)
1755 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
1756 *
1757 * Purpose:
1758 *
1759 @@ -88,11 +88,11 @@
1760 .text
1761
1762 ENTRY(__copy_to_user_std)
1763 -WEAK(__copy_to_user)
1764 +WEAK(___copy_to_user)
1765
1766 #include "copy_template.S"
1767
1768 -ENDPROC(__copy_to_user)
1769 +ENDPROC(___copy_to_user)
1770 ENDPROC(__copy_to_user_std)
1771
1772 .pushsection .fixup,"ax"
1773 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1774 index 5c908b1..e712687 100644
1775 --- a/arch/arm/lib/uaccess.S
1776 +++ b/arch/arm/lib/uaccess.S
1777 @@ -20,7 +20,7 @@
1778
1779 #define PAGE_SHIFT 12
1780
1781 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1782 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1783 * Purpose : copy a block to user memory from kernel memory
1784 * Params : to - user memory
1785 * : from - kernel memory
1786 @@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1787 sub r2, r2, ip
1788 b .Lc2u_dest_aligned
1789
1790 -ENTRY(__copy_to_user)
1791 +ENTRY(___copy_to_user)
1792 stmfd sp!, {r2, r4 - r7, lr}
1793 cmp r2, #4
1794 blt .Lc2u_not_enough
1795 @@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1796 ldrgtb r3, [r1], #0
1797 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1798 b .Lc2u_finished
1799 -ENDPROC(__copy_to_user)
1800 +ENDPROC(___copy_to_user)
1801
1802 .pushsection .fixup,"ax"
1803 .align 0
1804 9001: ldmfd sp!, {r0, r4 - r7, pc}
1805 .popsection
1806
1807 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1808 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1809 * Purpose : copy a block from user memory to kernel memory
1810 * Params : to - kernel memory
1811 * : from - user memory
1812 @@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1813 sub r2, r2, ip
1814 b .Lcfu_dest_aligned
1815
1816 -ENTRY(__copy_from_user)
1817 +ENTRY(___copy_from_user)
1818 stmfd sp!, {r0, r2, r4 - r7, lr}
1819 cmp r2, #4
1820 blt .Lcfu_not_enough
1821 @@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1822 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1823 strgtb r3, [r0], #1
1824 b .Lcfu_finished
1825 -ENDPROC(__copy_from_user)
1826 +ENDPROC(___copy_from_user)
1827
1828 .pushsection .fixup,"ax"
1829 .align 0
1830 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1831 index 025f742..8432b08 100644
1832 --- a/arch/arm/lib/uaccess_with_memcpy.c
1833 +++ b/arch/arm/lib/uaccess_with_memcpy.c
1834 @@ -104,7 +104,7 @@ out:
1835 }
1836
1837 unsigned long
1838 -__copy_to_user(void __user *to, const void *from, unsigned long n)
1839 +___copy_to_user(void __user *to, const void *from, unsigned long n)
1840 {
1841 /*
1842 * This test is stubbed out of the main function above to keep
1843 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1844 index 518091c..eae9a76 100644
1845 --- a/arch/arm/mach-omap2/board-n8x0.c
1846 +++ b/arch/arm/mach-omap2/board-n8x0.c
1847 @@ -596,7 +596,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1848 }
1849 #endif
1850
1851 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1852 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1853 .late_init = n8x0_menelaus_late_init,
1854 };
1855
1856 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1857 index 5bb4835..4760f68 100644
1858 --- a/arch/arm/mm/fault.c
1859 +++ b/arch/arm/mm/fault.c
1860 @@ -174,6 +174,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1861 }
1862 #endif
1863
1864 +#ifdef CONFIG_PAX_PAGEEXEC
1865 + if (fsr & FSR_LNX_PF) {
1866 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1867 + do_group_exit(SIGKILL);
1868 + }
1869 +#endif
1870 +
1871 tsk->thread.address = addr;
1872 tsk->thread.error_code = fsr;
1873 tsk->thread.trap_no = 14;
1874 @@ -397,6 +404,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1875 }
1876 #endif /* CONFIG_MMU */
1877
1878 +#ifdef CONFIG_PAX_PAGEEXEC
1879 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1880 +{
1881 + long i;
1882 +
1883 + printk(KERN_ERR "PAX: bytes at PC: ");
1884 + for (i = 0; i < 20; i++) {
1885 + unsigned char c;
1886 + if (get_user(c, (__force unsigned char __user *)pc+i))
1887 + printk(KERN_CONT "?? ");
1888 + else
1889 + printk(KERN_CONT "%02x ", c);
1890 + }
1891 + printk("\n");
1892 +
1893 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1894 + for (i = -1; i < 20; i++) {
1895 + unsigned long c;
1896 + if (get_user(c, (__force unsigned long __user *)sp+i))
1897 + printk(KERN_CONT "???????? ");
1898 + else
1899 + printk(KERN_CONT "%08lx ", c);
1900 + }
1901 + printk("\n");
1902 +}
1903 +#endif
1904 +
1905 /*
1906 * First Level Translation Fault Handler
1907 *
1908 @@ -577,6 +611,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1909 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1910 struct siginfo info;
1911
1912 +#ifdef CONFIG_PAX_REFCOUNT
1913 + if (fsr_fs(ifsr) == 2) {
1914 + unsigned int bkpt;
1915 +
1916 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1917 + current->thread.error_code = ifsr;
1918 + current->thread.trap_no = 0;
1919 + pax_report_refcount_overflow(regs);
1920 + fixup_exception(regs);
1921 + return;
1922 + }
1923 + }
1924 +#endif
1925 +
1926 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1927 return;
1928
1929 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1930 index ce8cb19..3ec539d 100644
1931 --- a/arch/arm/mm/mmap.c
1932 +++ b/arch/arm/mm/mmap.c
1933 @@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1934 if (len > TASK_SIZE)
1935 return -ENOMEM;
1936
1937 +#ifdef CONFIG_PAX_RANDMMAP
1938 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1939 +#endif
1940 +
1941 if (addr) {
1942 if (do_align)
1943 addr = COLOUR_ALIGN(addr, pgoff);
1944 @@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1945 addr = PAGE_ALIGN(addr);
1946
1947 vma = find_vma(mm, addr);
1948 - if (TASK_SIZE - len >= addr &&
1949 - (!vma || addr + len <= vma->vm_start))
1950 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1951 return addr;
1952 }
1953 if (len > mm->cached_hole_size) {
1954 - start_addr = addr = mm->free_area_cache;
1955 + start_addr = addr = mm->free_area_cache;
1956 } else {
1957 - start_addr = addr = mm->mmap_base;
1958 - mm->cached_hole_size = 0;
1959 + start_addr = addr = mm->mmap_base;
1960 + mm->cached_hole_size = 0;
1961 }
1962
1963 full_search:
1964 @@ -124,14 +127,14 @@ full_search:
1965 * Start a new search - just in case we missed
1966 * some holes.
1967 */
1968 - if (start_addr != TASK_UNMAPPED_BASE) {
1969 - start_addr = addr = TASK_UNMAPPED_BASE;
1970 + if (start_addr != mm->mmap_base) {
1971 + start_addr = addr = mm->mmap_base;
1972 mm->cached_hole_size = 0;
1973 goto full_search;
1974 }
1975 return -ENOMEM;
1976 }
1977 - if (!vma || addr + len <= vma->vm_start) {
1978 + if (check_heap_stack_gap(vma, addr, len)) {
1979 /*
1980 * Remember the place where we stopped the search:
1981 */
1982 @@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1983
1984 if (mmap_is_legacy()) {
1985 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
1986 +
1987 +#ifdef CONFIG_PAX_RANDMMAP
1988 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1989 + mm->mmap_base += mm->delta_mmap;
1990 +#endif
1991 +
1992 mm->get_unmapped_area = arch_get_unmapped_area;
1993 mm->unmap_area = arch_unmap_area;
1994 } else {
1995 mm->mmap_base = mmap_base(random_factor);
1996 +
1997 +#ifdef CONFIG_PAX_RANDMMAP
1998 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1999 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2000 +#endif
2001 +
2002 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2003 mm->unmap_area = arch_unmap_area_topdown;
2004 }
2005 diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
2006 index fd556f7..af2e7d2 100644
2007 --- a/arch/arm/plat-orion/include/plat/addr-map.h
2008 +++ b/arch/arm/plat-orion/include/plat/addr-map.h
2009 @@ -26,7 +26,7 @@ struct orion_addr_map_cfg {
2010 value in bridge_virt_base */
2011 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
2012 const int win);
2013 -};
2014 +} __no_const;
2015
2016 /*
2017 * Information needed to setup one address mapping.
2018 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
2019 index 71a6827..e7fbc23 100644
2020 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
2021 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
2022 @@ -43,7 +43,7 @@ struct samsung_dma_ops {
2023 int (*started)(unsigned ch);
2024 int (*flush)(unsigned ch);
2025 int (*stop)(unsigned ch);
2026 -};
2027 +} __no_const;
2028
2029 extern void *samsung_dmadev_get_ops(void);
2030 extern void *s3c_dma_get_ops(void);
2031 diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
2032 index 5f28cae..3d23723 100644
2033 --- a/arch/arm/plat-samsung/include/plat/ehci.h
2034 +++ b/arch/arm/plat-samsung/include/plat/ehci.h
2035 @@ -14,7 +14,7 @@
2036 struct s5p_ehci_platdata {
2037 int (*phy_init)(struct platform_device *pdev, int type);
2038 int (*phy_exit)(struct platform_device *pdev, int type);
2039 -};
2040 +} __no_const;
2041
2042 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
2043
2044 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2045 index c3a58a1..78fbf54 100644
2046 --- a/arch/avr32/include/asm/cache.h
2047 +++ b/arch/avr32/include/asm/cache.h
2048 @@ -1,8 +1,10 @@
2049 #ifndef __ASM_AVR32_CACHE_H
2050 #define __ASM_AVR32_CACHE_H
2051
2052 +#include <linux/const.h>
2053 +
2054 #define L1_CACHE_SHIFT 5
2055 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2056 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2057
2058 /*
2059 * Memory returned by kmalloc() may be used for DMA, so we must make
2060 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2061 index 3b3159b..425ea94 100644
2062 --- a/arch/avr32/include/asm/elf.h
2063 +++ b/arch/avr32/include/asm/elf.h
2064 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2065 the loader. We need to make sure that it is out of the way of the program
2066 that it will "exec", and that there is sufficient room for the brk. */
2067
2068 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2069 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2070
2071 +#ifdef CONFIG_PAX_ASLR
2072 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2073 +
2074 +#define PAX_DELTA_MMAP_LEN 15
2075 +#define PAX_DELTA_STACK_LEN 15
2076 +#endif
2077
2078 /* This yields a mask that user programs can use to figure out what
2079 instruction set this CPU supports. This could be done in user space,
2080 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2081 index b7f5c68..556135c 100644
2082 --- a/arch/avr32/include/asm/kmap_types.h
2083 +++ b/arch/avr32/include/asm/kmap_types.h
2084 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
2085 D(11) KM_IRQ1,
2086 D(12) KM_SOFTIRQ0,
2087 D(13) KM_SOFTIRQ1,
2088 -D(14) KM_TYPE_NR
2089 +D(14) KM_CLEARPAGE,
2090 +D(15) KM_TYPE_NR
2091 };
2092
2093 #undef D
2094 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2095 index f7040a1..db9f300 100644
2096 --- a/arch/avr32/mm/fault.c
2097 +++ b/arch/avr32/mm/fault.c
2098 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2099
2100 int exception_trace = 1;
2101
2102 +#ifdef CONFIG_PAX_PAGEEXEC
2103 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2104 +{
2105 + unsigned long i;
2106 +
2107 + printk(KERN_ERR "PAX: bytes at PC: ");
2108 + for (i = 0; i < 20; i++) {
2109 + unsigned char c;
2110 + if (get_user(c, (unsigned char *)pc+i))
2111 + printk(KERN_CONT "???????? ");
2112 + else
2113 + printk(KERN_CONT "%02x ", c);
2114 + }
2115 + printk("\n");
2116 +}
2117 +#endif
2118 +
2119 /*
2120 * This routine handles page faults. It determines the address and the
2121 * problem, and then passes it off to one of the appropriate routines.
2122 @@ -156,6 +173,16 @@ bad_area:
2123 up_read(&mm->mmap_sem);
2124
2125 if (user_mode(regs)) {
2126 +
2127 +#ifdef CONFIG_PAX_PAGEEXEC
2128 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2129 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2130 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2131 + do_group_exit(SIGKILL);
2132 + }
2133 + }
2134 +#endif
2135 +
2136 if (exception_trace && printk_ratelimit())
2137 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2138 "sp %08lx ecr %lu\n",
2139 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2140 index 568885a..f8008df 100644
2141 --- a/arch/blackfin/include/asm/cache.h
2142 +++ b/arch/blackfin/include/asm/cache.h
2143 @@ -7,6 +7,7 @@
2144 #ifndef __ARCH_BLACKFIN_CACHE_H
2145 #define __ARCH_BLACKFIN_CACHE_H
2146
2147 +#include <linux/const.h>
2148 #include <linux/linkage.h> /* for asmlinkage */
2149
2150 /*
2151 @@ -14,7 +15,7 @@
2152 * Blackfin loads 32 bytes for cache
2153 */
2154 #define L1_CACHE_SHIFT 5
2155 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2156 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2157 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2158
2159 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2160 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2161 index aea2718..3639a60 100644
2162 --- a/arch/cris/include/arch-v10/arch/cache.h
2163 +++ b/arch/cris/include/arch-v10/arch/cache.h
2164 @@ -1,8 +1,9 @@
2165 #ifndef _ASM_ARCH_CACHE_H
2166 #define _ASM_ARCH_CACHE_H
2167
2168 +#include <linux/const.h>
2169 /* Etrax 100LX have 32-byte cache-lines. */
2170 -#define L1_CACHE_BYTES 32
2171 #define L1_CACHE_SHIFT 5
2172 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2173
2174 #endif /* _ASM_ARCH_CACHE_H */
2175 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2176 index 1de779f..336fad3 100644
2177 --- a/arch/cris/include/arch-v32/arch/cache.h
2178 +++ b/arch/cris/include/arch-v32/arch/cache.h
2179 @@ -1,11 +1,12 @@
2180 #ifndef _ASM_CRIS_ARCH_CACHE_H
2181 #define _ASM_CRIS_ARCH_CACHE_H
2182
2183 +#include <linux/const.h>
2184 #include <arch/hwregs/dma.h>
2185
2186 /* A cache-line is 32 bytes. */
2187 -#define L1_CACHE_BYTES 32
2188 #define L1_CACHE_SHIFT 5
2189 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2190
2191 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2192
2193 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2194 index b86329d..6709906 100644
2195 --- a/arch/frv/include/asm/atomic.h
2196 +++ b/arch/frv/include/asm/atomic.h
2197 @@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
2198 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2199 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2200
2201 +#define atomic64_read_unchecked(v) atomic64_read(v)
2202 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2203 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2204 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2205 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2206 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2207 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2208 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2209 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2210 +
2211 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2212 {
2213 int c, old;
2214 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2215 index 2797163..c2a401d 100644
2216 --- a/arch/frv/include/asm/cache.h
2217 +++ b/arch/frv/include/asm/cache.h
2218 @@ -12,10 +12,11 @@
2219 #ifndef __ASM_CACHE_H
2220 #define __ASM_CACHE_H
2221
2222 +#include <linux/const.h>
2223
2224 /* bytes per L1 cache line */
2225 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2226 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2227 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2228
2229 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2230 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2231 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2232 index f8e16b2..c73ff79 100644
2233 --- a/arch/frv/include/asm/kmap_types.h
2234 +++ b/arch/frv/include/asm/kmap_types.h
2235 @@ -23,6 +23,7 @@ enum km_type {
2236 KM_IRQ1,
2237 KM_SOFTIRQ0,
2238 KM_SOFTIRQ1,
2239 + KM_CLEARPAGE,
2240 KM_TYPE_NR
2241 };
2242
2243 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2244 index 385fd30..6c3d97e 100644
2245 --- a/arch/frv/mm/elf-fdpic.c
2246 +++ b/arch/frv/mm/elf-fdpic.c
2247 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2248 if (addr) {
2249 addr = PAGE_ALIGN(addr);
2250 vma = find_vma(current->mm, addr);
2251 - if (TASK_SIZE - len >= addr &&
2252 - (!vma || addr + len <= vma->vm_start))
2253 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2254 goto success;
2255 }
2256
2257 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2258 for (; vma; vma = vma->vm_next) {
2259 if (addr > limit)
2260 break;
2261 - if (addr + len <= vma->vm_start)
2262 + if (check_heap_stack_gap(vma, addr, len))
2263 goto success;
2264 addr = vma->vm_end;
2265 }
2266 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2267 for (; vma; vma = vma->vm_next) {
2268 if (addr > limit)
2269 break;
2270 - if (addr + len <= vma->vm_start)
2271 + if (check_heap_stack_gap(vma, addr, len))
2272 goto success;
2273 addr = vma->vm_end;
2274 }
2275 diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2276 index c635028..6d9445a 100644
2277 --- a/arch/h8300/include/asm/cache.h
2278 +++ b/arch/h8300/include/asm/cache.h
2279 @@ -1,8 +1,10 @@
2280 #ifndef __ARCH_H8300_CACHE_H
2281 #define __ARCH_H8300_CACHE_H
2282
2283 +#include <linux/const.h>
2284 +
2285 /* bytes per L1 cache line */
2286 -#define L1_CACHE_BYTES 4
2287 +#define L1_CACHE_BYTES _AC(4,UL)
2288
2289 /* m68k-elf-gcc 2.95.2 doesn't like these */
2290
2291 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2292 index 0f01de2..d37d309 100644
2293 --- a/arch/hexagon/include/asm/cache.h
2294 +++ b/arch/hexagon/include/asm/cache.h
2295 @@ -21,9 +21,11 @@
2296 #ifndef __ASM_CACHE_H
2297 #define __ASM_CACHE_H
2298
2299 +#include <linux/const.h>
2300 +
2301 /* Bytes per L1 cache line */
2302 -#define L1_CACHE_SHIFT (5)
2303 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2304 +#define L1_CACHE_SHIFT 5
2305 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2306
2307 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2308 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2309 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2310 index 7d91166..88ab87e 100644
2311 --- a/arch/ia64/include/asm/atomic.h
2312 +++ b/arch/ia64/include/asm/atomic.h
2313 @@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2314 #define atomic64_inc(v) atomic64_add(1, (v))
2315 #define atomic64_dec(v) atomic64_sub(1, (v))
2316
2317 +#define atomic64_read_unchecked(v) atomic64_read(v)
2318 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2319 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2320 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2321 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2322 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2323 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2324 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2325 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2326 +
2327 /* Atomic operations are already serializing */
2328 #define smp_mb__before_atomic_dec() barrier()
2329 #define smp_mb__after_atomic_dec() barrier()
2330 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2331 index 988254a..e1ee885 100644
2332 --- a/arch/ia64/include/asm/cache.h
2333 +++ b/arch/ia64/include/asm/cache.h
2334 @@ -1,6 +1,7 @@
2335 #ifndef _ASM_IA64_CACHE_H
2336 #define _ASM_IA64_CACHE_H
2337
2338 +#include <linux/const.h>
2339
2340 /*
2341 * Copyright (C) 1998-2000 Hewlett-Packard Co
2342 @@ -9,7 +10,7 @@
2343
2344 /* Bytes per L1 (data) cache line. */
2345 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2346 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2347 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2348
2349 #ifdef CONFIG_SMP
2350 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2351 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2352 index b5298eb..67c6e62 100644
2353 --- a/arch/ia64/include/asm/elf.h
2354 +++ b/arch/ia64/include/asm/elf.h
2355 @@ -42,6 +42,13 @@
2356 */
2357 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2358
2359 +#ifdef CONFIG_PAX_ASLR
2360 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2361 +
2362 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2363 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2364 +#endif
2365 +
2366 #define PT_IA_64_UNWIND 0x70000001
2367
2368 /* IA-64 relocations: */
2369 diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
2370 index 96a8d92..617a1cf 100644
2371 --- a/arch/ia64/include/asm/pgalloc.h
2372 +++ b/arch/ia64/include/asm/pgalloc.h
2373 @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2374 pgd_val(*pgd_entry) = __pa(pud);
2375 }
2376
2377 +static inline void
2378 +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2379 +{
2380 + pgd_populate(mm, pgd_entry, pud);
2381 +}
2382 +
2383 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
2384 {
2385 return quicklist_alloc(0, GFP_KERNEL, NULL);
2386 @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2387 pud_val(*pud_entry) = __pa(pmd);
2388 }
2389
2390 +static inline void
2391 +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2392 +{
2393 + pud_populate(mm, pud_entry, pmd);
2394 +}
2395 +
2396 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
2397 {
2398 return quicklist_alloc(0, GFP_KERNEL, NULL);
2399 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2400 index 815810c..d60bd4c 100644
2401 --- a/arch/ia64/include/asm/pgtable.h
2402 +++ b/arch/ia64/include/asm/pgtable.h
2403 @@ -12,7 +12,7 @@
2404 * David Mosberger-Tang <davidm@hpl.hp.com>
2405 */
2406
2407 -
2408 +#include <linux/const.h>
2409 #include <asm/mman.h>
2410 #include <asm/page.h>
2411 #include <asm/processor.h>
2412 @@ -142,6 +142,17 @@
2413 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2414 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2415 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2416 +
2417 +#ifdef CONFIG_PAX_PAGEEXEC
2418 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2419 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2420 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2421 +#else
2422 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2423 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2424 +# define PAGE_COPY_NOEXEC PAGE_COPY
2425 +#endif
2426 +
2427 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2428 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2429 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2430 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2431 index 54ff557..70c88b7 100644
2432 --- a/arch/ia64/include/asm/spinlock.h
2433 +++ b/arch/ia64/include/asm/spinlock.h
2434 @@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2435 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2436
2437 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2438 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2439 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2440 }
2441
2442 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2443 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2444 index 449c8c0..432a3d2 100644
2445 --- a/arch/ia64/include/asm/uaccess.h
2446 +++ b/arch/ia64/include/asm/uaccess.h
2447 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2448 const void *__cu_from = (from); \
2449 long __cu_len = (n); \
2450 \
2451 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
2452 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2453 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2454 __cu_len; \
2455 })
2456 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2457 long __cu_len = (n); \
2458 \
2459 __chk_user_ptr(__cu_from); \
2460 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
2461 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2462 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2463 __cu_len; \
2464 })
2465 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2466 index 24603be..948052d 100644
2467 --- a/arch/ia64/kernel/module.c
2468 +++ b/arch/ia64/kernel/module.c
2469 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2470 void
2471 module_free (struct module *mod, void *module_region)
2472 {
2473 - if (mod && mod->arch.init_unw_table &&
2474 - module_region == mod->module_init) {
2475 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2476 unw_remove_unwind_table(mod->arch.init_unw_table);
2477 mod->arch.init_unw_table = NULL;
2478 }
2479 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2480 }
2481
2482 static inline int
2483 +in_init_rx (const struct module *mod, uint64_t addr)
2484 +{
2485 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2486 +}
2487 +
2488 +static inline int
2489 +in_init_rw (const struct module *mod, uint64_t addr)
2490 +{
2491 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2492 +}
2493 +
2494 +static inline int
2495 in_init (const struct module *mod, uint64_t addr)
2496 {
2497 - return addr - (uint64_t) mod->module_init < mod->init_size;
2498 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2499 +}
2500 +
2501 +static inline int
2502 +in_core_rx (const struct module *mod, uint64_t addr)
2503 +{
2504 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2505 +}
2506 +
2507 +static inline int
2508 +in_core_rw (const struct module *mod, uint64_t addr)
2509 +{
2510 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2511 }
2512
2513 static inline int
2514 in_core (const struct module *mod, uint64_t addr)
2515 {
2516 - return addr - (uint64_t) mod->module_core < mod->core_size;
2517 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2518 }
2519
2520 static inline int
2521 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2522 break;
2523
2524 case RV_BDREL:
2525 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2526 + if (in_init_rx(mod, val))
2527 + val -= (uint64_t) mod->module_init_rx;
2528 + else if (in_init_rw(mod, val))
2529 + val -= (uint64_t) mod->module_init_rw;
2530 + else if (in_core_rx(mod, val))
2531 + val -= (uint64_t) mod->module_core_rx;
2532 + else if (in_core_rw(mod, val))
2533 + val -= (uint64_t) mod->module_core_rw;
2534 break;
2535
2536 case RV_LTV:
2537 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2538 * addresses have been selected...
2539 */
2540 uint64_t gp;
2541 - if (mod->core_size > MAX_LTOFF)
2542 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2543 /*
2544 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2545 * at the end of the module.
2546 */
2547 - gp = mod->core_size - MAX_LTOFF / 2;
2548 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2549 else
2550 - gp = mod->core_size / 2;
2551 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2552 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2553 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2554 mod->arch.gp = gp;
2555 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2556 }
2557 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2558 index 609d500..7dde2a8 100644
2559 --- a/arch/ia64/kernel/sys_ia64.c
2560 +++ b/arch/ia64/kernel/sys_ia64.c
2561 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2562 if (REGION_NUMBER(addr) == RGN_HPAGE)
2563 addr = 0;
2564 #endif
2565 +
2566 +#ifdef CONFIG_PAX_RANDMMAP
2567 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2568 + addr = mm->free_area_cache;
2569 + else
2570 +#endif
2571 +
2572 if (!addr)
2573 addr = mm->free_area_cache;
2574
2575 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2576 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2577 /* At this point: (!vma || addr < vma->vm_end). */
2578 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2579 - if (start_addr != TASK_UNMAPPED_BASE) {
2580 + if (start_addr != mm->mmap_base) {
2581 /* Start a new search --- just in case we missed some holes. */
2582 - addr = TASK_UNMAPPED_BASE;
2583 + addr = mm->mmap_base;
2584 goto full_search;
2585 }
2586 return -ENOMEM;
2587 }
2588 - if (!vma || addr + len <= vma->vm_start) {
2589 + if (check_heap_stack_gap(vma, addr, len)) {
2590 /* Remember the address where we stopped this search: */
2591 mm->free_area_cache = addr + len;
2592 return addr;
2593 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2594 index 0ccb28f..8992469 100644
2595 --- a/arch/ia64/kernel/vmlinux.lds.S
2596 +++ b/arch/ia64/kernel/vmlinux.lds.S
2597 @@ -198,7 +198,7 @@ SECTIONS {
2598 /* Per-cpu data: */
2599 . = ALIGN(PERCPU_PAGE_SIZE);
2600 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2601 - __phys_per_cpu_start = __per_cpu_load;
2602 + __phys_per_cpu_start = per_cpu_load;
2603 /*
2604 * ensure percpu data fits
2605 * into percpu page size
2606 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2607 index 02d29c2..ea893df 100644
2608 --- a/arch/ia64/mm/fault.c
2609 +++ b/arch/ia64/mm/fault.c
2610 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2611 return pte_present(pte);
2612 }
2613
2614 +#ifdef CONFIG_PAX_PAGEEXEC
2615 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2616 +{
2617 + unsigned long i;
2618 +
2619 + printk(KERN_ERR "PAX: bytes at PC: ");
2620 + for (i = 0; i < 8; i++) {
2621 + unsigned int c;
2622 + if (get_user(c, (unsigned int *)pc+i))
2623 + printk(KERN_CONT "???????? ");
2624 + else
2625 + printk(KERN_CONT "%08x ", c);
2626 + }
2627 + printk("\n");
2628 +}
2629 +#endif
2630 +
2631 void __kprobes
2632 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2633 {
2634 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2635 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2636 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2637
2638 - if ((vma->vm_flags & mask) != mask)
2639 + if ((vma->vm_flags & mask) != mask) {
2640 +
2641 +#ifdef CONFIG_PAX_PAGEEXEC
2642 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2643 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2644 + goto bad_area;
2645 +
2646 + up_read(&mm->mmap_sem);
2647 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2648 + do_group_exit(SIGKILL);
2649 + }
2650 +#endif
2651 +
2652 goto bad_area;
2653
2654 + }
2655 +
2656 /*
2657 * If for any reason at all we couldn't handle the fault, make
2658 * sure we exit gracefully rather than endlessly redo the
2659 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2660 index 5ca674b..e0e1b70 100644
2661 --- a/arch/ia64/mm/hugetlbpage.c
2662 +++ b/arch/ia64/mm/hugetlbpage.c
2663 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2664 /* At this point: (!vmm || addr < vmm->vm_end). */
2665 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2666 return -ENOMEM;
2667 - if (!vmm || (addr + len) <= vmm->vm_start)
2668 + if (check_heap_stack_gap(vmm, addr, len))
2669 return addr;
2670 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2671 }
2672 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2673 index 0eab454..bd794f2 100644
2674 --- a/arch/ia64/mm/init.c
2675 +++ b/arch/ia64/mm/init.c
2676 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
2677 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2678 vma->vm_end = vma->vm_start + PAGE_SIZE;
2679 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2680 +
2681 +#ifdef CONFIG_PAX_PAGEEXEC
2682 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2683 + vma->vm_flags &= ~VM_EXEC;
2684 +
2685 +#ifdef CONFIG_PAX_MPROTECT
2686 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
2687 + vma->vm_flags &= ~VM_MAYEXEC;
2688 +#endif
2689 +
2690 + }
2691 +#endif
2692 +
2693 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2694 down_write(&current->mm->mmap_sem);
2695 if (insert_vm_struct(current->mm, vma)) {
2696 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2697 index 40b3ee9..8c2c112 100644
2698 --- a/arch/m32r/include/asm/cache.h
2699 +++ b/arch/m32r/include/asm/cache.h
2700 @@ -1,8 +1,10 @@
2701 #ifndef _ASM_M32R_CACHE_H
2702 #define _ASM_M32R_CACHE_H
2703
2704 +#include <linux/const.h>
2705 +
2706 /* L1 cache line size */
2707 #define L1_CACHE_SHIFT 4
2708 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2709 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2710
2711 #endif /* _ASM_M32R_CACHE_H */
2712 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2713 index 82abd15..d95ae5d 100644
2714 --- a/arch/m32r/lib/usercopy.c
2715 +++ b/arch/m32r/lib/usercopy.c
2716 @@ -14,6 +14,9 @@
2717 unsigned long
2718 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2719 {
2720 + if ((long)n < 0)
2721 + return n;
2722 +
2723 prefetch(from);
2724 if (access_ok(VERIFY_WRITE, to, n))
2725 __copy_user(to,from,n);
2726 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2727 unsigned long
2728 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2729 {
2730 + if ((long)n < 0)
2731 + return n;
2732 +
2733 prefetchw(to);
2734 if (access_ok(VERIFY_READ, from, n))
2735 __copy_user_zeroing(to,from,n);
2736 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2737 index 0395c51..5f26031 100644
2738 --- a/arch/m68k/include/asm/cache.h
2739 +++ b/arch/m68k/include/asm/cache.h
2740 @@ -4,9 +4,11 @@
2741 #ifndef __ARCH_M68K_CACHE_H
2742 #define __ARCH_M68K_CACHE_H
2743
2744 +#include <linux/const.h>
2745 +
2746 /* bytes per L1 cache line */
2747 #define L1_CACHE_SHIFT 4
2748 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2749 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2750
2751 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2752
2753 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2754 index 4efe96a..60e8699 100644
2755 --- a/arch/microblaze/include/asm/cache.h
2756 +++ b/arch/microblaze/include/asm/cache.h
2757 @@ -13,11 +13,12 @@
2758 #ifndef _ASM_MICROBLAZE_CACHE_H
2759 #define _ASM_MICROBLAZE_CACHE_H
2760
2761 +#include <linux/const.h>
2762 #include <asm/registers.h>
2763
2764 #define L1_CACHE_SHIFT 5
2765 /* word-granular cache in microblaze */
2766 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2767 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2768
2769 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2770
2771 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2772 index 3f4c5cb..3439c6e 100644
2773 --- a/arch/mips/include/asm/atomic.h
2774 +++ b/arch/mips/include/asm/atomic.h
2775 @@ -21,6 +21,10 @@
2776 #include <asm/cmpxchg.h>
2777 #include <asm/war.h>
2778
2779 +#ifdef CONFIG_GENERIC_ATOMIC64
2780 +#include <asm-generic/atomic64.h>
2781 +#endif
2782 +
2783 #define ATOMIC_INIT(i) { (i) }
2784
2785 /*
2786 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2787 */
2788 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2789
2790 +#define atomic64_read_unchecked(v) atomic64_read(v)
2791 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2792 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2793 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2794 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2795 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2796 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2797 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2798 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2799 +
2800 #endif /* CONFIG_64BIT */
2801
2802 /*
2803 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2804 index b4db69f..8f3b093 100644
2805 --- a/arch/mips/include/asm/cache.h
2806 +++ b/arch/mips/include/asm/cache.h
2807 @@ -9,10 +9,11 @@
2808 #ifndef _ASM_CACHE_H
2809 #define _ASM_CACHE_H
2810
2811 +#include <linux/const.h>
2812 #include <kmalloc.h>
2813
2814 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2815 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2816 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2817
2818 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2819 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2820 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2821 index 455c0ac..ad65fbe 100644
2822 --- a/arch/mips/include/asm/elf.h
2823 +++ b/arch/mips/include/asm/elf.h
2824 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
2825 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2826 #endif
2827
2828 +#ifdef CONFIG_PAX_ASLR
2829 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2830 +
2831 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2832 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2833 +#endif
2834 +
2835 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2836 struct linux_binprm;
2837 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2838 int uses_interp);
2839
2840 -struct mm_struct;
2841 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2842 -#define arch_randomize_brk arch_randomize_brk
2843 -
2844 #endif /* _ASM_ELF_H */
2845 diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
2846 index c1f6afa..38cc6e9 100644
2847 --- a/arch/mips/include/asm/exec.h
2848 +++ b/arch/mips/include/asm/exec.h
2849 @@ -12,6 +12,6 @@
2850 #ifndef _ASM_EXEC_H
2851 #define _ASM_EXEC_H
2852
2853 -extern unsigned long arch_align_stack(unsigned long sp);
2854 +#define arch_align_stack(x) ((x) & ~0xfUL)
2855
2856 #endif /* _ASM_EXEC_H */
2857 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2858 index da9bd7d..91aa7ab 100644
2859 --- a/arch/mips/include/asm/page.h
2860 +++ b/arch/mips/include/asm/page.h
2861 @@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2862 #ifdef CONFIG_CPU_MIPS32
2863 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2864 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2865 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2866 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2867 #else
2868 typedef struct { unsigned long long pte; } pte_t;
2869 #define pte_val(x) ((x).pte)
2870 diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
2871 index 881d18b..cea38bc 100644
2872 --- a/arch/mips/include/asm/pgalloc.h
2873 +++ b/arch/mips/include/asm/pgalloc.h
2874 @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2875 {
2876 set_pud(pud, __pud((unsigned long)pmd));
2877 }
2878 +
2879 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2880 +{
2881 + pud_populate(mm, pud, pmd);
2882 +}
2883 #endif
2884
2885 /*
2886 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
2887 index 0d85d8e..ec71487 100644
2888 --- a/arch/mips/include/asm/thread_info.h
2889 +++ b/arch/mips/include/asm/thread_info.h
2890 @@ -123,6 +123,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
2891 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
2892 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
2893 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
2894 +/* li takes a 32bit immediate */
2895 +#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
2896 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
2897
2898 #ifdef CONFIG_MIPS32_O32
2899 @@ -146,15 +148,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
2900 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
2901 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
2902 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
2903 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
2904 +
2905 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2906
2907 /* work to do in syscall_trace_leave() */
2908 -#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
2909 +#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2910
2911 /* work to do on interrupt/exception return */
2912 #define _TIF_WORK_MASK (0x0000ffef & \
2913 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
2914 /* work to do on any return to u-space */
2915 -#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
2916 +#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
2917
2918 #endif /* __KERNEL__ */
2919
2920 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2921 index 9fdd8bc..4bd7f1a 100644
2922 --- a/arch/mips/kernel/binfmt_elfn32.c
2923 +++ b/arch/mips/kernel/binfmt_elfn32.c
2924 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2925 #undef ELF_ET_DYN_BASE
2926 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2927
2928 +#ifdef CONFIG_PAX_ASLR
2929 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2930 +
2931 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2932 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2933 +#endif
2934 +
2935 #include <asm/processor.h>
2936 #include <linux/module.h>
2937 #include <linux/elfcore.h>
2938 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2939 index ff44823..97f8906 100644
2940 --- a/arch/mips/kernel/binfmt_elfo32.c
2941 +++ b/arch/mips/kernel/binfmt_elfo32.c
2942 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2943 #undef ELF_ET_DYN_BASE
2944 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2945
2946 +#ifdef CONFIG_PAX_ASLR
2947 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2948 +
2949 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2950 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2951 +#endif
2952 +
2953 #include <asm/processor.h>
2954
2955 /*
2956 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2957 index e9a5fd7..378809a 100644
2958 --- a/arch/mips/kernel/process.c
2959 +++ b/arch/mips/kernel/process.c
2960 @@ -480,15 +480,3 @@ unsigned long get_wchan(struct task_struct *task)
2961 out:
2962 return pc;
2963 }
2964 -
2965 -/*
2966 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2967 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2968 - */
2969 -unsigned long arch_align_stack(unsigned long sp)
2970 -{
2971 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2972 - sp -= get_random_int() & ~PAGE_MASK;
2973 -
2974 - return sp & ALMASK;
2975 -}
2976 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
2977 index 7c24c29..e2f1981 100644
2978 --- a/arch/mips/kernel/ptrace.c
2979 +++ b/arch/mips/kernel/ptrace.c
2980 @@ -528,6 +528,10 @@ static inline int audit_arch(void)
2981 return arch;
2982 }
2983
2984 +#ifdef CONFIG_GRKERNSEC_SETXID
2985 +extern void gr_delayed_cred_worker(void);
2986 +#endif
2987 +
2988 /*
2989 * Notification of system call entry/exit
2990 * - triggered by current->work.syscall_trace
2991 @@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
2992 /* do the secure computing check first */
2993 secure_computing(regs->regs[2]);
2994
2995 +#ifdef CONFIG_GRKERNSEC_SETXID
2996 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2997 + gr_delayed_cred_worker();
2998 +#endif
2999 +
3000 if (!(current->ptrace & PT_PTRACED))
3001 goto out;
3002
3003 diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
3004 index a632bc1..0b77c7c 100644
3005 --- a/arch/mips/kernel/scall32-o32.S
3006 +++ b/arch/mips/kernel/scall32-o32.S
3007 @@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3008
3009 stack_done:
3010 lw t0, TI_FLAGS($28) # syscall tracing enabled?
3011 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3012 + li t1, _TIF_SYSCALL_WORK
3013 and t0, t1
3014 bnez t0, syscall_trace_entry # -> yes
3015
3016 diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
3017 index 3b5a5e9..e1ee86d 100644
3018 --- a/arch/mips/kernel/scall64-64.S
3019 +++ b/arch/mips/kernel/scall64-64.S
3020 @@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
3021
3022 sd a3, PT_R26(sp) # save a3 for syscall restarting
3023
3024 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3025 + li t1, _TIF_SYSCALL_WORK
3026 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3027 and t0, t1, t0
3028 bnez t0, syscall_trace_entry
3029 diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
3030 index 6be6f70..1859577 100644
3031 --- a/arch/mips/kernel/scall64-n32.S
3032 +++ b/arch/mips/kernel/scall64-n32.S
3033 @@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
3034
3035 sd a3, PT_R26(sp) # save a3 for syscall restarting
3036
3037 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3038 + li t1, _TIF_SYSCALL_WORK
3039 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3040 and t0, t1, t0
3041 bnez t0, n32_syscall_trace_entry
3042 diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3043 index 5422855..74e63a3 100644
3044 --- a/arch/mips/kernel/scall64-o32.S
3045 +++ b/arch/mips/kernel/scall64-o32.S
3046 @@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3047 PTR 4b, bad_stack
3048 .previous
3049
3050 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3051 + li t1, _TIF_SYSCALL_WORK
3052 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3053 and t0, t1, t0
3054 bnez t0, trace_a_syscall
3055 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3056 index c14f6df..537e729 100644
3057 --- a/arch/mips/mm/fault.c
3058 +++ b/arch/mips/mm/fault.c
3059 @@ -27,6 +27,23 @@
3060 #include <asm/highmem.h> /* For VMALLOC_END */
3061 #include <linux/kdebug.h>
3062
3063 +#ifdef CONFIG_PAX_PAGEEXEC
3064 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3065 +{
3066 + unsigned long i;
3067 +
3068 + printk(KERN_ERR "PAX: bytes at PC: ");
3069 + for (i = 0; i < 5; i++) {
3070 + unsigned int c;
3071 + if (get_user(c, (unsigned int *)pc+i))
3072 + printk(KERN_CONT "???????? ");
3073 + else
3074 + printk(KERN_CONT "%08x ", c);
3075 + }
3076 + printk("\n");
3077 +}
3078 +#endif
3079 +
3080 /*
3081 * This routine handles page faults. It determines the address,
3082 * and the problem, and then passes it off to one of the appropriate
3083 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3084 index 302d779..7d35bf8 100644
3085 --- a/arch/mips/mm/mmap.c
3086 +++ b/arch/mips/mm/mmap.c
3087 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3088 do_color_align = 1;
3089
3090 /* requesting a specific address */
3091 +
3092 +#ifdef CONFIG_PAX_RANDMMAP
3093 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3094 +#endif
3095 +
3096 if (addr) {
3097 if (do_color_align)
3098 addr = COLOUR_ALIGN(addr, pgoff);
3099 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3100 addr = PAGE_ALIGN(addr);
3101
3102 vma = find_vma(mm, addr);
3103 - if (TASK_SIZE - len >= addr &&
3104 - (!vma || addr + len <= vma->vm_start))
3105 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
3106 return addr;
3107 }
3108
3109 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3110 /* At this point: (!vma || addr < vma->vm_end). */
3111 if (TASK_SIZE - len < addr)
3112 return -ENOMEM;
3113 - if (!vma || addr + len <= vma->vm_start)
3114 + if (check_heap_stack_gap(vmm, addr, len))
3115 return addr;
3116 addr = vma->vm_end;
3117 if (do_color_align)
3118 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3119 /* make sure it can fit in the remaining address space */
3120 if (likely(addr > len)) {
3121 vma = find_vma(mm, addr - len);
3122 - if (!vma || addr <= vma->vm_start) {
3123 + if (check_heap_stack_gap(vmm, addr - len, len))
3124 /* cache the address as a hint for next time */
3125 return mm->free_area_cache = addr - len;
3126 }
3127 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3128 * return with success:
3129 */
3130 vma = find_vma(mm, addr);
3131 - if (likely(!vma || addr + len <= vma->vm_start)) {
3132 + if (check_heap_stack_gap(vmm, addr, len)) {
3133 /* cache the address as a hint for next time */
3134 return mm->free_area_cache = addr;
3135 }
3136 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3137 mm->unmap_area = arch_unmap_area_topdown;
3138 }
3139 }
3140 -
3141 -static inline unsigned long brk_rnd(void)
3142 -{
3143 - unsigned long rnd = get_random_int();
3144 -
3145 - rnd = rnd << PAGE_SHIFT;
3146 - /* 8MB for 32bit, 256MB for 64bit */
3147 - if (TASK_IS_32BIT_ADDR)
3148 - rnd = rnd & 0x7ffffful;
3149 - else
3150 - rnd = rnd & 0xffffffful;
3151 -
3152 - return rnd;
3153 -}
3154 -
3155 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3156 -{
3157 - unsigned long base = mm->brk;
3158 - unsigned long ret;
3159 -
3160 - ret = PAGE_ALIGN(base + brk_rnd());
3161 -
3162 - if (ret < mm->brk)
3163 - return mm->brk;
3164 -
3165 - return ret;
3166 -}
3167 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3168 index 967d144..db12197 100644
3169 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
3170 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3171 @@ -11,12 +11,14 @@
3172 #ifndef _ASM_PROC_CACHE_H
3173 #define _ASM_PROC_CACHE_H
3174
3175 +#include <linux/const.h>
3176 +
3177 /* L1 cache */
3178
3179 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3180 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
3181 -#define L1_CACHE_BYTES 16 /* bytes per entry */
3182 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3183 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3184 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3185
3186 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3187 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3188 index bcb5df2..84fabd2 100644
3189 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3190 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3191 @@ -16,13 +16,15 @@
3192 #ifndef _ASM_PROC_CACHE_H
3193 #define _ASM_PROC_CACHE_H
3194
3195 +#include <linux/const.h>
3196 +
3197 /*
3198 * L1 cache
3199 */
3200 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3201 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
3202 -#define L1_CACHE_BYTES 32 /* bytes per entry */
3203 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
3204 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3205 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
3206
3207 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3208 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
3209 index 4ce7a01..449202a 100644
3210 --- a/arch/openrisc/include/asm/cache.h
3211 +++ b/arch/openrisc/include/asm/cache.h
3212 @@ -19,11 +19,13 @@
3213 #ifndef __ASM_OPENRISC_CACHE_H
3214 #define __ASM_OPENRISC_CACHE_H
3215
3216 +#include <linux/const.h>
3217 +
3218 /* FIXME: How can we replace these with values from the CPU...
3219 * they shouldn't be hard-coded!
3220 */
3221
3222 -#define L1_CACHE_BYTES 16
3223 #define L1_CACHE_SHIFT 4
3224 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3225
3226 #endif /* __ASM_OPENRISC_CACHE_H */
3227 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3228 index 6c6defc..d30653d 100644
3229 --- a/arch/parisc/include/asm/atomic.h
3230 +++ b/arch/parisc/include/asm/atomic.h
3231 @@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3232
3233 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3234
3235 +#define atomic64_read_unchecked(v) atomic64_read(v)
3236 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3237 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3238 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3239 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3240 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3241 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3242 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3243 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3244 +
3245 #endif /* !CONFIG_64BIT */
3246
3247
3248 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3249 index 47f11c7..3420df2 100644
3250 --- a/arch/parisc/include/asm/cache.h
3251 +++ b/arch/parisc/include/asm/cache.h
3252 @@ -5,6 +5,7 @@
3253 #ifndef __ARCH_PARISC_CACHE_H
3254 #define __ARCH_PARISC_CACHE_H
3255
3256 +#include <linux/const.h>
3257
3258 /*
3259 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3260 @@ -15,13 +16,13 @@
3261 * just ruin performance.
3262 */
3263 #ifdef CONFIG_PA20
3264 -#define L1_CACHE_BYTES 64
3265 #define L1_CACHE_SHIFT 6
3266 #else
3267 -#define L1_CACHE_BYTES 32
3268 #define L1_CACHE_SHIFT 5
3269 #endif
3270
3271 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3272 +
3273 #ifndef __ASSEMBLY__
3274
3275 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3276 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3277 index 19f6cb1..6c78cf2 100644
3278 --- a/arch/parisc/include/asm/elf.h
3279 +++ b/arch/parisc/include/asm/elf.h
3280 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
3281
3282 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3283
3284 +#ifdef CONFIG_PAX_ASLR
3285 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3286 +
3287 +#define PAX_DELTA_MMAP_LEN 16
3288 +#define PAX_DELTA_STACK_LEN 16
3289 +#endif
3290 +
3291 /* This yields a mask that user programs can use to figure out what
3292 instruction set this CPU supports. This could be done in user space,
3293 but it's not easy, and we've already done it here. */
3294 diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
3295 index fc987a1..6e068ef 100644
3296 --- a/arch/parisc/include/asm/pgalloc.h
3297 +++ b/arch/parisc/include/asm/pgalloc.h
3298 @@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3299 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
3300 }
3301
3302 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3303 +{
3304 + pgd_populate(mm, pgd, pmd);
3305 +}
3306 +
3307 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
3308 {
3309 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
3310 @@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
3311 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
3312 #define pmd_free(mm, x) do { } while (0)
3313 #define pgd_populate(mm, pmd, pte) BUG()
3314 +#define pgd_populate_kernel(mm, pmd, pte) BUG()
3315
3316 #endif
3317
3318 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3319 index ee99f23..802b0a1 100644
3320 --- a/arch/parisc/include/asm/pgtable.h
3321 +++ b/arch/parisc/include/asm/pgtable.h
3322 @@ -212,6 +212,17 @@ struct vm_area_struct;
3323 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3324 #define PAGE_COPY PAGE_EXECREAD
3325 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3326 +
3327 +#ifdef CONFIG_PAX_PAGEEXEC
3328 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3329 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3330 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3331 +#else
3332 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3333 +# define PAGE_COPY_NOEXEC PAGE_COPY
3334 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3335 +#endif
3336 +
3337 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3338 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3339 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3340 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3341 index 5e34ccf..672bc9c 100644
3342 --- a/arch/parisc/kernel/module.c
3343 +++ b/arch/parisc/kernel/module.c
3344 @@ -98,16 +98,38 @@
3345
3346 /* three functions to determine where in the module core
3347 * or init pieces the location is */
3348 +static inline int in_init_rx(struct module *me, void *loc)
3349 +{
3350 + return (loc >= me->module_init_rx &&
3351 + loc < (me->module_init_rx + me->init_size_rx));
3352 +}
3353 +
3354 +static inline int in_init_rw(struct module *me, void *loc)
3355 +{
3356 + return (loc >= me->module_init_rw &&
3357 + loc < (me->module_init_rw + me->init_size_rw));
3358 +}
3359 +
3360 static inline int in_init(struct module *me, void *loc)
3361 {
3362 - return (loc >= me->module_init &&
3363 - loc <= (me->module_init + me->init_size));
3364 + return in_init_rx(me, loc) || in_init_rw(me, loc);
3365 +}
3366 +
3367 +static inline int in_core_rx(struct module *me, void *loc)
3368 +{
3369 + return (loc >= me->module_core_rx &&
3370 + loc < (me->module_core_rx + me->core_size_rx));
3371 +}
3372 +
3373 +static inline int in_core_rw(struct module *me, void *loc)
3374 +{
3375 + return (loc >= me->module_core_rw &&
3376 + loc < (me->module_core_rw + me->core_size_rw));
3377 }
3378
3379 static inline int in_core(struct module *me, void *loc)
3380 {
3381 - return (loc >= me->module_core &&
3382 - loc <= (me->module_core + me->core_size));
3383 + return in_core_rx(me, loc) || in_core_rw(me, loc);
3384 }
3385
3386 static inline int in_local(struct module *me, void *loc)
3387 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3388 }
3389
3390 /* align things a bit */
3391 - me->core_size = ALIGN(me->core_size, 16);
3392 - me->arch.got_offset = me->core_size;
3393 - me->core_size += gots * sizeof(struct got_entry);
3394 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3395 + me->arch.got_offset = me->core_size_rw;
3396 + me->core_size_rw += gots * sizeof(struct got_entry);
3397
3398 - me->core_size = ALIGN(me->core_size, 16);
3399 - me->arch.fdesc_offset = me->core_size;
3400 - me->core_size += fdescs * sizeof(Elf_Fdesc);
3401 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3402 + me->arch.fdesc_offset = me->core_size_rw;
3403 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3404
3405 me->arch.got_max = gots;
3406 me->arch.fdesc_max = fdescs;
3407 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3408
3409 BUG_ON(value == 0);
3410
3411 - got = me->module_core + me->arch.got_offset;
3412 + got = me->module_core_rw + me->arch.got_offset;
3413 for (i = 0; got[i].addr; i++)
3414 if (got[i].addr == value)
3415 goto out;
3416 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3417 #ifdef CONFIG_64BIT
3418 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3419 {
3420 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3421 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3422
3423 if (!value) {
3424 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3425 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3426
3427 /* Create new one */
3428 fdesc->addr = value;
3429 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3430 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3431 return (Elf_Addr)fdesc;
3432 }
3433 #endif /* CONFIG_64BIT */
3434 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3435
3436 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3437 end = table + sechdrs[me->arch.unwind_section].sh_size;
3438 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3439 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3440
3441 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3442 me->arch.unwind_section, table, end, gp);
3443 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3444 index c9b9322..02d8940 100644
3445 --- a/arch/parisc/kernel/sys_parisc.c
3446 +++ b/arch/parisc/kernel/sys_parisc.c
3447 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3448 /* At this point: (!vma || addr < vma->vm_end). */
3449 if (TASK_SIZE - len < addr)
3450 return -ENOMEM;
3451 - if (!vma || addr + len <= vma->vm_start)
3452 + if (check_heap_stack_gap(vma, addr, len))
3453 return addr;
3454 addr = vma->vm_end;
3455 }
3456 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3457 /* At this point: (!vma || addr < vma->vm_end). */
3458 if (TASK_SIZE - len < addr)
3459 return -ENOMEM;
3460 - if (!vma || addr + len <= vma->vm_start)
3461 + if (check_heap_stack_gap(vma, addr, len))
3462 return addr;
3463 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3464 if (addr < vma->vm_end) /* handle wraparound */
3465 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3466 if (flags & MAP_FIXED)
3467 return addr;
3468 if (!addr)
3469 - addr = TASK_UNMAPPED_BASE;
3470 + addr = current->mm->mmap_base;
3471
3472 if (filp) {
3473 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3474 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3475 index 45ba99f..8e22c33 100644
3476 --- a/arch/parisc/kernel/traps.c
3477 +++ b/arch/parisc/kernel/traps.c
3478 @@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3479
3480 down_read(&current->mm->mmap_sem);
3481 vma = find_vma(current->mm,regs->iaoq[0]);
3482 - if (vma && (regs->iaoq[0] >= vma->vm_start)
3483 - && (vma->vm_flags & VM_EXEC)) {
3484 -
3485 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3486 fault_address = regs->iaoq[0];
3487 fault_space = regs->iasq[0];
3488
3489 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3490 index 18162ce..94de376 100644
3491 --- a/arch/parisc/mm/fault.c
3492 +++ b/arch/parisc/mm/fault.c
3493 @@ -15,6 +15,7 @@
3494 #include <linux/sched.h>
3495 #include <linux/interrupt.h>
3496 #include <linux/module.h>
3497 +#include <linux/unistd.h>
3498
3499 #include <asm/uaccess.h>
3500 #include <asm/traps.h>
3501 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3502 static unsigned long
3503 parisc_acctyp(unsigned long code, unsigned int inst)
3504 {
3505 - if (code == 6 || code == 16)
3506 + if (code == 6 || code == 7 || code == 16)
3507 return VM_EXEC;
3508
3509 switch (inst & 0xf0000000) {
3510 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3511 }
3512 #endif
3513
3514 +#ifdef CONFIG_PAX_PAGEEXEC
3515 +/*
3516 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3517 + *
3518 + * returns 1 when task should be killed
3519 + * 2 when rt_sigreturn trampoline was detected
3520 + * 3 when unpatched PLT trampoline was detected
3521 + */
3522 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3523 +{
3524 +
3525 +#ifdef CONFIG_PAX_EMUPLT
3526 + int err;
3527 +
3528 + do { /* PaX: unpatched PLT emulation */
3529 + unsigned int bl, depwi;
3530 +
3531 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3532 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3533 +
3534 + if (err)
3535 + break;
3536 +
3537 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3538 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3539 +
3540 + err = get_user(ldw, (unsigned int *)addr);
3541 + err |= get_user(bv, (unsigned int *)(addr+4));
3542 + err |= get_user(ldw2, (unsigned int *)(addr+8));
3543 +
3544 + if (err)
3545 + break;
3546 +
3547 + if (ldw == 0x0E801096U &&
3548 + bv == 0xEAC0C000U &&
3549 + ldw2 == 0x0E881095U)
3550 + {
3551 + unsigned int resolver, map;
3552 +
3553 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3554 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3555 + if (err)
3556 + break;
3557 +
3558 + regs->gr[20] = instruction_pointer(regs)+8;
3559 + regs->gr[21] = map;
3560 + regs->gr[22] = resolver;
3561 + regs->iaoq[0] = resolver | 3UL;
3562 + regs->iaoq[1] = regs->iaoq[0] + 4;
3563 + return 3;
3564 + }
3565 + }
3566 + } while (0);
3567 +#endif
3568 +
3569 +#ifdef CONFIG_PAX_EMUTRAMP
3570 +
3571 +#ifndef CONFIG_PAX_EMUSIGRT
3572 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3573 + return 1;
3574 +#endif
3575 +
3576 + do { /* PaX: rt_sigreturn emulation */
3577 + unsigned int ldi1, ldi2, bel, nop;
3578 +
3579 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3580 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3581 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3582 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3583 +
3584 + if (err)
3585 + break;
3586 +
3587 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3588 + ldi2 == 0x3414015AU &&
3589 + bel == 0xE4008200U &&
3590 + nop == 0x08000240U)
3591 + {
3592 + regs->gr[25] = (ldi1 & 2) >> 1;
3593 + regs->gr[20] = __NR_rt_sigreturn;
3594 + regs->gr[31] = regs->iaoq[1] + 16;
3595 + regs->sr[0] = regs->iasq[1];
3596 + regs->iaoq[0] = 0x100UL;
3597 + regs->iaoq[1] = regs->iaoq[0] + 4;
3598 + regs->iasq[0] = regs->sr[2];
3599 + regs->iasq[1] = regs->sr[2];
3600 + return 2;
3601 + }
3602 + } while (0);
3603 +#endif
3604 +
3605 + return 1;
3606 +}
3607 +
3608 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3609 +{
3610 + unsigned long i;
3611 +
3612 + printk(KERN_ERR "PAX: bytes at PC: ");
3613 + for (i = 0; i < 5; i++) {
3614 + unsigned int c;
3615 + if (get_user(c, (unsigned int *)pc+i))
3616 + printk(KERN_CONT "???????? ");
3617 + else
3618 + printk(KERN_CONT "%08x ", c);
3619 + }
3620 + printk("\n");
3621 +}
3622 +#endif
3623 +
3624 int fixup_exception(struct pt_regs *regs)
3625 {
3626 const struct exception_table_entry *fix;
3627 @@ -192,8 +303,33 @@ good_area:
3628
3629 acc_type = parisc_acctyp(code,regs->iir);
3630
3631 - if ((vma->vm_flags & acc_type) != acc_type)
3632 + if ((vma->vm_flags & acc_type) != acc_type) {
3633 +
3634 +#ifdef CONFIG_PAX_PAGEEXEC
3635 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3636 + (address & ~3UL) == instruction_pointer(regs))
3637 + {
3638 + up_read(&mm->mmap_sem);
3639 + switch (pax_handle_fetch_fault(regs)) {
3640 +
3641 +#ifdef CONFIG_PAX_EMUPLT
3642 + case 3:
3643 + return;
3644 +#endif
3645 +
3646 +#ifdef CONFIG_PAX_EMUTRAMP
3647 + case 2:
3648 + return;
3649 +#endif
3650 +
3651 + }
3652 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3653 + do_group_exit(SIGKILL);
3654 + }
3655 +#endif
3656 +
3657 goto bad_area;
3658 + }
3659
3660 /*
3661 * If for any reason at all we couldn't handle the fault, make
3662 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3663 index da29032..f76c24c 100644
3664 --- a/arch/powerpc/include/asm/atomic.h
3665 +++ b/arch/powerpc/include/asm/atomic.h
3666 @@ -522,6 +522,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
3667 return t1;
3668 }
3669
3670 +#define atomic64_read_unchecked(v) atomic64_read(v)
3671 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3672 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3673 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3674 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3675 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3676 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3677 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3678 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3679 +
3680 #endif /* __powerpc64__ */
3681
3682 #endif /* __KERNEL__ */
3683 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3684 index 9e495c9..b6878e5 100644
3685 --- a/arch/powerpc/include/asm/cache.h
3686 +++ b/arch/powerpc/include/asm/cache.h
3687 @@ -3,6 +3,7 @@
3688
3689 #ifdef __KERNEL__
3690
3691 +#include <linux/const.h>
3692
3693 /* bytes per L1 cache line */
3694 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3695 @@ -22,7 +23,7 @@
3696 #define L1_CACHE_SHIFT 7
3697 #endif
3698
3699 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3700 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3701
3702 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3703
3704 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3705 index 3bf9cca..e7457d0 100644
3706 --- a/arch/powerpc/include/asm/elf.h
3707 +++ b/arch/powerpc/include/asm/elf.h
3708 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3709 the loader. We need to make sure that it is out of the way of the program
3710 that it will "exec", and that there is sufficient room for the brk. */
3711
3712 -extern unsigned long randomize_et_dyn(unsigned long base);
3713 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3714 +#define ELF_ET_DYN_BASE (0x20000000)
3715 +
3716 +#ifdef CONFIG_PAX_ASLR
3717 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3718 +
3719 +#ifdef __powerpc64__
3720 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3721 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3722 +#else
3723 +#define PAX_DELTA_MMAP_LEN 15
3724 +#define PAX_DELTA_STACK_LEN 15
3725 +#endif
3726 +#endif
3727
3728 /*
3729 * Our registers are always unsigned longs, whether we're a 32 bit
3730 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3731 (0x7ff >> (PAGE_SHIFT - 12)) : \
3732 (0x3ffff >> (PAGE_SHIFT - 12)))
3733
3734 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3735 -#define arch_randomize_brk arch_randomize_brk
3736 -
3737 #endif /* __KERNEL__ */
3738
3739 /*
3740 diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
3741 index 8196e9c..d83a9f3 100644
3742 --- a/arch/powerpc/include/asm/exec.h
3743 +++ b/arch/powerpc/include/asm/exec.h
3744 @@ -4,6 +4,6 @@
3745 #ifndef _ASM_POWERPC_EXEC_H
3746 #define _ASM_POWERPC_EXEC_H
3747
3748 -extern unsigned long arch_align_stack(unsigned long sp);
3749 +#define arch_align_stack(x) ((x) & ~0xfUL)
3750
3751 #endif /* _ASM_POWERPC_EXEC_H */
3752 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3753 index bca8fdc..61e9580 100644
3754 --- a/arch/powerpc/include/asm/kmap_types.h
3755 +++ b/arch/powerpc/include/asm/kmap_types.h
3756 @@ -27,6 +27,7 @@ enum km_type {
3757 KM_PPC_SYNC_PAGE,
3758 KM_PPC_SYNC_ICACHE,
3759 KM_KDB,
3760 + KM_CLEARPAGE,
3761 KM_TYPE_NR
3762 };
3763
3764 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3765 index d4a7f64..451de1c 100644
3766 --- a/arch/powerpc/include/asm/mman.h
3767 +++ b/arch/powerpc/include/asm/mman.h
3768 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3769 }
3770 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3771
3772 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3773 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3774 {
3775 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3776 }
3777 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3778 index f072e97..b436dee 100644
3779 --- a/arch/powerpc/include/asm/page.h
3780 +++ b/arch/powerpc/include/asm/page.h
3781 @@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3782 * and needs to be executable. This means the whole heap ends
3783 * up being executable.
3784 */
3785 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3786 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3787 +#define VM_DATA_DEFAULT_FLAGS32 \
3788 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3789 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3790
3791 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3792 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3793 @@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3794 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3795 #endif
3796
3797 +#define ktla_ktva(addr) (addr)
3798 +#define ktva_ktla(addr) (addr)
3799 +
3800 /*
3801 * Use the top bit of the higher-level page table entries to indicate whether
3802 * the entries we point to contain hugepages. This works because we know that
3803 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3804 index fed85e6..da5c71b 100644
3805 --- a/arch/powerpc/include/asm/page_64.h
3806 +++ b/arch/powerpc/include/asm/page_64.h
3807 @@ -146,15 +146,18 @@ do { \
3808 * stack by default, so in the absence of a PT_GNU_STACK program header
3809 * we turn execute permission off.
3810 */
3811 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3812 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3813 +#define VM_STACK_DEFAULT_FLAGS32 \
3814 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3815 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3816
3817 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3818 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3819
3820 +#ifndef CONFIG_PAX_PAGEEXEC
3821 #define VM_STACK_DEFAULT_FLAGS \
3822 (is_32bit_task() ? \
3823 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3824 +#endif
3825
3826 #include <asm-generic/getorder.h>
3827
3828 diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
3829 index 292725c..f87ae14 100644
3830 --- a/arch/powerpc/include/asm/pgalloc-64.h
3831 +++ b/arch/powerpc/include/asm/pgalloc-64.h
3832 @@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
3833 #ifndef CONFIG_PPC_64K_PAGES
3834
3835 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
3836 +#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
3837
3838 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3839 {
3840 @@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3841 pud_set(pud, (unsigned long)pmd);
3842 }
3843
3844 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3845 +{
3846 + pud_populate(mm, pud, pmd);
3847 +}
3848 +
3849 #define pmd_populate(mm, pmd, pte_page) \
3850 pmd_populate_kernel(mm, pmd, page_address(pte_page))
3851 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
3852 @@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3853 #else /* CONFIG_PPC_64K_PAGES */
3854
3855 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
3856 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
3857
3858 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
3859 pte_t *pte)
3860 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3861 index 2e0e411..7899c68 100644
3862 --- a/arch/powerpc/include/asm/pgtable.h
3863 +++ b/arch/powerpc/include/asm/pgtable.h
3864 @@ -2,6 +2,7 @@
3865 #define _ASM_POWERPC_PGTABLE_H
3866 #ifdef __KERNEL__
3867
3868 +#include <linux/const.h>
3869 #ifndef __ASSEMBLY__
3870 #include <asm/processor.h> /* For TASK_SIZE */
3871 #include <asm/mmu.h>
3872 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3873 index 4aad413..85d86bf 100644
3874 --- a/arch/powerpc/include/asm/pte-hash32.h
3875 +++ b/arch/powerpc/include/asm/pte-hash32.h
3876 @@ -21,6 +21,7 @@
3877 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3878 #define _PAGE_USER 0x004 /* usermode access allowed */
3879 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3880 +#define _PAGE_EXEC _PAGE_GUARDED
3881 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3882 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3883 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3884 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3885 index 9d7f0fb..a28fe69 100644
3886 --- a/arch/powerpc/include/asm/reg.h
3887 +++ b/arch/powerpc/include/asm/reg.h
3888 @@ -212,6 +212,7 @@
3889 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3890 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3891 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3892 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3893 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3894 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3895 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3896 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
3897 index 4a741c7..c8162227b 100644
3898 --- a/arch/powerpc/include/asm/thread_info.h
3899 +++ b/arch/powerpc/include/asm/thread_info.h
3900 @@ -104,12 +104,14 @@ static inline struct thread_info *current_thread_info(void)
3901 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
3902 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
3903 #define TIF_SINGLESTEP 8 /* singlestepping active */
3904 -#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
3905 #define TIF_SECCOMP 10 /* secure computing */
3906 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
3907 #define TIF_NOERROR 12 /* Force successful syscall return */
3908 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
3909 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
3910 +#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
3911 +/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
3912 +#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
3913
3914 /* as above, but as bit values */
3915 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3916 @@ -127,8 +129,11 @@ static inline struct thread_info *current_thread_info(void)
3917 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
3918 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
3919 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
3920 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3921 +
3922 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
3923 - _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
3924 + _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT \
3925 + _TIF_GRSEC_SETXID)
3926
3927 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
3928 _TIF_NOTIFY_RESUME)
3929 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3930 index bd0fb84..a42a14b 100644
3931 --- a/arch/powerpc/include/asm/uaccess.h
3932 +++ b/arch/powerpc/include/asm/uaccess.h
3933 @@ -13,6 +13,8 @@
3934 #define VERIFY_READ 0
3935 #define VERIFY_WRITE 1
3936
3937 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3938 +
3939 /*
3940 * The fs value determines whether argument validity checking should be
3941 * performed or not. If get_fs() == USER_DS, checking is performed, with
3942 @@ -327,52 +329,6 @@ do { \
3943 extern unsigned long __copy_tofrom_user(void __user *to,
3944 const void __user *from, unsigned long size);
3945
3946 -#ifndef __powerpc64__
3947 -
3948 -static inline unsigned long copy_from_user(void *to,
3949 - const void __user *from, unsigned long n)
3950 -{
3951 - unsigned long over;
3952 -
3953 - if (access_ok(VERIFY_READ, from, n))
3954 - return __copy_tofrom_user((__force void __user *)to, from, n);
3955 - if ((unsigned long)from < TASK_SIZE) {
3956 - over = (unsigned long)from + n - TASK_SIZE;
3957 - return __copy_tofrom_user((__force void __user *)to, from,
3958 - n - over) + over;
3959 - }
3960 - return n;
3961 -}
3962 -
3963 -static inline unsigned long copy_to_user(void __user *to,
3964 - const void *from, unsigned long n)
3965 -{
3966 - unsigned long over;
3967 -
3968 - if (access_ok(VERIFY_WRITE, to, n))
3969 - return __copy_tofrom_user(to, (__force void __user *)from, n);
3970 - if ((unsigned long)to < TASK_SIZE) {
3971 - over = (unsigned long)to + n - TASK_SIZE;
3972 - return __copy_tofrom_user(to, (__force void __user *)from,
3973 - n - over) + over;
3974 - }
3975 - return n;
3976 -}
3977 -
3978 -#else /* __powerpc64__ */
3979 -
3980 -#define __copy_in_user(to, from, size) \
3981 - __copy_tofrom_user((to), (from), (size))
3982 -
3983 -extern unsigned long copy_from_user(void *to, const void __user *from,
3984 - unsigned long n);
3985 -extern unsigned long copy_to_user(void __user *to, const void *from,
3986 - unsigned long n);
3987 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
3988 - unsigned long n);
3989 -
3990 -#endif /* __powerpc64__ */
3991 -
3992 static inline unsigned long __copy_from_user_inatomic(void *to,
3993 const void __user *from, unsigned long n)
3994 {
3995 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3996 if (ret == 0)
3997 return 0;
3998 }
3999 +
4000 + if (!__builtin_constant_p(n))
4001 + check_object_size(to, n, false);
4002 +
4003 return __copy_tofrom_user((__force void __user *)to, from, n);
4004 }
4005
4006 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
4007 if (ret == 0)
4008 return 0;
4009 }
4010 +
4011 + if (!__builtin_constant_p(n))
4012 + check_object_size(from, n, true);
4013 +
4014 return __copy_tofrom_user(to, (__force const void __user *)from, n);
4015 }
4016
4017 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
4018 return __copy_to_user_inatomic(to, from, size);
4019 }
4020
4021 +#ifndef __powerpc64__
4022 +
4023 +static inline unsigned long __must_check copy_from_user(void *to,
4024 + const void __user *from, unsigned long n)
4025 +{
4026 + unsigned long over;
4027 +
4028 + if ((long)n < 0)
4029 + return n;
4030 +
4031 + if (access_ok(VERIFY_READ, from, n)) {
4032 + if (!__builtin_constant_p(n))
4033 + check_object_size(to, n, false);
4034 + return __copy_tofrom_user((__force void __user *)to, from, n);
4035 + }
4036 + if ((unsigned long)from < TASK_SIZE) {
4037 + over = (unsigned long)from + n - TASK_SIZE;
4038 + if (!__builtin_constant_p(n - over))
4039 + check_object_size(to, n - over, false);
4040 + return __copy_tofrom_user((__force void __user *)to, from,
4041 + n - over) + over;
4042 + }
4043 + return n;
4044 +}
4045 +
4046 +static inline unsigned long __must_check copy_to_user(void __user *to,
4047 + const void *from, unsigned long n)
4048 +{
4049 + unsigned long over;
4050 +
4051 + if ((long)n < 0)
4052 + return n;
4053 +
4054 + if (access_ok(VERIFY_WRITE, to, n)) {
4055 + if (!__builtin_constant_p(n))
4056 + check_object_size(from, n, true);
4057 + return __copy_tofrom_user(to, (__force void __user *)from, n);
4058 + }
4059 + if ((unsigned long)to < TASK_SIZE) {
4060 + over = (unsigned long)to + n - TASK_SIZE;
4061 + if (!__builtin_constant_p(n))
4062 + check_object_size(from, n - over, true);
4063 + return __copy_tofrom_user(to, (__force void __user *)from,
4064 + n - over) + over;
4065 + }
4066 + return n;
4067 +}
4068 +
4069 +#else /* __powerpc64__ */
4070 +
4071 +#define __copy_in_user(to, from, size) \
4072 + __copy_tofrom_user((to), (from), (size))
4073 +
4074 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
4075 +{
4076 + if ((long)n < 0 || n > INT_MAX)
4077 + return n;
4078 +
4079 + if (!__builtin_constant_p(n))
4080 + check_object_size(to, n, false);
4081 +
4082 + if (likely(access_ok(VERIFY_READ, from, n)))
4083 + n = __copy_from_user(to, from, n);
4084 + else
4085 + memset(to, 0, n);
4086 + return n;
4087 +}
4088 +
4089 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
4090 +{
4091 + if ((long)n < 0 || n > INT_MAX)
4092 + return n;
4093 +
4094 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
4095 + if (!__builtin_constant_p(n))
4096 + check_object_size(from, n, true);
4097 + n = __copy_to_user(to, from, n);
4098 + }
4099 + return n;
4100 +}
4101 +
4102 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
4103 + unsigned long n);
4104 +
4105 +#endif /* __powerpc64__ */
4106 +
4107 extern unsigned long __clear_user(void __user *addr, unsigned long size);
4108
4109 static inline unsigned long clear_user(void __user *addr, unsigned long size)
4110 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
4111 index 7215cc2..a9730c1 100644
4112 --- a/arch/powerpc/kernel/exceptions-64e.S
4113 +++ b/arch/powerpc/kernel/exceptions-64e.S
4114 @@ -661,6 +661,7 @@ storage_fault_common:
4115 std r14,_DAR(r1)
4116 std r15,_DSISR(r1)
4117 addi r3,r1,STACK_FRAME_OVERHEAD
4118 + bl .save_nvgprs
4119 mr r4,r14
4120 mr r5,r15
4121 ld r14,PACA_EXGEN+EX_R14(r13)
4122 @@ -669,8 +670,7 @@ storage_fault_common:
4123 cmpdi r3,0
4124 bne- 1f
4125 b .ret_from_except_lite
4126 -1: bl .save_nvgprs
4127 - mr r5,r3
4128 +1: mr r5,r3
4129 addi r3,r1,STACK_FRAME_OVERHEAD
4130 ld r4,_DAR(r1)
4131 bl .bad_page_fault
4132 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
4133 index 8f880bc..c5bd2f3 100644
4134 --- a/arch/powerpc/kernel/exceptions-64s.S
4135 +++ b/arch/powerpc/kernel/exceptions-64s.S
4136 @@ -890,10 +890,10 @@ handle_page_fault:
4137 11: ld r4,_DAR(r1)
4138 ld r5,_DSISR(r1)
4139 addi r3,r1,STACK_FRAME_OVERHEAD
4140 + bl .save_nvgprs
4141 bl .do_page_fault
4142 cmpdi r3,0
4143 beq+ 12f
4144 - bl .save_nvgprs
4145 mr r5,r3
4146 addi r3,r1,STACK_FRAME_OVERHEAD
4147 lwz r4,_DAR(r1)
4148 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4149 index 0b6d796..d760ddb 100644
4150 --- a/arch/powerpc/kernel/module_32.c
4151 +++ b/arch/powerpc/kernel/module_32.c
4152 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4153 me->arch.core_plt_section = i;
4154 }
4155 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4156 - printk("Module doesn't contain .plt or .init.plt sections.\n");
4157 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4158 return -ENOEXEC;
4159 }
4160
4161 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
4162
4163 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4164 /* Init, or core PLT? */
4165 - if (location >= mod->module_core
4166 - && location < mod->module_core + mod->core_size)
4167 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4168 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4169 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4170 - else
4171 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4172 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4173 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4174 + else {
4175 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4176 + return ~0UL;
4177 + }
4178
4179 /* Find this entry, or if that fails, the next avail. entry */
4180 while (entry->jump[0]) {
4181 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4182 index 4937c96..70714b7 100644
4183 --- a/arch/powerpc/kernel/process.c
4184 +++ b/arch/powerpc/kernel/process.c
4185 @@ -681,8 +681,8 @@ void show_regs(struct pt_regs * regs)
4186 * Lookup NIP late so we have the best change of getting the
4187 * above info out without failing
4188 */
4189 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4190 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4191 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4192 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4193 #endif
4194 show_stack(current, (unsigned long *) regs->gpr[1]);
4195 if (!user_mode(regs))
4196 @@ -1186,10 +1186,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4197 newsp = stack[0];
4198 ip = stack[STACK_FRAME_LR_SAVE];
4199 if (!firstframe || ip != lr) {
4200 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4201 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4202 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4203 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4204 - printk(" (%pS)",
4205 + printk(" (%pA)",
4206 (void *)current->ret_stack[curr_frame].ret);
4207 curr_frame--;
4208 }
4209 @@ -1209,7 +1209,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4210 struct pt_regs *regs = (struct pt_regs *)
4211 (sp + STACK_FRAME_OVERHEAD);
4212 lr = regs->link;
4213 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
4214 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
4215 regs->trap, (void *)regs->nip, (void *)lr);
4216 firstframe = 1;
4217 }
4218 @@ -1282,58 +1282,3 @@ void thread_info_cache_init(void)
4219 }
4220
4221 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4222 -
4223 -unsigned long arch_align_stack(unsigned long sp)
4224 -{
4225 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4226 - sp -= get_random_int() & ~PAGE_MASK;
4227 - return sp & ~0xf;
4228 -}
4229 -
4230 -static inline unsigned long brk_rnd(void)
4231 -{
4232 - unsigned long rnd = 0;
4233 -
4234 - /* 8MB for 32bit, 1GB for 64bit */
4235 - if (is_32bit_task())
4236 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4237 - else
4238 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4239 -
4240 - return rnd << PAGE_SHIFT;
4241 -}
4242 -
4243 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4244 -{
4245 - unsigned long base = mm->brk;
4246 - unsigned long ret;
4247 -
4248 -#ifdef CONFIG_PPC_STD_MMU_64
4249 - /*
4250 - * If we are using 1TB segments and we are allowed to randomise
4251 - * the heap, we can put it above 1TB so it is backed by a 1TB
4252 - * segment. Otherwise the heap will be in the bottom 1TB
4253 - * which always uses 256MB segments and this may result in a
4254 - * performance penalty.
4255 - */
4256 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4257 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4258 -#endif
4259 -
4260 - ret = PAGE_ALIGN(base + brk_rnd());
4261 -
4262 - if (ret < mm->brk)
4263 - return mm->brk;
4264 -
4265 - return ret;
4266 -}
4267 -
4268 -unsigned long randomize_et_dyn(unsigned long base)
4269 -{
4270 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4271 -
4272 - if (ret < base)
4273 - return base;
4274 -
4275 - return ret;
4276 -}
4277 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4278 index 8d8e028..c2aeb50 100644
4279 --- a/arch/powerpc/kernel/ptrace.c
4280 +++ b/arch/powerpc/kernel/ptrace.c
4281 @@ -1702,6 +1702,10 @@ long arch_ptrace(struct task_struct *child, long request,
4282 return ret;
4283 }
4284
4285 +#ifdef CONFIG_GRKERNSEC_SETXID
4286 +extern void gr_delayed_cred_worker(void);
4287 +#endif
4288 +
4289 /*
4290 * We must return the syscall number to actually look up in the table.
4291 * This can be -1L to skip running any syscall at all.
4292 @@ -1712,6 +1716,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
4293
4294 secure_computing(regs->gpr[0]);
4295
4296 +#ifdef CONFIG_GRKERNSEC_SETXID
4297 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4298 + gr_delayed_cred_worker();
4299 +#endif
4300 +
4301 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
4302 tracehook_report_syscall_entry(regs))
4303 /*
4304 @@ -1746,6 +1755,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
4305 {
4306 int step;
4307
4308 +#ifdef CONFIG_GRKERNSEC_SETXID
4309 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4310 + gr_delayed_cred_worker();
4311 +#endif
4312 +
4313 audit_syscall_exit(regs);
4314
4315 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
4316 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4317 index 45eb998..0cb36bc 100644
4318 --- a/arch/powerpc/kernel/signal_32.c
4319 +++ b/arch/powerpc/kernel/signal_32.c
4320 @@ -861,7 +861,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4321 /* Save user registers on the stack */
4322 frame = &rt_sf->uc.uc_mcontext;
4323 addr = frame;
4324 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4325 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4326 if (save_user_regs(regs, frame, 0, 1))
4327 goto badframe;
4328 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4329 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4330 index 2692efd..6673d2e 100644
4331 --- a/arch/powerpc/kernel/signal_64.c
4332 +++ b/arch/powerpc/kernel/signal_64.c
4333 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4334 current->thread.fpscr.val = 0;
4335
4336 /* Set up to return from userspace. */
4337 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4338 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4339 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4340 } else {
4341 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4342 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4343 index 1589723..cefe690 100644
4344 --- a/arch/powerpc/kernel/traps.c
4345 +++ b/arch/powerpc/kernel/traps.c
4346 @@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
4347 return flags;
4348 }
4349
4350 +extern void gr_handle_kernel_exploit(void);
4351 +
4352 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4353 int signr)
4354 {
4355 @@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4356 panic("Fatal exception in interrupt");
4357 if (panic_on_oops)
4358 panic("Fatal exception");
4359 +
4360 + gr_handle_kernel_exploit();
4361 +
4362 do_exit(signr);
4363 }
4364
4365 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4366 index 9eb5b9b..e45498a 100644
4367 --- a/arch/powerpc/kernel/vdso.c
4368 +++ b/arch/powerpc/kernel/vdso.c
4369 @@ -34,6 +34,7 @@
4370 #include <asm/firmware.h>
4371 #include <asm/vdso.h>
4372 #include <asm/vdso_datapage.h>
4373 +#include <asm/mman.h>
4374
4375 #include "setup.h"
4376
4377 @@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4378 vdso_base = VDSO32_MBASE;
4379 #endif
4380
4381 - current->mm->context.vdso_base = 0;
4382 + current->mm->context.vdso_base = ~0UL;
4383
4384 /* vDSO has a problem and was disabled, just don't "enable" it for the
4385 * process
4386 @@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4387 vdso_base = get_unmapped_area(NULL, vdso_base,
4388 (vdso_pages << PAGE_SHIFT) +
4389 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4390 - 0, 0);
4391 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
4392 if (IS_ERR_VALUE(vdso_base)) {
4393 rc = vdso_base;
4394 goto fail_mmapsem;
4395 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4396 index 5eea6f3..5d10396 100644
4397 --- a/arch/powerpc/lib/usercopy_64.c
4398 +++ b/arch/powerpc/lib/usercopy_64.c
4399 @@ -9,22 +9,6 @@
4400 #include <linux/module.h>
4401 #include <asm/uaccess.h>
4402
4403 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4404 -{
4405 - if (likely(access_ok(VERIFY_READ, from, n)))
4406 - n = __copy_from_user(to, from, n);
4407 - else
4408 - memset(to, 0, n);
4409 - return n;
4410 -}
4411 -
4412 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4413 -{
4414 - if (likely(access_ok(VERIFY_WRITE, to, n)))
4415 - n = __copy_to_user(to, from, n);
4416 - return n;
4417 -}
4418 -
4419 unsigned long copy_in_user(void __user *to, const void __user *from,
4420 unsigned long n)
4421 {
4422 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4423 return n;
4424 }
4425
4426 -EXPORT_SYMBOL(copy_from_user);
4427 -EXPORT_SYMBOL(copy_to_user);
4428 EXPORT_SYMBOL(copy_in_user);
4429
4430 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4431 index 08ffcf5..a0ab912 100644
4432 --- a/arch/powerpc/mm/fault.c
4433 +++ b/arch/powerpc/mm/fault.c
4434 @@ -32,6 +32,10 @@
4435 #include <linux/perf_event.h>
4436 #include <linux/magic.h>
4437 #include <linux/ratelimit.h>
4438 +#include <linux/slab.h>
4439 +#include <linux/pagemap.h>
4440 +#include <linux/compiler.h>
4441 +#include <linux/unistd.h>
4442
4443 #include <asm/firmware.h>
4444 #include <asm/page.h>
4445 @@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4446 }
4447 #endif
4448
4449 +#ifdef CONFIG_PAX_PAGEEXEC
4450 +/*
4451 + * PaX: decide what to do with offenders (regs->nip = fault address)
4452 + *
4453 + * returns 1 when task should be killed
4454 + */
4455 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4456 +{
4457 + return 1;
4458 +}
4459 +
4460 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4461 +{
4462 + unsigned long i;
4463 +
4464 + printk(KERN_ERR "PAX: bytes at PC: ");
4465 + for (i = 0; i < 5; i++) {
4466 + unsigned int c;
4467 + if (get_user(c, (unsigned int __user *)pc+i))
4468 + printk(KERN_CONT "???????? ");
4469 + else
4470 + printk(KERN_CONT "%08x ", c);
4471 + }
4472 + printk("\n");
4473 +}
4474 +#endif
4475 +
4476 /*
4477 * Check whether the instruction at regs->nip is a store using
4478 * an update addressing form which will update r1.
4479 @@ -215,7 +246,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4480 * indicate errors in DSISR but can validly be set in SRR1.
4481 */
4482 if (trap == 0x400)
4483 - error_code &= 0x48200000;
4484 + error_code &= 0x58200000;
4485 else
4486 is_write = error_code & DSISR_ISSTORE;
4487 #else
4488 @@ -366,7 +397,7 @@ good_area:
4489 * "undefined". Of those that can be set, this is the only
4490 * one which seems bad.
4491 */
4492 - if (error_code & 0x10000000)
4493 + if (error_code & DSISR_GUARDED)
4494 /* Guarded storage error. */
4495 goto bad_area;
4496 #endif /* CONFIG_8xx */
4497 @@ -381,7 +412,7 @@ good_area:
4498 * processors use the same I/D cache coherency mechanism
4499 * as embedded.
4500 */
4501 - if (error_code & DSISR_PROTFAULT)
4502 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4503 goto bad_area;
4504 #endif /* CONFIG_PPC_STD_MMU */
4505
4506 @@ -463,6 +494,23 @@ bad_area:
4507 bad_area_nosemaphore:
4508 /* User mode accesses cause a SIGSEGV */
4509 if (user_mode(regs)) {
4510 +
4511 +#ifdef CONFIG_PAX_PAGEEXEC
4512 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4513 +#ifdef CONFIG_PPC_STD_MMU
4514 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4515 +#else
4516 + if (is_exec && regs->nip == address) {
4517 +#endif
4518 + switch (pax_handle_fetch_fault(regs)) {
4519 + }
4520 +
4521 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4522 + do_group_exit(SIGKILL);
4523 + }
4524 + }
4525 +#endif
4526 +
4527 _exception(SIGSEGV, regs, code, address);
4528 return 0;
4529 }
4530 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4531 index 67a42ed..1c7210c 100644
4532 --- a/arch/powerpc/mm/mmap_64.c
4533 +++ b/arch/powerpc/mm/mmap_64.c
4534 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4535 */
4536 if (mmap_is_legacy()) {
4537 mm->mmap_base = TASK_UNMAPPED_BASE;
4538 +
4539 +#ifdef CONFIG_PAX_RANDMMAP
4540 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4541 + mm->mmap_base += mm->delta_mmap;
4542 +#endif
4543 +
4544 mm->get_unmapped_area = arch_get_unmapped_area;
4545 mm->unmap_area = arch_unmap_area;
4546 } else {
4547 mm->mmap_base = mmap_base();
4548 +
4549 +#ifdef CONFIG_PAX_RANDMMAP
4550 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4551 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4552 +#endif
4553 +
4554 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4555 mm->unmap_area = arch_unmap_area_topdown;
4556 }
4557 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4558 index 73709f7..6b90313 100644
4559 --- a/arch/powerpc/mm/slice.c
4560 +++ b/arch/powerpc/mm/slice.c
4561 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4562 if ((mm->task_size - len) < addr)
4563 return 0;
4564 vma = find_vma(mm, addr);
4565 - return (!vma || (addr + len) <= vma->vm_start);
4566 + return check_heap_stack_gap(vma, addr, len);
4567 }
4568
4569 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4570 @@ -256,7 +256,7 @@ full_search:
4571 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4572 continue;
4573 }
4574 - if (!vma || addr + len <= vma->vm_start) {
4575 + if (check_heap_stack_gap(vma, addr, len)) {
4576 /*
4577 * Remember the place where we stopped the search:
4578 */
4579 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4580 }
4581 }
4582
4583 - addr = mm->mmap_base;
4584 - while (addr > len) {
4585 + if (mm->mmap_base < len)
4586 + addr = -ENOMEM;
4587 + else
4588 + addr = mm->mmap_base - len;
4589 +
4590 + while (!IS_ERR_VALUE(addr)) {
4591 /* Go down by chunk size */
4592 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4593 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
4594
4595 /* Check for hit with different page size */
4596 mask = slice_range_to_mask(addr, len);
4597 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4598 * return with success:
4599 */
4600 vma = find_vma(mm, addr);
4601 - if (!vma || (addr + len) <= vma->vm_start) {
4602 + if (check_heap_stack_gap(vma, addr, len)) {
4603 /* remember the address as a hint for next time */
4604 if (use_cache)
4605 mm->free_area_cache = addr;
4606 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4607 mm->cached_hole_size = vma->vm_start - addr;
4608
4609 /* try just below the current vma->vm_start */
4610 - addr = vma->vm_start;
4611 + addr = skip_heap_stack_gap(vma, len);
4612 }
4613
4614 /*
4615 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4616 if (fixed && addr > (mm->task_size - len))
4617 return -EINVAL;
4618
4619 +#ifdef CONFIG_PAX_RANDMMAP
4620 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4621 + addr = 0;
4622 +#endif
4623 +
4624 /* If hint, make sure it matches our alignment restrictions */
4625 if (!fixed && addr) {
4626 addr = _ALIGN_UP(addr, 1ul << pshift);
4627 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4628 index 748347b..81bc6c7 100644
4629 --- a/arch/s390/include/asm/atomic.h
4630 +++ b/arch/s390/include/asm/atomic.h
4631 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4632 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4633 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4634
4635 +#define atomic64_read_unchecked(v) atomic64_read(v)
4636 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4637 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4638 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4639 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4640 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4641 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4642 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4643 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4644 +
4645 #define smp_mb__before_atomic_dec() smp_mb()
4646 #define smp_mb__after_atomic_dec() smp_mb()
4647 #define smp_mb__before_atomic_inc() smp_mb()
4648 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4649 index 2a30d5a..5e5586f 100644
4650 --- a/arch/s390/include/asm/cache.h
4651 +++ b/arch/s390/include/asm/cache.h
4652 @@ -11,8 +11,10 @@
4653 #ifndef __ARCH_S390_CACHE_H
4654 #define __ARCH_S390_CACHE_H
4655
4656 -#define L1_CACHE_BYTES 256
4657 +#include <linux/const.h>
4658 +
4659 #define L1_CACHE_SHIFT 8
4660 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4661 #define NET_SKB_PAD 32
4662
4663 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4664 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4665 index c4ee39f..352881b 100644
4666 --- a/arch/s390/include/asm/elf.h
4667 +++ b/arch/s390/include/asm/elf.h
4668 @@ -161,8 +161,14 @@ extern unsigned int vdso_enabled;
4669 the loader. We need to make sure that it is out of the way of the program
4670 that it will "exec", and that there is sufficient room for the brk. */
4671
4672 -extern unsigned long randomize_et_dyn(unsigned long base);
4673 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4674 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4675 +
4676 +#ifdef CONFIG_PAX_ASLR
4677 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4678 +
4679 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4680 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4681 +#endif
4682
4683 /* This yields a mask that user programs can use to figure out what
4684 instruction set this CPU supports. */
4685 @@ -210,7 +216,4 @@ struct linux_binprm;
4686 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4687 int arch_setup_additional_pages(struct linux_binprm *, int);
4688
4689 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4690 -#define arch_randomize_brk arch_randomize_brk
4691 -
4692 #endif
4693 diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
4694 index c4a93d6..4d2a9b4 100644
4695 --- a/arch/s390/include/asm/exec.h
4696 +++ b/arch/s390/include/asm/exec.h
4697 @@ -7,6 +7,6 @@
4698 #ifndef __ASM_EXEC_H
4699 #define __ASM_EXEC_H
4700
4701 -extern unsigned long arch_align_stack(unsigned long sp);
4702 +#define arch_align_stack(x) ((x) & ~0xfUL)
4703
4704 #endif /* __ASM_EXEC_H */
4705 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4706 index 8f2cada..1cddd55 100644
4707 --- a/arch/s390/include/asm/uaccess.h
4708 +++ b/arch/s390/include/asm/uaccess.h
4709 @@ -236,6 +236,10 @@ static inline unsigned long __must_check
4710 copy_to_user(void __user *to, const void *from, unsigned long n)
4711 {
4712 might_fault();
4713 +
4714 + if ((long)n < 0)
4715 + return n;
4716 +
4717 if (access_ok(VERIFY_WRITE, to, n))
4718 n = __copy_to_user(to, from, n);
4719 return n;
4720 @@ -261,6 +265,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4721 static inline unsigned long __must_check
4722 __copy_from_user(void *to, const void __user *from, unsigned long n)
4723 {
4724 + if ((long)n < 0)
4725 + return n;
4726 +
4727 if (__builtin_constant_p(n) && (n <= 256))
4728 return uaccess.copy_from_user_small(n, from, to);
4729 else
4730 @@ -295,6 +302,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
4731 unsigned int sz = __compiletime_object_size(to);
4732
4733 might_fault();
4734 +
4735 + if ((long)n < 0)
4736 + return n;
4737 +
4738 if (unlikely(sz != -1 && sz < n)) {
4739 copy_from_user_overflow();
4740 return n;
4741 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4742 index dfcb343..eda788a 100644
4743 --- a/arch/s390/kernel/module.c
4744 +++ b/arch/s390/kernel/module.c
4745 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4746
4747 /* Increase core size by size of got & plt and set start
4748 offsets for got and plt. */
4749 - me->core_size = ALIGN(me->core_size, 4);
4750 - me->arch.got_offset = me->core_size;
4751 - me->core_size += me->arch.got_size;
4752 - me->arch.plt_offset = me->core_size;
4753 - me->core_size += me->arch.plt_size;
4754 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4755 + me->arch.got_offset = me->core_size_rw;
4756 + me->core_size_rw += me->arch.got_size;
4757 + me->arch.plt_offset = me->core_size_rx;
4758 + me->core_size_rx += me->arch.plt_size;
4759 return 0;
4760 }
4761
4762 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4763 if (info->got_initialized == 0) {
4764 Elf_Addr *gotent;
4765
4766 - gotent = me->module_core + me->arch.got_offset +
4767 + gotent = me->module_core_rw + me->arch.got_offset +
4768 info->got_offset;
4769 *gotent = val;
4770 info->got_initialized = 1;
4771 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4772 else if (r_type == R_390_GOTENT ||
4773 r_type == R_390_GOTPLTENT)
4774 *(unsigned int *) loc =
4775 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4776 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4777 else if (r_type == R_390_GOT64 ||
4778 r_type == R_390_GOTPLT64)
4779 *(unsigned long *) loc = val;
4780 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4781 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4782 if (info->plt_initialized == 0) {
4783 unsigned int *ip;
4784 - ip = me->module_core + me->arch.plt_offset +
4785 + ip = me->module_core_rx + me->arch.plt_offset +
4786 info->plt_offset;
4787 #ifndef CONFIG_64BIT
4788 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4789 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4790 val - loc + 0xffffUL < 0x1ffffeUL) ||
4791 (r_type == R_390_PLT32DBL &&
4792 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4793 - val = (Elf_Addr) me->module_core +
4794 + val = (Elf_Addr) me->module_core_rx +
4795 me->arch.plt_offset +
4796 info->plt_offset;
4797 val += rela->r_addend - loc;
4798 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4799 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4800 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4801 val = val + rela->r_addend -
4802 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4803 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4804 if (r_type == R_390_GOTOFF16)
4805 *(unsigned short *) loc = val;
4806 else if (r_type == R_390_GOTOFF32)
4807 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4808 break;
4809 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4810 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4811 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4812 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4813 rela->r_addend - loc;
4814 if (r_type == R_390_GOTPC)
4815 *(unsigned int *) loc = val;
4816 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4817 index 60055ce..ee4b252 100644
4818 --- a/arch/s390/kernel/process.c
4819 +++ b/arch/s390/kernel/process.c
4820 @@ -316,39 +316,3 @@ unsigned long get_wchan(struct task_struct *p)
4821 }
4822 return 0;
4823 }
4824 -
4825 -unsigned long arch_align_stack(unsigned long sp)
4826 -{
4827 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4828 - sp -= get_random_int() & ~PAGE_MASK;
4829 - return sp & ~0xf;
4830 -}
4831 -
4832 -static inline unsigned long brk_rnd(void)
4833 -{
4834 - /* 8MB for 32bit, 1GB for 64bit */
4835 - if (is_32bit_task())
4836 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4837 - else
4838 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4839 -}
4840 -
4841 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4842 -{
4843 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4844 -
4845 - if (ret < mm->brk)
4846 - return mm->brk;
4847 - return ret;
4848 -}
4849 -
4850 -unsigned long randomize_et_dyn(unsigned long base)
4851 -{
4852 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4853 -
4854 - if (!(current->flags & PF_RANDOMIZE))
4855 - return base;
4856 - if (ret < base)
4857 - return base;
4858 - return ret;
4859 -}
4860 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4861 index 2857c48..d047481 100644
4862 --- a/arch/s390/mm/mmap.c
4863 +++ b/arch/s390/mm/mmap.c
4864 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4865 */
4866 if (mmap_is_legacy()) {
4867 mm->mmap_base = TASK_UNMAPPED_BASE;
4868 +
4869 +#ifdef CONFIG_PAX_RANDMMAP
4870 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4871 + mm->mmap_base += mm->delta_mmap;
4872 +#endif
4873 +
4874 mm->get_unmapped_area = arch_get_unmapped_area;
4875 mm->unmap_area = arch_unmap_area;
4876 } else {
4877 mm->mmap_base = mmap_base();
4878 +
4879 +#ifdef CONFIG_PAX_RANDMMAP
4880 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4881 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4882 +#endif
4883 +
4884 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4885 mm->unmap_area = arch_unmap_area_topdown;
4886 }
4887 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4888 */
4889 if (mmap_is_legacy()) {
4890 mm->mmap_base = TASK_UNMAPPED_BASE;
4891 +
4892 +#ifdef CONFIG_PAX_RANDMMAP
4893 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4894 + mm->mmap_base += mm->delta_mmap;
4895 +#endif
4896 +
4897 mm->get_unmapped_area = s390_get_unmapped_area;
4898 mm->unmap_area = arch_unmap_area;
4899 } else {
4900 mm->mmap_base = mmap_base();
4901 +
4902 +#ifdef CONFIG_PAX_RANDMMAP
4903 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4904 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4905 +#endif
4906 +
4907 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4908 mm->unmap_area = arch_unmap_area_topdown;
4909 }
4910 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4911 index ae3d59f..f65f075 100644
4912 --- a/arch/score/include/asm/cache.h
4913 +++ b/arch/score/include/asm/cache.h
4914 @@ -1,7 +1,9 @@
4915 #ifndef _ASM_SCORE_CACHE_H
4916 #define _ASM_SCORE_CACHE_H
4917
4918 +#include <linux/const.h>
4919 +
4920 #define L1_CACHE_SHIFT 4
4921 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4922 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4923
4924 #endif /* _ASM_SCORE_CACHE_H */
4925 diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
4926 index f9f3cd5..58ff438 100644
4927 --- a/arch/score/include/asm/exec.h
4928 +++ b/arch/score/include/asm/exec.h
4929 @@ -1,6 +1,6 @@
4930 #ifndef _ASM_SCORE_EXEC_H
4931 #define _ASM_SCORE_EXEC_H
4932
4933 -extern unsigned long arch_align_stack(unsigned long sp);
4934 +#define arch_align_stack(x) (x)
4935
4936 #endif /* _ASM_SCORE_EXEC_H */
4937 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4938 index 2707023..1c2a3b7 100644
4939 --- a/arch/score/kernel/process.c
4940 +++ b/arch/score/kernel/process.c
4941 @@ -159,8 +159,3 @@ unsigned long get_wchan(struct task_struct *task)
4942
4943 return task_pt_regs(task)->cp0_epc;
4944 }
4945 -
4946 -unsigned long arch_align_stack(unsigned long sp)
4947 -{
4948 - return sp;
4949 -}
4950 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4951 index ef9e555..331bd29 100644
4952 --- a/arch/sh/include/asm/cache.h
4953 +++ b/arch/sh/include/asm/cache.h
4954 @@ -9,10 +9,11 @@
4955 #define __ASM_SH_CACHE_H
4956 #ifdef __KERNEL__
4957
4958 +#include <linux/const.h>
4959 #include <linux/init.h>
4960 #include <cpu/cache.h>
4961
4962 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4963 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4964
4965 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4966
4967 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4968 index afeb710..d1d1289 100644
4969 --- a/arch/sh/mm/mmap.c
4970 +++ b/arch/sh/mm/mmap.c
4971 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4972 addr = PAGE_ALIGN(addr);
4973
4974 vma = find_vma(mm, addr);
4975 - if (TASK_SIZE - len >= addr &&
4976 - (!vma || addr + len <= vma->vm_start))
4977 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4978 return addr;
4979 }
4980
4981 @@ -106,7 +105,7 @@ full_search:
4982 }
4983 return -ENOMEM;
4984 }
4985 - if (likely(!vma || addr + len <= vma->vm_start)) {
4986 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4987 /*
4988 * Remember the place where we stopped the search:
4989 */
4990 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4991 addr = PAGE_ALIGN(addr);
4992
4993 vma = find_vma(mm, addr);
4994 - if (TASK_SIZE - len >= addr &&
4995 - (!vma || addr + len <= vma->vm_start))
4996 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4997 return addr;
4998 }
4999
5000 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5001 /* make sure it can fit in the remaining address space */
5002 if (likely(addr > len)) {
5003 vma = find_vma(mm, addr-len);
5004 - if (!vma || addr <= vma->vm_start) {
5005 + if (check_heap_stack_gap(vma, addr - len, len)) {
5006 /* remember the address as a hint for next time */
5007 return (mm->free_area_cache = addr-len);
5008 }
5009 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5010 if (unlikely(mm->mmap_base < len))
5011 goto bottomup;
5012
5013 - addr = mm->mmap_base-len;
5014 - if (do_colour_align)
5015 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5016 + addr = mm->mmap_base - len;
5017
5018 do {
5019 + if (do_colour_align)
5020 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5021 /*
5022 * Lookup failure means no vma is above this address,
5023 * else if new region fits below vma->vm_start,
5024 * return with success:
5025 */
5026 vma = find_vma(mm, addr);
5027 - if (likely(!vma || addr+len <= vma->vm_start)) {
5028 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5029 /* remember the address as a hint for next time */
5030 return (mm->free_area_cache = addr);
5031 }
5032 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5033 mm->cached_hole_size = vma->vm_start - addr;
5034
5035 /* try just below the current vma->vm_start */
5036 - addr = vma->vm_start-len;
5037 - if (do_colour_align)
5038 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5039 - } while (likely(len < vma->vm_start));
5040 + addr = skip_heap_stack_gap(vma, len);
5041 + } while (!IS_ERR_VALUE(addr));
5042
5043 bottomup:
5044 /*
5045 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
5046 index eddcfb3..b117d90 100644
5047 --- a/arch/sparc/Makefile
5048 +++ b/arch/sparc/Makefile
5049 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
5050 # Export what is needed by arch/sparc/boot/Makefile
5051 export VMLINUX_INIT VMLINUX_MAIN
5052 VMLINUX_INIT := $(head-y) $(init-y)
5053 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5054 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5055 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5056 VMLINUX_MAIN += $(drivers-y) $(net-y)
5057
5058 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5059 index ce35a1c..2e7b8f9 100644
5060 --- a/arch/sparc/include/asm/atomic_64.h
5061 +++ b/arch/sparc/include/asm/atomic_64.h
5062 @@ -14,18 +14,40 @@
5063 #define ATOMIC64_INIT(i) { (i) }
5064
5065 #define atomic_read(v) (*(volatile int *)&(v)->counter)
5066 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5067 +{
5068 + return v->counter;
5069 +}
5070 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
5071 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5072 +{
5073 + return v->counter;
5074 +}
5075
5076 #define atomic_set(v, i) (((v)->counter) = i)
5077 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5078 +{
5079 + v->counter = i;
5080 +}
5081 #define atomic64_set(v, i) (((v)->counter) = i)
5082 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5083 +{
5084 + v->counter = i;
5085 +}
5086
5087 extern void atomic_add(int, atomic_t *);
5088 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5089 extern void atomic64_add(long, atomic64_t *);
5090 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5091 extern void atomic_sub(int, atomic_t *);
5092 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5093 extern void atomic64_sub(long, atomic64_t *);
5094 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5095
5096 extern int atomic_add_ret(int, atomic_t *);
5097 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5098 extern long atomic64_add_ret(long, atomic64_t *);
5099 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5100 extern int atomic_sub_ret(int, atomic_t *);
5101 extern long atomic64_sub_ret(long, atomic64_t *);
5102
5103 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5104 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5105
5106 #define atomic_inc_return(v) atomic_add_ret(1, v)
5107 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5108 +{
5109 + return atomic_add_ret_unchecked(1, v);
5110 +}
5111 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5112 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5113 +{
5114 + return atomic64_add_ret_unchecked(1, v);
5115 +}
5116
5117 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5118 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5119
5120 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5121 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5122 +{
5123 + return atomic_add_ret_unchecked(i, v);
5124 +}
5125 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5126 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5127 +{
5128 + return atomic64_add_ret_unchecked(i, v);
5129 +}
5130
5131 /*
5132 * atomic_inc_and_test - increment and test
5133 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5134 * other cases.
5135 */
5136 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5137 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5138 +{
5139 + return atomic_inc_return_unchecked(v) == 0;
5140 +}
5141 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5142
5143 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5144 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5145 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5146
5147 #define atomic_inc(v) atomic_add(1, v)
5148 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5149 +{
5150 + atomic_add_unchecked(1, v);
5151 +}
5152 #define atomic64_inc(v) atomic64_add(1, v)
5153 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5154 +{
5155 + atomic64_add_unchecked(1, v);
5156 +}
5157
5158 #define atomic_dec(v) atomic_sub(1, v)
5159 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5160 +{
5161 + atomic_sub_unchecked(1, v);
5162 +}
5163 #define atomic64_dec(v) atomic64_sub(1, v)
5164 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5165 +{
5166 + atomic64_sub_unchecked(1, v);
5167 +}
5168
5169 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5170 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5171
5172 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5173 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5174 +{
5175 + return cmpxchg(&v->counter, old, new);
5176 +}
5177 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5178 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5179 +{
5180 + return xchg(&v->counter, new);
5181 +}
5182
5183 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5184 {
5185 - int c, old;
5186 + int c, old, new;
5187 c = atomic_read(v);
5188 for (;;) {
5189 - if (unlikely(c == (u)))
5190 + if (unlikely(c == u))
5191 break;
5192 - old = atomic_cmpxchg((v), c, c + (a));
5193 +
5194 + asm volatile("addcc %2, %0, %0\n"
5195 +
5196 +#ifdef CONFIG_PAX_REFCOUNT
5197 + "tvs %%icc, 6\n"
5198 +#endif
5199 +
5200 + : "=r" (new)
5201 + : "0" (c), "ir" (a)
5202 + : "cc");
5203 +
5204 + old = atomic_cmpxchg(v, c, new);
5205 if (likely(old == c))
5206 break;
5207 c = old;
5208 @@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5209 #define atomic64_cmpxchg(v, o, n) \
5210 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5211 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5212 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5213 +{
5214 + return xchg(&v->counter, new);
5215 +}
5216
5217 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5218 {
5219 - long c, old;
5220 + long c, old, new;
5221 c = atomic64_read(v);
5222 for (;;) {
5223 - if (unlikely(c == (u)))
5224 + if (unlikely(c == u))
5225 break;
5226 - old = atomic64_cmpxchg((v), c, c + (a));
5227 +
5228 + asm volatile("addcc %2, %0, %0\n"
5229 +
5230 +#ifdef CONFIG_PAX_REFCOUNT
5231 + "tvs %%xcc, 6\n"
5232 +#endif
5233 +
5234 + : "=r" (new)
5235 + : "0" (c), "ir" (a)
5236 + : "cc");
5237 +
5238 + old = atomic64_cmpxchg(v, c, new);
5239 if (likely(old == c))
5240 break;
5241 c = old;
5242 }
5243 - return c != (u);
5244 + return c != u;
5245 }
5246
5247 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5248 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5249 index 69358b5..9d0d492 100644
5250 --- a/arch/sparc/include/asm/cache.h
5251 +++ b/arch/sparc/include/asm/cache.h
5252 @@ -7,10 +7,12 @@
5253 #ifndef _SPARC_CACHE_H
5254 #define _SPARC_CACHE_H
5255
5256 +#include <linux/const.h>
5257 +
5258 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
5259
5260 #define L1_CACHE_SHIFT 5
5261 -#define L1_CACHE_BYTES 32
5262 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5263
5264 #ifdef CONFIG_SPARC32
5265 #define SMP_CACHE_BYTES_SHIFT 5
5266 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5267 index 4269ca6..e3da77f 100644
5268 --- a/arch/sparc/include/asm/elf_32.h
5269 +++ b/arch/sparc/include/asm/elf_32.h
5270 @@ -114,6 +114,13 @@ typedef struct {
5271
5272 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5273
5274 +#ifdef CONFIG_PAX_ASLR
5275 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
5276 +
5277 +#define PAX_DELTA_MMAP_LEN 16
5278 +#define PAX_DELTA_STACK_LEN 16
5279 +#endif
5280 +
5281 /* This yields a mask that user programs can use to figure out what
5282 instruction set this cpu supports. This can NOT be done in userspace
5283 on Sparc. */
5284 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5285 index 7df8b7f..4946269 100644
5286 --- a/arch/sparc/include/asm/elf_64.h
5287 +++ b/arch/sparc/include/asm/elf_64.h
5288 @@ -180,6 +180,13 @@ typedef struct {
5289 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5290 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5291
5292 +#ifdef CONFIG_PAX_ASLR
5293 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5294 +
5295 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5296 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5297 +#endif
5298 +
5299 extern unsigned long sparc64_elf_hwcap;
5300 #define ELF_HWCAP sparc64_elf_hwcap
5301
5302 diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
5303 index ca2b344..c6084f89 100644
5304 --- a/arch/sparc/include/asm/pgalloc_32.h
5305 +++ b/arch/sparc/include/asm/pgalloc_32.h
5306 @@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
5307 BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
5308 #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
5309 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
5310 +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
5311
5312 BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
5313 #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
5314 diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
5315 index 40b2d7a..22a665b 100644
5316 --- a/arch/sparc/include/asm/pgalloc_64.h
5317 +++ b/arch/sparc/include/asm/pgalloc_64.h
5318 @@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
5319 }
5320
5321 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
5322 +#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
5323
5324 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5325 {
5326 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5327 index 3d71018..48a11c5 100644
5328 --- a/arch/sparc/include/asm/pgtable_32.h
5329 +++ b/arch/sparc/include/asm/pgtable_32.h
5330 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5331 BTFIXUPDEF_INT(page_none)
5332 BTFIXUPDEF_INT(page_copy)
5333 BTFIXUPDEF_INT(page_readonly)
5334 +
5335 +#ifdef CONFIG_PAX_PAGEEXEC
5336 +BTFIXUPDEF_INT(page_shared_noexec)
5337 +BTFIXUPDEF_INT(page_copy_noexec)
5338 +BTFIXUPDEF_INT(page_readonly_noexec)
5339 +#endif
5340 +
5341 BTFIXUPDEF_INT(page_kernel)
5342
5343 #define PMD_SHIFT SUN4C_PMD_SHIFT
5344 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
5345 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5346 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5347
5348 +#ifdef CONFIG_PAX_PAGEEXEC
5349 +extern pgprot_t PAGE_SHARED_NOEXEC;
5350 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5351 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5352 +#else
5353 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
5354 +# define PAGE_COPY_NOEXEC PAGE_COPY
5355 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
5356 +#endif
5357 +
5358 extern unsigned long page_kernel;
5359
5360 #ifdef MODULE
5361 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5362 index f6ae2b2..b03ffc7 100644
5363 --- a/arch/sparc/include/asm/pgtsrmmu.h
5364 +++ b/arch/sparc/include/asm/pgtsrmmu.h
5365 @@ -115,6 +115,13 @@
5366 SRMMU_EXEC | SRMMU_REF)
5367 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5368 SRMMU_EXEC | SRMMU_REF)
5369 +
5370 +#ifdef CONFIG_PAX_PAGEEXEC
5371 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5372 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5373 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5374 +#endif
5375 +
5376 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5377 SRMMU_DIRTY | SRMMU_REF)
5378
5379 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5380 index 9689176..63c18ea 100644
5381 --- a/arch/sparc/include/asm/spinlock_64.h
5382 +++ b/arch/sparc/include/asm/spinlock_64.h
5383 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
5384
5385 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5386
5387 -static void inline arch_read_lock(arch_rwlock_t *lock)
5388 +static inline void arch_read_lock(arch_rwlock_t *lock)
5389 {
5390 unsigned long tmp1, tmp2;
5391
5392 __asm__ __volatile__ (
5393 "1: ldsw [%2], %0\n"
5394 " brlz,pn %0, 2f\n"
5395 -"4: add %0, 1, %1\n"
5396 +"4: addcc %0, 1, %1\n"
5397 +
5398 +#ifdef CONFIG_PAX_REFCOUNT
5399 +" tvs %%icc, 6\n"
5400 +#endif
5401 +
5402 " cas [%2], %0, %1\n"
5403 " cmp %0, %1\n"
5404 " bne,pn %%icc, 1b\n"
5405 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
5406 " .previous"
5407 : "=&r" (tmp1), "=&r" (tmp2)
5408 : "r" (lock)
5409 - : "memory");
5410 + : "memory", "cc");
5411 }
5412
5413 -static int inline arch_read_trylock(arch_rwlock_t *lock)
5414 +static inline int arch_read_trylock(arch_rwlock_t *lock)
5415 {
5416 int tmp1, tmp2;
5417
5418 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5419 "1: ldsw [%2], %0\n"
5420 " brlz,a,pn %0, 2f\n"
5421 " mov 0, %0\n"
5422 -" add %0, 1, %1\n"
5423 +" addcc %0, 1, %1\n"
5424 +
5425 +#ifdef CONFIG_PAX_REFCOUNT
5426 +" tvs %%icc, 6\n"
5427 +#endif
5428 +
5429 " cas [%2], %0, %1\n"
5430 " cmp %0, %1\n"
5431 " bne,pn %%icc, 1b\n"
5432 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5433 return tmp1;
5434 }
5435
5436 -static void inline arch_read_unlock(arch_rwlock_t *lock)
5437 +static inline void arch_read_unlock(arch_rwlock_t *lock)
5438 {
5439 unsigned long tmp1, tmp2;
5440
5441 __asm__ __volatile__(
5442 "1: lduw [%2], %0\n"
5443 -" sub %0, 1, %1\n"
5444 +" subcc %0, 1, %1\n"
5445 +
5446 +#ifdef CONFIG_PAX_REFCOUNT
5447 +" tvs %%icc, 6\n"
5448 +#endif
5449 +
5450 " cas [%2], %0, %1\n"
5451 " cmp %0, %1\n"
5452 " bne,pn %%xcc, 1b\n"
5453 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5454 : "memory");
5455 }
5456
5457 -static void inline arch_write_lock(arch_rwlock_t *lock)
5458 +static inline void arch_write_lock(arch_rwlock_t *lock)
5459 {
5460 unsigned long mask, tmp1, tmp2;
5461
5462 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5463 : "memory");
5464 }
5465
5466 -static void inline arch_write_unlock(arch_rwlock_t *lock)
5467 +static inline void arch_write_unlock(arch_rwlock_t *lock)
5468 {
5469 __asm__ __volatile__(
5470 " stw %%g0, [%0]"
5471 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5472 : "memory");
5473 }
5474
5475 -static int inline arch_write_trylock(arch_rwlock_t *lock)
5476 +static inline int arch_write_trylock(arch_rwlock_t *lock)
5477 {
5478 unsigned long mask, tmp1, tmp2, result;
5479
5480 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5481 index c2a1080..21ed218 100644
5482 --- a/arch/sparc/include/asm/thread_info_32.h
5483 +++ b/arch/sparc/include/asm/thread_info_32.h
5484 @@ -50,6 +50,8 @@ struct thread_info {
5485 unsigned long w_saved;
5486
5487 struct restart_block restart_block;
5488 +
5489 + unsigned long lowest_stack;
5490 };
5491
5492 /*
5493 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5494 index 01d057f..13a7d2f 100644
5495 --- a/arch/sparc/include/asm/thread_info_64.h
5496 +++ b/arch/sparc/include/asm/thread_info_64.h
5497 @@ -63,6 +63,8 @@ struct thread_info {
5498 struct pt_regs *kern_una_regs;
5499 unsigned int kern_una_insn;
5500
5501 + unsigned long lowest_stack;
5502 +
5503 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5504 };
5505
5506 @@ -214,10 +216,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
5507 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
5508 /* flag bit 6 is available */
5509 #define TIF_32BIT 7 /* 32-bit binary */
5510 -/* flag bit 8 is available */
5511 +#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
5512 #define TIF_SECCOMP 9 /* secure computing */
5513 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
5514 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
5515 +
5516 /* NOTE: Thread flags >= 12 should be ones we have no interest
5517 * in using in assembly, else we can't use the mask as
5518 * an immediate value in instructions such as andcc.
5519 @@ -236,12 +239,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
5520 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
5521 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
5522 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
5523 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5524
5525 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
5526 _TIF_DO_NOTIFY_RESUME_MASK | \
5527 _TIF_NEED_RESCHED)
5528 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
5529
5530 +#define _TIF_WORK_SYSCALL \
5531 + (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
5532 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
5533 +
5534 +
5535 /*
5536 * Thread-synchronous status.
5537 *
5538 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5539 index e88fbe5..96b0ce5 100644
5540 --- a/arch/sparc/include/asm/uaccess.h
5541 +++ b/arch/sparc/include/asm/uaccess.h
5542 @@ -1,5 +1,13 @@
5543 #ifndef ___ASM_SPARC_UACCESS_H
5544 #define ___ASM_SPARC_UACCESS_H
5545 +
5546 +#ifdef __KERNEL__
5547 +#ifndef __ASSEMBLY__
5548 +#include <linux/types.h>
5549 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
5550 +#endif
5551 +#endif
5552 +
5553 #if defined(__sparc__) && defined(__arch64__)
5554 #include <asm/uaccess_64.h>
5555 #else
5556 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5557 index 8303ac4..07f333d 100644
5558 --- a/arch/sparc/include/asm/uaccess_32.h
5559 +++ b/arch/sparc/include/asm/uaccess_32.h
5560 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5561
5562 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5563 {
5564 - if (n && __access_ok((unsigned long) to, n))
5565 + if ((long)n < 0)
5566 + return n;
5567 +
5568 + if (n && __access_ok((unsigned long) to, n)) {
5569 + if (!__builtin_constant_p(n))
5570 + check_object_size(from, n, true);
5571 return __copy_user(to, (__force void __user *) from, n);
5572 - else
5573 + } else
5574 return n;
5575 }
5576
5577 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5578 {
5579 + if ((long)n < 0)
5580 + return n;
5581 +
5582 + if (!__builtin_constant_p(n))
5583 + check_object_size(from, n, true);
5584 +
5585 return __copy_user(to, (__force void __user *) from, n);
5586 }
5587
5588 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5589 {
5590 - if (n && __access_ok((unsigned long) from, n))
5591 + if ((long)n < 0)
5592 + return n;
5593 +
5594 + if (n && __access_ok((unsigned long) from, n)) {
5595 + if (!__builtin_constant_p(n))
5596 + check_object_size(to, n, false);
5597 return __copy_user((__force void __user *) to, from, n);
5598 - else
5599 + } else
5600 return n;
5601 }
5602
5603 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5604 {
5605 + if ((long)n < 0)
5606 + return n;
5607 +
5608 return __copy_user((__force void __user *) to, from, n);
5609 }
5610
5611 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5612 index a1091afb..380228e 100644
5613 --- a/arch/sparc/include/asm/uaccess_64.h
5614 +++ b/arch/sparc/include/asm/uaccess_64.h
5615 @@ -10,6 +10,7 @@
5616 #include <linux/compiler.h>
5617 #include <linux/string.h>
5618 #include <linux/thread_info.h>
5619 +#include <linux/kernel.h>
5620 #include <asm/asi.h>
5621 #include <asm/spitfire.h>
5622 #include <asm-generic/uaccess-unaligned.h>
5623 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5624 static inline unsigned long __must_check
5625 copy_from_user(void *to, const void __user *from, unsigned long size)
5626 {
5627 - unsigned long ret = ___copy_from_user(to, from, size);
5628 + unsigned long ret;
5629
5630 + if ((long)size < 0 || size > INT_MAX)
5631 + return size;
5632 +
5633 + if (!__builtin_constant_p(size))
5634 + check_object_size(to, size, false);
5635 +
5636 + ret = ___copy_from_user(to, from, size);
5637 if (unlikely(ret))
5638 ret = copy_from_user_fixup(to, from, size);
5639
5640 @@ -229,8 +237,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5641 static inline unsigned long __must_check
5642 copy_to_user(void __user *to, const void *from, unsigned long size)
5643 {
5644 - unsigned long ret = ___copy_to_user(to, from, size);
5645 + unsigned long ret;
5646
5647 + if ((long)size < 0 || size > INT_MAX)
5648 + return size;
5649 +
5650 + if (!__builtin_constant_p(size))
5651 + check_object_size(from, size, true);
5652 +
5653 + ret = ___copy_to_user(to, from, size);
5654 if (unlikely(ret))
5655 ret = copy_to_user_fixup(to, from, size);
5656 return ret;
5657 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5658 index cb85458..e063f17 100644
5659 --- a/arch/sparc/kernel/Makefile
5660 +++ b/arch/sparc/kernel/Makefile
5661 @@ -3,7 +3,7 @@
5662 #
5663
5664 asflags-y := -ansi
5665 -ccflags-y := -Werror
5666 +#ccflags-y := -Werror
5667
5668 extra-y := head_$(BITS).o
5669 extra-y += init_task.o
5670 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5671 index efa0754..74b03fe 100644
5672 --- a/arch/sparc/kernel/process_32.c
5673 +++ b/arch/sparc/kernel/process_32.c
5674 @@ -200,7 +200,7 @@ void __show_backtrace(unsigned long fp)
5675 rw->ins[4], rw->ins[5],
5676 rw->ins[6],
5677 rw->ins[7]);
5678 - printk("%pS\n", (void *) rw->ins[7]);
5679 + printk("%pA\n", (void *) rw->ins[7]);
5680 rw = (struct reg_window32 *) rw->ins[6];
5681 }
5682 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5683 @@ -267,14 +267,14 @@ void show_regs(struct pt_regs *r)
5684
5685 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5686 r->psr, r->pc, r->npc, r->y, print_tainted());
5687 - printk("PC: <%pS>\n", (void *) r->pc);
5688 + printk("PC: <%pA>\n", (void *) r->pc);
5689 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5690 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5691 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5692 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5693 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5694 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5695 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5696 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5697
5698 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5699 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5700 @@ -309,7 +309,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5701 rw = (struct reg_window32 *) fp;
5702 pc = rw->ins[7];
5703 printk("[%08lx : ", pc);
5704 - printk("%pS ] ", (void *) pc);
5705 + printk("%pA ] ", (void *) pc);
5706 fp = rw->ins[6];
5707 } while (++count < 16);
5708 printk("\n");
5709 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5710 index aff0c72..9067b39 100644
5711 --- a/arch/sparc/kernel/process_64.c
5712 +++ b/arch/sparc/kernel/process_64.c
5713 @@ -179,14 +179,14 @@ static void show_regwindow(struct pt_regs *regs)
5714 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5715 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5716 if (regs->tstate & TSTATE_PRIV)
5717 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5718 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5719 }
5720
5721 void show_regs(struct pt_regs *regs)
5722 {
5723 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5724 regs->tpc, regs->tnpc, regs->y, print_tainted());
5725 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5726 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5727 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5728 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5729 regs->u_regs[3]);
5730 @@ -199,7 +199,7 @@ void show_regs(struct pt_regs *regs)
5731 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5732 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5733 regs->u_regs[15]);
5734 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5735 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5736 show_regwindow(regs);
5737 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5738 }
5739 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5740 ((tp && tp->task) ? tp->task->pid : -1));
5741
5742 if (gp->tstate & TSTATE_PRIV) {
5743 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5744 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5745 (void *) gp->tpc,
5746 (void *) gp->o7,
5747 (void *) gp->i7,
5748 diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
5749 index 6f97c07..b1300ec 100644
5750 --- a/arch/sparc/kernel/ptrace_64.c
5751 +++ b/arch/sparc/kernel/ptrace_64.c
5752 @@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
5753 return ret;
5754 }
5755
5756 +#ifdef CONFIG_GRKERNSEC_SETXID
5757 +extern void gr_delayed_cred_worker(void);
5758 +#endif
5759 +
5760 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5761 {
5762 int ret = 0;
5763 @@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5764 /* do the secure computing check first */
5765 secure_computing(regs->u_regs[UREG_G1]);
5766
5767 +#ifdef CONFIG_GRKERNSEC_SETXID
5768 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5769 + gr_delayed_cred_worker();
5770 +#endif
5771 +
5772 if (test_thread_flag(TIF_SYSCALL_TRACE))
5773 ret = tracehook_report_syscall_entry(regs);
5774
5775 @@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5776
5777 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
5778 {
5779 +#ifdef CONFIG_GRKERNSEC_SETXID
5780 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5781 + gr_delayed_cred_worker();
5782 +#endif
5783 +
5784 audit_syscall_exit(regs);
5785
5786 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
5787 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5788 index 42b282f..28ce9f2 100644
5789 --- a/arch/sparc/kernel/sys_sparc_32.c
5790 +++ b/arch/sparc/kernel/sys_sparc_32.c
5791 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5792 if (ARCH_SUN4C && len > 0x20000000)
5793 return -ENOMEM;
5794 if (!addr)
5795 - addr = TASK_UNMAPPED_BASE;
5796 + addr = current->mm->mmap_base;
5797
5798 if (flags & MAP_SHARED)
5799 addr = COLOUR_ALIGN(addr);
5800 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5801 }
5802 if (TASK_SIZE - PAGE_SIZE - len < addr)
5803 return -ENOMEM;
5804 - if (!vmm || addr + len <= vmm->vm_start)
5805 + if (check_heap_stack_gap(vmm, addr, len))
5806 return addr;
5807 addr = vmm->vm_end;
5808 if (flags & MAP_SHARED)
5809 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5810 index 3ee51f1..2ba4913 100644
5811 --- a/arch/sparc/kernel/sys_sparc_64.c
5812 +++ b/arch/sparc/kernel/sys_sparc_64.c
5813 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5814 /* We do not accept a shared mapping if it would violate
5815 * cache aliasing constraints.
5816 */
5817 - if ((flags & MAP_SHARED) &&
5818 + if ((filp || (flags & MAP_SHARED)) &&
5819 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5820 return -EINVAL;
5821 return addr;
5822 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5823 if (filp || (flags & MAP_SHARED))
5824 do_color_align = 1;
5825
5826 +#ifdef CONFIG_PAX_RANDMMAP
5827 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5828 +#endif
5829 +
5830 if (addr) {
5831 if (do_color_align)
5832 addr = COLOUR_ALIGN(addr, pgoff);
5833 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5834 addr = PAGE_ALIGN(addr);
5835
5836 vma = find_vma(mm, addr);
5837 - if (task_size - len >= addr &&
5838 - (!vma || addr + len <= vma->vm_start))
5839 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5840 return addr;
5841 }
5842
5843 if (len > mm->cached_hole_size) {
5844 - start_addr = addr = mm->free_area_cache;
5845 + start_addr = addr = mm->free_area_cache;
5846 } else {
5847 - start_addr = addr = TASK_UNMAPPED_BASE;
5848 + start_addr = addr = mm->mmap_base;
5849 mm->cached_hole_size = 0;
5850 }
5851
5852 @@ -174,14 +177,14 @@ full_search:
5853 vma = find_vma(mm, VA_EXCLUDE_END);
5854 }
5855 if (unlikely(task_size < addr)) {
5856 - if (start_addr != TASK_UNMAPPED_BASE) {
5857 - start_addr = addr = TASK_UNMAPPED_BASE;
5858 + if (start_addr != mm->mmap_base) {
5859 + start_addr = addr = mm->mmap_base;
5860 mm->cached_hole_size = 0;
5861 goto full_search;
5862 }
5863 return -ENOMEM;
5864 }
5865 - if (likely(!vma || addr + len <= vma->vm_start)) {
5866 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5867 /*
5868 * Remember the place where we stopped the search:
5869 */
5870 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5871 /* We do not accept a shared mapping if it would violate
5872 * cache aliasing constraints.
5873 */
5874 - if ((flags & MAP_SHARED) &&
5875 + if ((filp || (flags & MAP_SHARED)) &&
5876 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5877 return -EINVAL;
5878 return addr;
5879 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5880 addr = PAGE_ALIGN(addr);
5881
5882 vma = find_vma(mm, addr);
5883 - if (task_size - len >= addr &&
5884 - (!vma || addr + len <= vma->vm_start))
5885 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5886 return addr;
5887 }
5888
5889 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5890 /* make sure it can fit in the remaining address space */
5891 if (likely(addr > len)) {
5892 vma = find_vma(mm, addr-len);
5893 - if (!vma || addr <= vma->vm_start) {
5894 + if (check_heap_stack_gap(vma, addr - len, len)) {
5895 /* remember the address as a hint for next time */
5896 return (mm->free_area_cache = addr-len);
5897 }
5898 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5899 if (unlikely(mm->mmap_base < len))
5900 goto bottomup;
5901
5902 - addr = mm->mmap_base-len;
5903 - if (do_color_align)
5904 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5905 + addr = mm->mmap_base - len;
5906
5907 do {
5908 + if (do_color_align)
5909 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5910 /*
5911 * Lookup failure means no vma is above this address,
5912 * else if new region fits below vma->vm_start,
5913 * return with success:
5914 */
5915 vma = find_vma(mm, addr);
5916 - if (likely(!vma || addr+len <= vma->vm_start)) {
5917 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5918 /* remember the address as a hint for next time */
5919 return (mm->free_area_cache = addr);
5920 }
5921 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5922 mm->cached_hole_size = vma->vm_start - addr;
5923
5924 /* try just below the current vma->vm_start */
5925 - addr = vma->vm_start-len;
5926 - if (do_color_align)
5927 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5928 - } while (likely(len < vma->vm_start));
5929 + addr = skip_heap_stack_gap(vma, len);
5930 + } while (!IS_ERR_VALUE(addr));
5931
5932 bottomup:
5933 /*
5934 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5935 gap == RLIM_INFINITY ||
5936 sysctl_legacy_va_layout) {
5937 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5938 +
5939 +#ifdef CONFIG_PAX_RANDMMAP
5940 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5941 + mm->mmap_base += mm->delta_mmap;
5942 +#endif
5943 +
5944 mm->get_unmapped_area = arch_get_unmapped_area;
5945 mm->unmap_area = arch_unmap_area;
5946 } else {
5947 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5948 gap = (task_size / 6 * 5);
5949
5950 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5951 +
5952 +#ifdef CONFIG_PAX_RANDMMAP
5953 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5954 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5955 +#endif
5956 +
5957 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5958 mm->unmap_area = arch_unmap_area_topdown;
5959 }
5960 diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
5961 index 1d7e274..b39c527 100644
5962 --- a/arch/sparc/kernel/syscalls.S
5963 +++ b/arch/sparc/kernel/syscalls.S
5964 @@ -62,7 +62,7 @@ sys32_rt_sigreturn:
5965 #endif
5966 .align 32
5967 1: ldx [%g6 + TI_FLAGS], %l5
5968 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5969 + andcc %l5, _TIF_WORK_SYSCALL, %g0
5970 be,pt %icc, rtrap
5971 nop
5972 call syscall_trace_leave
5973 @@ -179,7 +179,7 @@ linux_sparc_syscall32:
5974
5975 srl %i5, 0, %o5 ! IEU1
5976 srl %i2, 0, %o2 ! IEU0 Group
5977 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5978 + andcc %l0, _TIF_WORK_SYSCALL, %g0
5979 bne,pn %icc, linux_syscall_trace32 ! CTI
5980 mov %i0, %l5 ! IEU1
5981 call %l7 ! CTI Group brk forced
5982 @@ -202,7 +202,7 @@ linux_sparc_syscall:
5983
5984 mov %i3, %o3 ! IEU1
5985 mov %i4, %o4 ! IEU0 Group
5986 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5987 + andcc %l0, _TIF_WORK_SYSCALL, %g0
5988 bne,pn %icc, linux_syscall_trace ! CTI Group
5989 mov %i0, %l5 ! IEU0
5990 2: call %l7 ! CTI Group brk forced
5991 @@ -226,7 +226,7 @@ ret_sys_call:
5992
5993 cmp %o0, -ERESTART_RESTARTBLOCK
5994 bgeu,pn %xcc, 1f
5995 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
5996 + andcc %l0, _TIF_WORK_SYSCALL, %l6
5997 80:
5998 /* System call success, clear Carry condition code. */
5999 andn %g3, %g2, %g3
6000 @@ -241,7 +241,7 @@ ret_sys_call:
6001 /* System call failure, set Carry condition code.
6002 * Also, get abs(errno) to return to the process.
6003 */
6004 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6005 + andcc %l0, _TIF_WORK_SYSCALL, %l6
6006 sub %g0, %o0, %o0
6007 or %g3, %g2, %g3
6008 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
6009 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6010 index d2de213..6b22bc3 100644
6011 --- a/arch/sparc/kernel/traps_32.c
6012 +++ b/arch/sparc/kernel/traps_32.c
6013 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
6014 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6015 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6016
6017 +extern void gr_handle_kernel_exploit(void);
6018 +
6019 void die_if_kernel(char *str, struct pt_regs *regs)
6020 {
6021 static int die_counter;
6022 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6023 count++ < 30 &&
6024 (((unsigned long) rw) >= PAGE_OFFSET) &&
6025 !(((unsigned long) rw) & 0x7)) {
6026 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
6027 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
6028 (void *) rw->ins[7]);
6029 rw = (struct reg_window32 *)rw->ins[6];
6030 }
6031 }
6032 printk("Instruction DUMP:");
6033 instruction_dump ((unsigned long *) regs->pc);
6034 - if(regs->psr & PSR_PS)
6035 + if(regs->psr & PSR_PS) {
6036 + gr_handle_kernel_exploit();
6037 do_exit(SIGKILL);
6038 + }
6039 do_exit(SIGSEGV);
6040 }
6041
6042 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6043 index c72fdf5..743a344 100644
6044 --- a/arch/sparc/kernel/traps_64.c
6045 +++ b/arch/sparc/kernel/traps_64.c
6046 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6047 i + 1,
6048 p->trapstack[i].tstate, p->trapstack[i].tpc,
6049 p->trapstack[i].tnpc, p->trapstack[i].tt);
6050 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6051 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6052 }
6053 }
6054
6055 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6056
6057 lvl -= 0x100;
6058 if (regs->tstate & TSTATE_PRIV) {
6059 +
6060 +#ifdef CONFIG_PAX_REFCOUNT
6061 + if (lvl == 6)
6062 + pax_report_refcount_overflow(regs);
6063 +#endif
6064 +
6065 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6066 die_if_kernel(buffer, regs);
6067 }
6068 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6069 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6070 {
6071 char buffer[32];
6072 -
6073 +
6074 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6075 0, lvl, SIGTRAP) == NOTIFY_STOP)
6076 return;
6077
6078 +#ifdef CONFIG_PAX_REFCOUNT
6079 + if (lvl == 6)
6080 + pax_report_refcount_overflow(regs);
6081 +#endif
6082 +
6083 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6084
6085 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6086 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6087 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6088 printk("%s" "ERROR(%d): ",
6089 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6090 - printk("TPC<%pS>\n", (void *) regs->tpc);
6091 + printk("TPC<%pA>\n", (void *) regs->tpc);
6092 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6093 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6094 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6095 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6096 smp_processor_id(),
6097 (type & 0x1) ? 'I' : 'D',
6098 regs->tpc);
6099 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6100 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6101 panic("Irrecoverable Cheetah+ parity error.");
6102 }
6103
6104 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6105 smp_processor_id(),
6106 (type & 0x1) ? 'I' : 'D',
6107 regs->tpc);
6108 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6109 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6110 }
6111
6112 struct sun4v_error_entry {
6113 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6114
6115 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6116 regs->tpc, tl);
6117 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6118 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6119 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6120 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6121 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6122 (void *) regs->u_regs[UREG_I7]);
6123 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6124 "pte[%lx] error[%lx]\n",
6125 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6126
6127 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6128 regs->tpc, tl);
6129 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6130 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6131 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6132 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6133 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6134 (void *) regs->u_regs[UREG_I7]);
6135 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6136 "pte[%lx] error[%lx]\n",
6137 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6138 fp = (unsigned long)sf->fp + STACK_BIAS;
6139 }
6140
6141 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6142 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6143 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6144 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
6145 int index = tsk->curr_ret_stack;
6146 if (tsk->ret_stack && index >= graph) {
6147 pc = tsk->ret_stack[index - graph].ret;
6148 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6149 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6150 graph++;
6151 }
6152 }
6153 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6154 return (struct reg_window *) (fp + STACK_BIAS);
6155 }
6156
6157 +extern void gr_handle_kernel_exploit(void);
6158 +
6159 void die_if_kernel(char *str, struct pt_regs *regs)
6160 {
6161 static int die_counter;
6162 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6163 while (rw &&
6164 count++ < 30 &&
6165 kstack_valid(tp, (unsigned long) rw)) {
6166 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
6167 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
6168 (void *) rw->ins[7]);
6169
6170 rw = kernel_stack_up(rw);
6171 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6172 }
6173 user_instruction_dump ((unsigned int __user *) regs->tpc);
6174 }
6175 - if (regs->tstate & TSTATE_PRIV)
6176 + if (regs->tstate & TSTATE_PRIV) {
6177 + gr_handle_kernel_exploit();
6178 do_exit(SIGKILL);
6179 + }
6180 do_exit(SIGSEGV);
6181 }
6182 EXPORT_SYMBOL(die_if_kernel);
6183 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6184 index dae85bc..af1e19d 100644
6185 --- a/arch/sparc/kernel/unaligned_64.c
6186 +++ b/arch/sparc/kernel/unaligned_64.c
6187 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
6188 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
6189
6190 if (__ratelimit(&ratelimit)) {
6191 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
6192 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
6193 regs->tpc, (void *) regs->tpc);
6194 }
6195 }
6196 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6197 index a3fc437..fea9957 100644
6198 --- a/arch/sparc/lib/Makefile
6199 +++ b/arch/sparc/lib/Makefile
6200 @@ -2,7 +2,7 @@
6201 #
6202
6203 asflags-y := -ansi -DST_DIV0=0x02
6204 -ccflags-y := -Werror
6205 +#ccflags-y := -Werror
6206
6207 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6208 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6209 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6210 index 59186e0..f747d7a 100644
6211 --- a/arch/sparc/lib/atomic_64.S
6212 +++ b/arch/sparc/lib/atomic_64.S
6213 @@ -18,7 +18,12 @@
6214 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6215 BACKOFF_SETUP(%o2)
6216 1: lduw [%o1], %g1
6217 - add %g1, %o0, %g7
6218 + addcc %g1, %o0, %g7
6219 +
6220 +#ifdef CONFIG_PAX_REFCOUNT
6221 + tvs %icc, 6
6222 +#endif
6223 +
6224 cas [%o1], %g1, %g7
6225 cmp %g1, %g7
6226 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6227 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6228 2: BACKOFF_SPIN(%o2, %o3, 1b)
6229 .size atomic_add, .-atomic_add
6230
6231 + .globl atomic_add_unchecked
6232 + .type atomic_add_unchecked,#function
6233 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6234 + BACKOFF_SETUP(%o2)
6235 +1: lduw [%o1], %g1
6236 + add %g1, %o0, %g7
6237 + cas [%o1], %g1, %g7
6238 + cmp %g1, %g7
6239 + bne,pn %icc, 2f
6240 + nop
6241 + retl
6242 + nop
6243 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6244 + .size atomic_add_unchecked, .-atomic_add_unchecked
6245 +
6246 .globl atomic_sub
6247 .type atomic_sub,#function
6248 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6249 BACKOFF_SETUP(%o2)
6250 1: lduw [%o1], %g1
6251 - sub %g1, %o0, %g7
6252 + subcc %g1, %o0, %g7
6253 +
6254 +#ifdef CONFIG_PAX_REFCOUNT
6255 + tvs %icc, 6
6256 +#endif
6257 +
6258 cas [%o1], %g1, %g7
6259 cmp %g1, %g7
6260 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6261 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6262 2: BACKOFF_SPIN(%o2, %o3, 1b)
6263 .size atomic_sub, .-atomic_sub
6264
6265 + .globl atomic_sub_unchecked
6266 + .type atomic_sub_unchecked,#function
6267 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6268 + BACKOFF_SETUP(%o2)
6269 +1: lduw [%o1], %g1
6270 + sub %g1, %o0, %g7
6271 + cas [%o1], %g1, %g7
6272 + cmp %g1, %g7
6273 + bne,pn %icc, 2f
6274 + nop
6275 + retl
6276 + nop
6277 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6278 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
6279 +
6280 .globl atomic_add_ret
6281 .type atomic_add_ret,#function
6282 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6283 BACKOFF_SETUP(%o2)
6284 1: lduw [%o1], %g1
6285 - add %g1, %o0, %g7
6286 + addcc %g1, %o0, %g7
6287 +
6288 +#ifdef CONFIG_PAX_REFCOUNT
6289 + tvs %icc, 6
6290 +#endif
6291 +
6292 cas [%o1], %g1, %g7
6293 cmp %g1, %g7
6294 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6295 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6296 2: BACKOFF_SPIN(%o2, %o3, 1b)
6297 .size atomic_add_ret, .-atomic_add_ret
6298
6299 + .globl atomic_add_ret_unchecked
6300 + .type atomic_add_ret_unchecked,#function
6301 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6302 + BACKOFF_SETUP(%o2)
6303 +1: lduw [%o1], %g1
6304 + addcc %g1, %o0, %g7
6305 + cas [%o1], %g1, %g7
6306 + cmp %g1, %g7
6307 + bne,pn %icc, 2f
6308 + add %g7, %o0, %g7
6309 + sra %g7, 0, %o0
6310 + retl
6311 + nop
6312 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6313 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6314 +
6315 .globl atomic_sub_ret
6316 .type atomic_sub_ret,#function
6317 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6318 BACKOFF_SETUP(%o2)
6319 1: lduw [%o1], %g1
6320 - sub %g1, %o0, %g7
6321 + subcc %g1, %o0, %g7
6322 +
6323 +#ifdef CONFIG_PAX_REFCOUNT
6324 + tvs %icc, 6
6325 +#endif
6326 +
6327 cas [%o1], %g1, %g7
6328 cmp %g1, %g7
6329 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6330 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6331 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6332 BACKOFF_SETUP(%o2)
6333 1: ldx [%o1], %g1
6334 - add %g1, %o0, %g7
6335 + addcc %g1, %o0, %g7
6336 +
6337 +#ifdef CONFIG_PAX_REFCOUNT
6338 + tvs %xcc, 6
6339 +#endif
6340 +
6341 casx [%o1], %g1, %g7
6342 cmp %g1, %g7
6343 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6344 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6345 2: BACKOFF_SPIN(%o2, %o3, 1b)
6346 .size atomic64_add, .-atomic64_add
6347
6348 + .globl atomic64_add_unchecked
6349 + .type atomic64_add_unchecked,#function
6350 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6351 + BACKOFF_SETUP(%o2)
6352 +1: ldx [%o1], %g1
6353 + addcc %g1, %o0, %g7
6354 + casx [%o1], %g1, %g7
6355 + cmp %g1, %g7
6356 + bne,pn %xcc, 2f
6357 + nop
6358 + retl
6359 + nop
6360 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6361 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
6362 +
6363 .globl atomic64_sub
6364 .type atomic64_sub,#function
6365 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6366 BACKOFF_SETUP(%o2)
6367 1: ldx [%o1], %g1
6368 - sub %g1, %o0, %g7
6369 + subcc %g1, %o0, %g7
6370 +
6371 +#ifdef CONFIG_PAX_REFCOUNT
6372 + tvs %xcc, 6
6373 +#endif
6374 +
6375 casx [%o1], %g1, %g7
6376 cmp %g1, %g7
6377 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6378 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6379 2: BACKOFF_SPIN(%o2, %o3, 1b)
6380 .size atomic64_sub, .-atomic64_sub
6381
6382 + .globl atomic64_sub_unchecked
6383 + .type atomic64_sub_unchecked,#function
6384 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6385 + BACKOFF_SETUP(%o2)
6386 +1: ldx [%o1], %g1
6387 + subcc %g1, %o0, %g7
6388 + casx [%o1], %g1, %g7
6389 + cmp %g1, %g7
6390 + bne,pn %xcc, 2f
6391 + nop
6392 + retl
6393 + nop
6394 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6395 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6396 +
6397 .globl atomic64_add_ret
6398 .type atomic64_add_ret,#function
6399 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6400 BACKOFF_SETUP(%o2)
6401 1: ldx [%o1], %g1
6402 - add %g1, %o0, %g7
6403 + addcc %g1, %o0, %g7
6404 +
6405 +#ifdef CONFIG_PAX_REFCOUNT
6406 + tvs %xcc, 6
6407 +#endif
6408 +
6409 casx [%o1], %g1, %g7
6410 cmp %g1, %g7
6411 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6412 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6413 2: BACKOFF_SPIN(%o2, %o3, 1b)
6414 .size atomic64_add_ret, .-atomic64_add_ret
6415
6416 + .globl atomic64_add_ret_unchecked
6417 + .type atomic64_add_ret_unchecked,#function
6418 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6419 + BACKOFF_SETUP(%o2)
6420 +1: ldx [%o1], %g1
6421 + addcc %g1, %o0, %g7
6422 + casx [%o1], %g1, %g7
6423 + cmp %g1, %g7
6424 + bne,pn %xcc, 2f
6425 + add %g7, %o0, %g7
6426 + mov %g7, %o0
6427 + retl
6428 + nop
6429 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6430 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6431 +
6432 .globl atomic64_sub_ret
6433 .type atomic64_sub_ret,#function
6434 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6435 BACKOFF_SETUP(%o2)
6436 1: ldx [%o1], %g1
6437 - sub %g1, %o0, %g7
6438 + subcc %g1, %o0, %g7
6439 +
6440 +#ifdef CONFIG_PAX_REFCOUNT
6441 + tvs %xcc, 6
6442 +#endif
6443 +
6444 casx [%o1], %g1, %g7
6445 cmp %g1, %g7
6446 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6447 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6448 index f73c224..662af10 100644
6449 --- a/arch/sparc/lib/ksyms.c
6450 +++ b/arch/sparc/lib/ksyms.c
6451 @@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
6452
6453 /* Atomic counter implementation. */
6454 EXPORT_SYMBOL(atomic_add);
6455 +EXPORT_SYMBOL(atomic_add_unchecked);
6456 EXPORT_SYMBOL(atomic_add_ret);
6457 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
6458 EXPORT_SYMBOL(atomic_sub);
6459 +EXPORT_SYMBOL(atomic_sub_unchecked);
6460 EXPORT_SYMBOL(atomic_sub_ret);
6461 EXPORT_SYMBOL(atomic64_add);
6462 +EXPORT_SYMBOL(atomic64_add_unchecked);
6463 EXPORT_SYMBOL(atomic64_add_ret);
6464 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6465 EXPORT_SYMBOL(atomic64_sub);
6466 +EXPORT_SYMBOL(atomic64_sub_unchecked);
6467 EXPORT_SYMBOL(atomic64_sub_ret);
6468
6469 /* Atomic bit operations. */
6470 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6471 index 301421c..e2535d1 100644
6472 --- a/arch/sparc/mm/Makefile
6473 +++ b/arch/sparc/mm/Makefile
6474 @@ -2,7 +2,7 @@
6475 #
6476
6477 asflags-y := -ansi
6478 -ccflags-y := -Werror
6479 +#ccflags-y := -Werror
6480
6481 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
6482 obj-y += fault_$(BITS).o
6483 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6484 index df3155a..eb708b8 100644
6485 --- a/arch/sparc/mm/fault_32.c
6486 +++ b/arch/sparc/mm/fault_32.c
6487 @@ -21,6 +21,9 @@
6488 #include <linux/perf_event.h>
6489 #include <linux/interrupt.h>
6490 #include <linux/kdebug.h>
6491 +#include <linux/slab.h>
6492 +#include <linux/pagemap.h>
6493 +#include <linux/compiler.h>
6494
6495 #include <asm/page.h>
6496 #include <asm/pgtable.h>
6497 @@ -207,6 +210,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6498 return safe_compute_effective_address(regs, insn);
6499 }
6500
6501 +#ifdef CONFIG_PAX_PAGEEXEC
6502 +#ifdef CONFIG_PAX_DLRESOLVE
6503 +static void pax_emuplt_close(struct vm_area_struct *vma)
6504 +{
6505 + vma->vm_mm->call_dl_resolve = 0UL;
6506 +}
6507 +
6508 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6509 +{
6510 + unsigned int *kaddr;
6511 +
6512 + vmf->page = alloc_page(GFP_HIGHUSER);
6513 + if (!vmf->page)
6514 + return VM_FAULT_OOM;
6515 +
6516 + kaddr = kmap(vmf->page);
6517 + memset(kaddr, 0, PAGE_SIZE);
6518 + kaddr[0] = 0x9DE3BFA8U; /* save */
6519 + flush_dcache_page(vmf->page);
6520 + kunmap(vmf->page);
6521 + return VM_FAULT_MAJOR;
6522 +}
6523 +
6524 +static const struct vm_operations_struct pax_vm_ops = {
6525 + .close = pax_emuplt_close,
6526 + .fault = pax_emuplt_fault
6527 +};
6528 +
6529 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6530 +{
6531 + int ret;
6532 +
6533 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6534 + vma->vm_mm = current->mm;
6535 + vma->vm_start = addr;
6536 + vma->vm_end = addr + PAGE_SIZE;
6537 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6538 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6539 + vma->vm_ops = &pax_vm_ops;
6540 +
6541 + ret = insert_vm_struct(current->mm, vma);
6542 + if (ret)
6543 + return ret;
6544 +
6545 + ++current->mm->total_vm;
6546 + return 0;
6547 +}
6548 +#endif
6549 +
6550 +/*
6551 + * PaX: decide what to do with offenders (regs->pc = fault address)
6552 + *
6553 + * returns 1 when task should be killed
6554 + * 2 when patched PLT trampoline was detected
6555 + * 3 when unpatched PLT trampoline was detected
6556 + */
6557 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6558 +{
6559 +
6560 +#ifdef CONFIG_PAX_EMUPLT
6561 + int err;
6562 +
6563 + do { /* PaX: patched PLT emulation #1 */
6564 + unsigned int sethi1, sethi2, jmpl;
6565 +
6566 + err = get_user(sethi1, (unsigned int *)regs->pc);
6567 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6568 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6569 +
6570 + if (err)
6571 + break;
6572 +
6573 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6574 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6575 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6576 + {
6577 + unsigned int addr;
6578 +
6579 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6580 + addr = regs->u_regs[UREG_G1];
6581 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6582 + regs->pc = addr;
6583 + regs->npc = addr+4;
6584 + return 2;
6585 + }
6586 + } while (0);
6587 +
6588 + { /* PaX: patched PLT emulation #2 */
6589 + unsigned int ba;
6590 +
6591 + err = get_user(ba, (unsigned int *)regs->pc);
6592 +
6593 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6594 + unsigned int addr;
6595 +
6596 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6597 + regs->pc = addr;
6598 + regs->npc = addr+4;
6599 + return 2;
6600 + }
6601 + }
6602 +
6603 + do { /* PaX: patched PLT emulation #3 */
6604 + unsigned int sethi, jmpl, nop;
6605 +
6606 + err = get_user(sethi, (unsigned int *)regs->pc);
6607 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6608 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6609 +
6610 + if (err)
6611 + break;
6612 +
6613 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6614 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6615 + nop == 0x01000000U)
6616 + {
6617 + unsigned int addr;
6618 +
6619 + addr = (sethi & 0x003FFFFFU) << 10;
6620 + regs->u_regs[UREG_G1] = addr;
6621 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6622 + regs->pc = addr;
6623 + regs->npc = addr+4;
6624 + return 2;
6625 + }
6626 + } while (0);
6627 +
6628 + do { /* PaX: unpatched PLT emulation step 1 */
6629 + unsigned int sethi, ba, nop;
6630 +
6631 + err = get_user(sethi, (unsigned int *)regs->pc);
6632 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6633 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6634 +
6635 + if (err)
6636 + break;
6637 +
6638 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6639 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6640 + nop == 0x01000000U)
6641 + {
6642 + unsigned int addr, save, call;
6643 +
6644 + if ((ba & 0xFFC00000U) == 0x30800000U)
6645 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6646 + else
6647 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6648 +
6649 + err = get_user(save, (unsigned int *)addr);
6650 + err |= get_user(call, (unsigned int *)(addr+4));
6651 + err |= get_user(nop, (unsigned int *)(addr+8));
6652 + if (err)
6653 + break;
6654 +
6655 +#ifdef CONFIG_PAX_DLRESOLVE
6656 + if (save == 0x9DE3BFA8U &&
6657 + (call & 0xC0000000U) == 0x40000000U &&
6658 + nop == 0x01000000U)
6659 + {
6660 + struct vm_area_struct *vma;
6661 + unsigned long call_dl_resolve;
6662 +
6663 + down_read(&current->mm->mmap_sem);
6664 + call_dl_resolve = current->mm->call_dl_resolve;
6665 + up_read(&current->mm->mmap_sem);
6666 + if (likely(call_dl_resolve))
6667 + goto emulate;
6668 +
6669 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6670 +
6671 + down_write(&current->mm->mmap_sem);
6672 + if (current->mm->call_dl_resolve) {
6673 + call_dl_resolve = current->mm->call_dl_resolve;
6674 + up_write(&current->mm->mmap_sem);
6675 + if (vma)
6676 + kmem_cache_free(vm_area_cachep, vma);
6677 + goto emulate;
6678 + }
6679 +
6680 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6681 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6682 + up_write(&current->mm->mmap_sem);
6683 + if (vma)
6684 + kmem_cache_free(vm_area_cachep, vma);
6685 + return 1;
6686 + }
6687 +
6688 + if (pax_insert_vma(vma, call_dl_resolve)) {
6689 + up_write(&current->mm->mmap_sem);
6690 + kmem_cache_free(vm_area_cachep, vma);
6691 + return 1;
6692 + }
6693 +
6694 + current->mm->call_dl_resolve = call_dl_resolve;
6695 + up_write(&current->mm->mmap_sem);
6696 +
6697 +emulate:
6698 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6699 + regs->pc = call_dl_resolve;
6700 + regs->npc = addr+4;
6701 + return 3;
6702 + }
6703 +#endif
6704 +
6705 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6706 + if ((save & 0xFFC00000U) == 0x05000000U &&
6707 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6708 + nop == 0x01000000U)
6709 + {
6710 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6711 + regs->u_regs[UREG_G2] = addr + 4;
6712 + addr = (save & 0x003FFFFFU) << 10;
6713 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6714 + regs->pc = addr;
6715 + regs->npc = addr+4;
6716 + return 3;
6717 + }
6718 + }
6719 + } while (0);
6720 +
6721 + do { /* PaX: unpatched PLT emulation step 2 */
6722 + unsigned int save, call, nop;
6723 +
6724 + err = get_user(save, (unsigned int *)(regs->pc-4));
6725 + err |= get_user(call, (unsigned int *)regs->pc);
6726 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6727 + if (err)
6728 + break;
6729 +
6730 + if (save == 0x9DE3BFA8U &&
6731 + (call & 0xC0000000U) == 0x40000000U &&
6732 + nop == 0x01000000U)
6733 + {
6734 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6735 +
6736 + regs->u_regs[UREG_RETPC] = regs->pc;
6737 + regs->pc = dl_resolve;
6738 + regs->npc = dl_resolve+4;
6739 + return 3;
6740 + }
6741 + } while (0);
6742 +#endif
6743 +
6744 + return 1;
6745 +}
6746 +
6747 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6748 +{
6749 + unsigned long i;
6750 +
6751 + printk(KERN_ERR "PAX: bytes at PC: ");
6752 + for (i = 0; i < 8; i++) {
6753 + unsigned int c;
6754 + if (get_user(c, (unsigned int *)pc+i))
6755 + printk(KERN_CONT "???????? ");
6756 + else
6757 + printk(KERN_CONT "%08x ", c);
6758 + }
6759 + printk("\n");
6760 +}
6761 +#endif
6762 +
6763 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6764 int text_fault)
6765 {
6766 @@ -282,6 +547,24 @@ good_area:
6767 if(!(vma->vm_flags & VM_WRITE))
6768 goto bad_area;
6769 } else {
6770 +
6771 +#ifdef CONFIG_PAX_PAGEEXEC
6772 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6773 + up_read(&mm->mmap_sem);
6774 + switch (pax_handle_fetch_fault(regs)) {
6775 +
6776 +#ifdef CONFIG_PAX_EMUPLT
6777 + case 2:
6778 + case 3:
6779 + return;
6780 +#endif
6781 +
6782 + }
6783 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6784 + do_group_exit(SIGKILL);
6785 + }
6786 +#endif
6787 +
6788 /* Allow reads even for write-only mappings */
6789 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6790 goto bad_area;
6791 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6792 index 1fe0429..aee2e87 100644
6793 --- a/arch/sparc/mm/fault_64.c
6794 +++ b/arch/sparc/mm/fault_64.c
6795 @@ -21,6 +21,9 @@
6796 #include <linux/kprobes.h>
6797 #include <linux/kdebug.h>
6798 #include <linux/percpu.h>
6799 +#include <linux/slab.h>
6800 +#include <linux/pagemap.h>
6801 +#include <linux/compiler.h>
6802
6803 #include <asm/page.h>
6804 #include <asm/pgtable.h>
6805 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6806 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6807 regs->tpc);
6808 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6809 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6810 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6811 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6812 dump_stack();
6813 unhandled_fault(regs->tpc, current, regs);
6814 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6815 show_regs(regs);
6816 }
6817
6818 +#ifdef CONFIG_PAX_PAGEEXEC
6819 +#ifdef CONFIG_PAX_DLRESOLVE
6820 +static void pax_emuplt_close(struct vm_area_struct *vma)
6821 +{
6822 + vma->vm_mm->call_dl_resolve = 0UL;
6823 +}
6824 +
6825 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6826 +{
6827 + unsigned int *kaddr;
6828 +
6829 + vmf->page = alloc_page(GFP_HIGHUSER);
6830 + if (!vmf->page)
6831 + return VM_FAULT_OOM;
6832 +
6833 + kaddr = kmap(vmf->page);
6834 + memset(kaddr, 0, PAGE_SIZE);
6835 + kaddr[0] = 0x9DE3BFA8U; /* save */
6836 + flush_dcache_page(vmf->page);
6837 + kunmap(vmf->page);
6838 + return VM_FAULT_MAJOR;
6839 +}
6840 +
6841 +static const struct vm_operations_struct pax_vm_ops = {
6842 + .close = pax_emuplt_close,
6843 + .fault = pax_emuplt_fault
6844 +};
6845 +
6846 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6847 +{
6848 + int ret;
6849 +
6850 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6851 + vma->vm_mm = current->mm;
6852 + vma->vm_start = addr;
6853 + vma->vm_end = addr + PAGE_SIZE;
6854 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6855 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6856 + vma->vm_ops = &pax_vm_ops;
6857 +
6858 + ret = insert_vm_struct(current->mm, vma);
6859 + if (ret)
6860 + return ret;
6861 +
6862 + ++current->mm->total_vm;
6863 + return 0;
6864 +}
6865 +#endif
6866 +
6867 +/*
6868 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6869 + *
6870 + * returns 1 when task should be killed
6871 + * 2 when patched PLT trampoline was detected
6872 + * 3 when unpatched PLT trampoline was detected
6873 + */
6874 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6875 +{
6876 +
6877 +#ifdef CONFIG_PAX_EMUPLT
6878 + int err;
6879 +
6880 + do { /* PaX: patched PLT emulation #1 */
6881 + unsigned int sethi1, sethi2, jmpl;
6882 +
6883 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6884 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6885 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6886 +
6887 + if (err)
6888 + break;
6889 +
6890 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6891 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6892 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6893 + {
6894 + unsigned long addr;
6895 +
6896 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6897 + addr = regs->u_regs[UREG_G1];
6898 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6899 +
6900 + if (test_thread_flag(TIF_32BIT))
6901 + addr &= 0xFFFFFFFFUL;
6902 +
6903 + regs->tpc = addr;
6904 + regs->tnpc = addr+4;
6905 + return 2;
6906 + }
6907 + } while (0);
6908 +
6909 + { /* PaX: patched PLT emulation #2 */
6910 + unsigned int ba;
6911 +
6912 + err = get_user(ba, (unsigned int *)regs->tpc);
6913 +
6914 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6915 + unsigned long addr;
6916 +
6917 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6918 +
6919 + if (test_thread_flag(TIF_32BIT))
6920 + addr &= 0xFFFFFFFFUL;
6921 +
6922 + regs->tpc = addr;
6923 + regs->tnpc = addr+4;
6924 + return 2;
6925 + }
6926 + }
6927 +
6928 + do { /* PaX: patched PLT emulation #3 */
6929 + unsigned int sethi, jmpl, nop;
6930 +
6931 + err = get_user(sethi, (unsigned int *)regs->tpc);
6932 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6933 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6934 +
6935 + if (err)
6936 + break;
6937 +
6938 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6939 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6940 + nop == 0x01000000U)
6941 + {
6942 + unsigned long addr;
6943 +
6944 + addr = (sethi & 0x003FFFFFU) << 10;
6945 + regs->u_regs[UREG_G1] = addr;
6946 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6947 +
6948 + if (test_thread_flag(TIF_32BIT))
6949 + addr &= 0xFFFFFFFFUL;
6950 +
6951 + regs->tpc = addr;
6952 + regs->tnpc = addr+4;
6953 + return 2;
6954 + }
6955 + } while (0);
6956 +
6957 + do { /* PaX: patched PLT emulation #4 */
6958 + unsigned int sethi, mov1, call, mov2;
6959 +
6960 + err = get_user(sethi, (unsigned int *)regs->tpc);
6961 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6962 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6963 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6964 +
6965 + if (err)
6966 + break;
6967 +
6968 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6969 + mov1 == 0x8210000FU &&
6970 + (call & 0xC0000000U) == 0x40000000U &&
6971 + mov2 == 0x9E100001U)
6972 + {
6973 + unsigned long addr;
6974 +
6975 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6976 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6977 +
6978 + if (test_thread_flag(TIF_32BIT))
6979 + addr &= 0xFFFFFFFFUL;
6980 +
6981 + regs->tpc = addr;
6982 + regs->tnpc = addr+4;
6983 + return 2;
6984 + }
6985 + } while (0);
6986 +
6987 + do { /* PaX: patched PLT emulation #5 */
6988 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6989 +
6990 + err = get_user(sethi, (unsigned int *)regs->tpc);
6991 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6992 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6993 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6994 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6995 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6996 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6997 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6998 +
6999 + if (err)
7000 + break;
7001 +
7002 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7003 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7004 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7005 + (or1 & 0xFFFFE000U) == 0x82106000U &&
7006 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7007 + sllx == 0x83287020U &&
7008 + jmpl == 0x81C04005U &&
7009 + nop == 0x01000000U)
7010 + {
7011 + unsigned long addr;
7012 +
7013 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7014 + regs->u_regs[UREG_G1] <<= 32;
7015 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7016 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7017 + regs->tpc = addr;
7018 + regs->tnpc = addr+4;
7019 + return 2;
7020 + }
7021 + } while (0);
7022 +
7023 + do { /* PaX: patched PLT emulation #6 */
7024 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7025 +
7026 + err = get_user(sethi, (unsigned int *)regs->tpc);
7027 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7028 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7029 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7030 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
7031 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7032 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7033 +
7034 + if (err)
7035 + break;
7036 +
7037 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7038 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7039 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7040 + sllx == 0x83287020U &&
7041 + (or & 0xFFFFE000U) == 0x8A116000U &&
7042 + jmpl == 0x81C04005U &&
7043 + nop == 0x01000000U)
7044 + {
7045 + unsigned long addr;
7046 +
7047 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7048 + regs->u_regs[UREG_G1] <<= 32;
7049 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7050 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7051 + regs->tpc = addr;
7052 + regs->tnpc = addr+4;
7053 + return 2;
7054 + }
7055 + } while (0);
7056 +
7057 + do { /* PaX: unpatched PLT emulation step 1 */
7058 + unsigned int sethi, ba, nop;
7059 +
7060 + err = get_user(sethi, (unsigned int *)regs->tpc);
7061 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7062 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7063 +
7064 + if (err)
7065 + break;
7066 +
7067 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7068 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7069 + nop == 0x01000000U)
7070 + {
7071 + unsigned long addr;
7072 + unsigned int save, call;
7073 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7074 +
7075 + if ((ba & 0xFFC00000U) == 0x30800000U)
7076 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7077 + else
7078 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7079 +
7080 + if (test_thread_flag(TIF_32BIT))
7081 + addr &= 0xFFFFFFFFUL;
7082 +
7083 + err = get_user(save, (unsigned int *)addr);
7084 + err |= get_user(call, (unsigned int *)(addr+4));
7085 + err |= get_user(nop, (unsigned int *)(addr+8));
7086 + if (err)
7087 + break;
7088 +
7089 +#ifdef CONFIG_PAX_DLRESOLVE
7090 + if (save == 0x9DE3BFA8U &&
7091 + (call & 0xC0000000U) == 0x40000000U &&
7092 + nop == 0x01000000U)
7093 + {
7094 + struct vm_area_struct *vma;
7095 + unsigned long call_dl_resolve;
7096 +
7097 + down_read(&current->mm->mmap_sem);
7098 + call_dl_resolve = current->mm->call_dl_resolve;
7099 + up_read(&current->mm->mmap_sem);
7100 + if (likely(call_dl_resolve))
7101 + goto emulate;
7102 +
7103 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7104 +
7105 + down_write(&current->mm->mmap_sem);
7106 + if (current->mm->call_dl_resolve) {
7107 + call_dl_resolve = current->mm->call_dl_resolve;
7108 + up_write(&current->mm->mmap_sem);
7109 + if (vma)
7110 + kmem_cache_free(vm_area_cachep, vma);
7111 + goto emulate;
7112 + }
7113 +
7114 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7115 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7116 + up_write(&current->mm->mmap_sem);
7117 + if (vma)
7118 + kmem_cache_free(vm_area_cachep, vma);
7119 + return 1;
7120 + }
7121 +
7122 + if (pax_insert_vma(vma, call_dl_resolve)) {
7123 + up_write(&current->mm->mmap_sem);
7124 + kmem_cache_free(vm_area_cachep, vma);
7125 + return 1;
7126 + }
7127 +
7128 + current->mm->call_dl_resolve = call_dl_resolve;
7129 + up_write(&current->mm->mmap_sem);
7130 +
7131 +emulate:
7132 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7133 + regs->tpc = call_dl_resolve;
7134 + regs->tnpc = addr+4;
7135 + return 3;
7136 + }
7137 +#endif
7138 +
7139 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7140 + if ((save & 0xFFC00000U) == 0x05000000U &&
7141 + (call & 0xFFFFE000U) == 0x85C0A000U &&
7142 + nop == 0x01000000U)
7143 + {
7144 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7145 + regs->u_regs[UREG_G2] = addr + 4;
7146 + addr = (save & 0x003FFFFFU) << 10;
7147 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7148 +
7149 + if (test_thread_flag(TIF_32BIT))
7150 + addr &= 0xFFFFFFFFUL;
7151 +
7152 + regs->tpc = addr;
7153 + regs->tnpc = addr+4;
7154 + return 3;
7155 + }
7156 +
7157 + /* PaX: 64-bit PLT stub */
7158 + err = get_user(sethi1, (unsigned int *)addr);
7159 + err |= get_user(sethi2, (unsigned int *)(addr+4));
7160 + err |= get_user(or1, (unsigned int *)(addr+8));
7161 + err |= get_user(or2, (unsigned int *)(addr+12));
7162 + err |= get_user(sllx, (unsigned int *)(addr+16));
7163 + err |= get_user(add, (unsigned int *)(addr+20));
7164 + err |= get_user(jmpl, (unsigned int *)(addr+24));
7165 + err |= get_user(nop, (unsigned int *)(addr+28));
7166 + if (err)
7167 + break;
7168 +
7169 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7170 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7171 + (or1 & 0xFFFFE000U) == 0x88112000U &&
7172 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7173 + sllx == 0x89293020U &&
7174 + add == 0x8A010005U &&
7175 + jmpl == 0x89C14000U &&
7176 + nop == 0x01000000U)
7177 + {
7178 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7179 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7180 + regs->u_regs[UREG_G4] <<= 32;
7181 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7182 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7183 + regs->u_regs[UREG_G4] = addr + 24;
7184 + addr = regs->u_regs[UREG_G5];
7185 + regs->tpc = addr;
7186 + regs->tnpc = addr+4;
7187 + return 3;
7188 + }
7189 + }
7190 + } while (0);
7191 +
7192 +#ifdef CONFIG_PAX_DLRESOLVE
7193 + do { /* PaX: unpatched PLT emulation step 2 */
7194 + unsigned int save, call, nop;
7195 +
7196 + err = get_user(save, (unsigned int *)(regs->tpc-4));
7197 + err |= get_user(call, (unsigned int *)regs->tpc);
7198 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7199 + if (err)
7200 + break;
7201 +
7202 + if (save == 0x9DE3BFA8U &&
7203 + (call & 0xC0000000U) == 0x40000000U &&
7204 + nop == 0x01000000U)
7205 + {
7206 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7207 +
7208 + if (test_thread_flag(TIF_32BIT))
7209 + dl_resolve &= 0xFFFFFFFFUL;
7210 +
7211 + regs->u_regs[UREG_RETPC] = regs->tpc;
7212 + regs->tpc = dl_resolve;
7213 + regs->tnpc = dl_resolve+4;
7214 + return 3;
7215 + }
7216 + } while (0);
7217 +#endif
7218 +
7219 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7220 + unsigned int sethi, ba, nop;
7221 +
7222 + err = get_user(sethi, (unsigned int *)regs->tpc);
7223 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7224 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7225 +
7226 + if (err)
7227 + break;
7228 +
7229 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7230 + (ba & 0xFFF00000U) == 0x30600000U &&
7231 + nop == 0x01000000U)
7232 + {
7233 + unsigned long addr;
7234 +
7235 + addr = (sethi & 0x003FFFFFU) << 10;
7236 + regs->u_regs[UREG_G1] = addr;
7237 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7238 +
7239 + if (test_thread_flag(TIF_32BIT))
7240 + addr &= 0xFFFFFFFFUL;
7241 +
7242 + regs->tpc = addr;
7243 + regs->tnpc = addr+4;
7244 + return 2;
7245 + }
7246 + } while (0);
7247 +
7248 +#endif
7249 +
7250 + return 1;
7251 +}
7252 +
7253 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7254 +{
7255 + unsigned long i;
7256 +
7257 + printk(KERN_ERR "PAX: bytes at PC: ");
7258 + for (i = 0; i < 8; i++) {
7259 + unsigned int c;
7260 + if (get_user(c, (unsigned int *)pc+i))
7261 + printk(KERN_CONT "???????? ");
7262 + else
7263 + printk(KERN_CONT "%08x ", c);
7264 + }
7265 + printk("\n");
7266 +}
7267 +#endif
7268 +
7269 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7270 {
7271 struct mm_struct *mm = current->mm;
7272 @@ -343,6 +797,29 @@ retry:
7273 if (!vma)
7274 goto bad_area;
7275
7276 +#ifdef CONFIG_PAX_PAGEEXEC
7277 + /* PaX: detect ITLB misses on non-exec pages */
7278 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7279 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7280 + {
7281 + if (address != regs->tpc)
7282 + goto good_area;
7283 +
7284 + up_read(&mm->mmap_sem);
7285 + switch (pax_handle_fetch_fault(regs)) {
7286 +
7287 +#ifdef CONFIG_PAX_EMUPLT
7288 + case 2:
7289 + case 3:
7290 + return;
7291 +#endif
7292 +
7293 + }
7294 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7295 + do_group_exit(SIGKILL);
7296 + }
7297 +#endif
7298 +
7299 /* Pure DTLB misses do not tell us whether the fault causing
7300 * load/store/atomic was a write or not, it only says that there
7301 * was no match. So in such a case we (carefully) read the
7302 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7303 index 07e1453..0a7d9e9 100644
7304 --- a/arch/sparc/mm/hugetlbpage.c
7305 +++ b/arch/sparc/mm/hugetlbpage.c
7306 @@ -67,7 +67,7 @@ full_search:
7307 }
7308 return -ENOMEM;
7309 }
7310 - if (likely(!vma || addr + len <= vma->vm_start)) {
7311 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7312 /*
7313 * Remember the place where we stopped the search:
7314 */
7315 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7316 /* make sure it can fit in the remaining address space */
7317 if (likely(addr > len)) {
7318 vma = find_vma(mm, addr-len);
7319 - if (!vma || addr <= vma->vm_start) {
7320 + if (check_heap_stack_gap(vma, addr - len, len)) {
7321 /* remember the address as a hint for next time */
7322 return (mm->free_area_cache = addr-len);
7323 }
7324 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7325 if (unlikely(mm->mmap_base < len))
7326 goto bottomup;
7327
7328 - addr = (mm->mmap_base-len) & HPAGE_MASK;
7329 + addr = mm->mmap_base - len;
7330
7331 do {
7332 + addr &= HPAGE_MASK;
7333 /*
7334 * Lookup failure means no vma is above this address,
7335 * else if new region fits below vma->vm_start,
7336 * return with success:
7337 */
7338 vma = find_vma(mm, addr);
7339 - if (likely(!vma || addr+len <= vma->vm_start)) {
7340 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7341 /* remember the address as a hint for next time */
7342 return (mm->free_area_cache = addr);
7343 }
7344 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7345 mm->cached_hole_size = vma->vm_start - addr;
7346
7347 /* try just below the current vma->vm_start */
7348 - addr = (vma->vm_start-len) & HPAGE_MASK;
7349 - } while (likely(len < vma->vm_start));
7350 + addr = skip_heap_stack_gap(vma, len);
7351 + } while (!IS_ERR_VALUE(addr));
7352
7353 bottomup:
7354 /*
7355 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7356 if (addr) {
7357 addr = ALIGN(addr, HPAGE_SIZE);
7358 vma = find_vma(mm, addr);
7359 - if (task_size - len >= addr &&
7360 - (!vma || addr + len <= vma->vm_start))
7361 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7362 return addr;
7363 }
7364 if (mm->get_unmapped_area == arch_get_unmapped_area)
7365 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7366 index c5f9021..7591bae 100644
7367 --- a/arch/sparc/mm/init_32.c
7368 +++ b/arch/sparc/mm/init_32.c
7369 @@ -315,6 +315,9 @@ extern void device_scan(void);
7370 pgprot_t PAGE_SHARED __read_mostly;
7371 EXPORT_SYMBOL(PAGE_SHARED);
7372
7373 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7374 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7375 +
7376 void __init paging_init(void)
7377 {
7378 switch(sparc_cpu_model) {
7379 @@ -343,17 +346,17 @@ void __init paging_init(void)
7380
7381 /* Initialize the protection map with non-constant, MMU dependent values. */
7382 protection_map[0] = PAGE_NONE;
7383 - protection_map[1] = PAGE_READONLY;
7384 - protection_map[2] = PAGE_COPY;
7385 - protection_map[3] = PAGE_COPY;
7386 + protection_map[1] = PAGE_READONLY_NOEXEC;
7387 + protection_map[2] = PAGE_COPY_NOEXEC;
7388 + protection_map[3] = PAGE_COPY_NOEXEC;
7389 protection_map[4] = PAGE_READONLY;
7390 protection_map[5] = PAGE_READONLY;
7391 protection_map[6] = PAGE_COPY;
7392 protection_map[7] = PAGE_COPY;
7393 protection_map[8] = PAGE_NONE;
7394 - protection_map[9] = PAGE_READONLY;
7395 - protection_map[10] = PAGE_SHARED;
7396 - protection_map[11] = PAGE_SHARED;
7397 + protection_map[9] = PAGE_READONLY_NOEXEC;
7398 + protection_map[10] = PAGE_SHARED_NOEXEC;
7399 + protection_map[11] = PAGE_SHARED_NOEXEC;
7400 protection_map[12] = PAGE_READONLY;
7401 protection_map[13] = PAGE_READONLY;
7402 protection_map[14] = PAGE_SHARED;
7403 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7404 index cbef74e..c38fead 100644
7405 --- a/arch/sparc/mm/srmmu.c
7406 +++ b/arch/sparc/mm/srmmu.c
7407 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7408 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7409 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7410 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7411 +
7412 +#ifdef CONFIG_PAX_PAGEEXEC
7413 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7414 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7415 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7416 +#endif
7417 +
7418 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7419 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7420
7421 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
7422 index f4500c6..889656c 100644
7423 --- a/arch/tile/include/asm/atomic_64.h
7424 +++ b/arch/tile/include/asm/atomic_64.h
7425 @@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7426
7427 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7428
7429 +#define atomic64_read_unchecked(v) atomic64_read(v)
7430 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7431 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7432 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7433 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7434 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
7435 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7436 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
7437 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7438 +
7439 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
7440 #define smp_mb__before_atomic_dec() smp_mb()
7441 #define smp_mb__after_atomic_dec() smp_mb()
7442 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
7443 index 392e533..536b092 100644
7444 --- a/arch/tile/include/asm/cache.h
7445 +++ b/arch/tile/include/asm/cache.h
7446 @@ -15,11 +15,12 @@
7447 #ifndef _ASM_TILE_CACHE_H
7448 #define _ASM_TILE_CACHE_H
7449
7450 +#include <linux/const.h>
7451 #include <arch/chip.h>
7452
7453 /* bytes per L1 data cache line */
7454 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
7455 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7456 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7457
7458 /* bytes per L2 cache line */
7459 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
7460 diff --git a/arch/um/Makefile b/arch/um/Makefile
7461 index 55c0661..86ad413 100644
7462 --- a/arch/um/Makefile
7463 +++ b/arch/um/Makefile
7464 @@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7465 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7466 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
7467
7468 +ifdef CONSTIFY_PLUGIN
7469 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7470 +endif
7471 +
7472 #This will adjust *FLAGS accordingly to the platform.
7473 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
7474
7475 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7476 index 19e1bdd..3665b77 100644
7477 --- a/arch/um/include/asm/cache.h
7478 +++ b/arch/um/include/asm/cache.h
7479 @@ -1,6 +1,7 @@
7480 #ifndef __UM_CACHE_H
7481 #define __UM_CACHE_H
7482
7483 +#include <linux/const.h>
7484
7485 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7486 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7487 @@ -12,6 +13,6 @@
7488 # define L1_CACHE_SHIFT 5
7489 #endif
7490
7491 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7492 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7493
7494 #endif
7495 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7496 index 6c03acd..a5e0215 100644
7497 --- a/arch/um/include/asm/kmap_types.h
7498 +++ b/arch/um/include/asm/kmap_types.h
7499 @@ -23,6 +23,7 @@ enum km_type {
7500 KM_IRQ1,
7501 KM_SOFTIRQ0,
7502 KM_SOFTIRQ1,
7503 + KM_CLEARPAGE,
7504 KM_TYPE_NR
7505 };
7506
7507 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7508 index 7cfc3ce..cbd1a58 100644
7509 --- a/arch/um/include/asm/page.h
7510 +++ b/arch/um/include/asm/page.h
7511 @@ -14,6 +14,9 @@
7512 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7513 #define PAGE_MASK (~(PAGE_SIZE-1))
7514
7515 +#define ktla_ktva(addr) (addr)
7516 +#define ktva_ktla(addr) (addr)
7517 +
7518 #ifndef __ASSEMBLY__
7519
7520 struct page;
7521 diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
7522 index 0032f92..cd151e0 100644
7523 --- a/arch/um/include/asm/pgtable-3level.h
7524 +++ b/arch/um/include/asm/pgtable-3level.h
7525 @@ -58,6 +58,7 @@
7526 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
7527 #define pud_populate(mm, pud, pmd) \
7528 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
7529 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7530
7531 #ifdef CONFIG_64BIT
7532 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
7533 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7534 index 2b73ded..804f540 100644
7535 --- a/arch/um/kernel/process.c
7536 +++ b/arch/um/kernel/process.c
7537 @@ -404,22 +404,6 @@ int singlestepping(void * t)
7538 return 2;
7539 }
7540
7541 -/*
7542 - * Only x86 and x86_64 have an arch_align_stack().
7543 - * All other arches have "#define arch_align_stack(x) (x)"
7544 - * in their asm/system.h
7545 - * As this is included in UML from asm-um/system-generic.h,
7546 - * we can use it to behave as the subarch does.
7547 - */
7548 -#ifndef arch_align_stack
7549 -unsigned long arch_align_stack(unsigned long sp)
7550 -{
7551 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7552 - sp -= get_random_int() % 8192;
7553 - return sp & ~0xf;
7554 -}
7555 -#endif
7556 -
7557 unsigned long get_wchan(struct task_struct *p)
7558 {
7559 unsigned long stack_page, sp, ip;
7560 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7561 index ad8f795..2c7eec6 100644
7562 --- a/arch/unicore32/include/asm/cache.h
7563 +++ b/arch/unicore32/include/asm/cache.h
7564 @@ -12,8 +12,10 @@
7565 #ifndef __UNICORE_CACHE_H__
7566 #define __UNICORE_CACHE_H__
7567
7568 -#define L1_CACHE_SHIFT (5)
7569 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7570 +#include <linux/const.h>
7571 +
7572 +#define L1_CACHE_SHIFT 5
7573 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7574
7575 /*
7576 * Memory returned by kmalloc() may be used for DMA, so we must make
7577 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7578 index c9866b0..fe53aef 100644
7579 --- a/arch/x86/Kconfig
7580 +++ b/arch/x86/Kconfig
7581 @@ -229,7 +229,7 @@ config X86_HT
7582
7583 config X86_32_LAZY_GS
7584 def_bool y
7585 - depends on X86_32 && !CC_STACKPROTECTOR
7586 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7587
7588 config ARCH_HWEIGHT_CFLAGS
7589 string
7590 @@ -1042,7 +1042,7 @@ choice
7591
7592 config NOHIGHMEM
7593 bool "off"
7594 - depends on !X86_NUMAQ
7595 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7596 ---help---
7597 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7598 However, the address space of 32-bit x86 processors is only 4
7599 @@ -1079,7 +1079,7 @@ config NOHIGHMEM
7600
7601 config HIGHMEM4G
7602 bool "4GB"
7603 - depends on !X86_NUMAQ
7604 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7605 ---help---
7606 Select this if you have a 32-bit processor and between 1 and 4
7607 gigabytes of physical RAM.
7608 @@ -1133,7 +1133,7 @@ config PAGE_OFFSET
7609 hex
7610 default 0xB0000000 if VMSPLIT_3G_OPT
7611 default 0x80000000 if VMSPLIT_2G
7612 - default 0x78000000 if VMSPLIT_2G_OPT
7613 + default 0x70000000 if VMSPLIT_2G_OPT
7614 default 0x40000000 if VMSPLIT_1G
7615 default 0xC0000000
7616 depends on X86_32
7617 @@ -1523,6 +1523,7 @@ config SECCOMP
7618
7619 config CC_STACKPROTECTOR
7620 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7621 + depends on X86_64 || !PAX_MEMORY_UDEREF
7622 ---help---
7623 This option turns on the -fstack-protector GCC feature. This
7624 feature puts, at the beginning of functions, a canary value on
7625 @@ -1580,6 +1581,7 @@ config KEXEC_JUMP
7626 config PHYSICAL_START
7627 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7628 default "0x1000000"
7629 + range 0x400000 0x40000000
7630 ---help---
7631 This gives the physical address where the kernel is loaded.
7632
7633 @@ -1643,6 +1645,7 @@ config X86_NEED_RELOCS
7634 config PHYSICAL_ALIGN
7635 hex "Alignment value to which kernel should be aligned" if X86_32
7636 default "0x1000000"
7637 + range 0x400000 0x1000000 if PAX_KERNEXEC
7638 range 0x2000 0x1000000
7639 ---help---
7640 This value puts the alignment restrictions on physical address
7641 @@ -1674,9 +1677,10 @@ config HOTPLUG_CPU
7642 Say N if you want to disable CPU hotplug.
7643
7644 config COMPAT_VDSO
7645 - def_bool y
7646 + def_bool n
7647 prompt "Compat VDSO support"
7648 depends on X86_32 || IA32_EMULATION
7649 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7650 ---help---
7651 Map the 32-bit VDSO to the predictable old-style address too.
7652
7653 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7654 index 706e12e..62e4feb 100644
7655 --- a/arch/x86/Kconfig.cpu
7656 +++ b/arch/x86/Kconfig.cpu
7657 @@ -334,7 +334,7 @@ config X86_PPRO_FENCE
7658
7659 config X86_F00F_BUG
7660 def_bool y
7661 - depends on M586MMX || M586TSC || M586 || M486 || M386
7662 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7663
7664 config X86_INVD_BUG
7665 def_bool y
7666 @@ -358,7 +358,7 @@ config X86_POPAD_OK
7667
7668 config X86_ALIGNMENT_16
7669 def_bool y
7670 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7671 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7672
7673 config X86_INTEL_USERCOPY
7674 def_bool y
7675 @@ -404,7 +404,7 @@ config X86_CMPXCHG64
7676 # generates cmov.
7677 config X86_CMOV
7678 def_bool y
7679 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7680 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7681
7682 config X86_MINIMUM_CPU_FAMILY
7683 int
7684 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7685 index e46c214..7c72b55 100644
7686 --- a/arch/x86/Kconfig.debug
7687 +++ b/arch/x86/Kconfig.debug
7688 @@ -84,7 +84,7 @@ config X86_PTDUMP
7689 config DEBUG_RODATA
7690 bool "Write protect kernel read-only data structures"
7691 default y
7692 - depends on DEBUG_KERNEL
7693 + depends on DEBUG_KERNEL && BROKEN
7694 ---help---
7695 Mark the kernel read-only data as write-protected in the pagetables,
7696 in order to catch accidental (and incorrect) writes to such const
7697 @@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7698
7699 config DEBUG_SET_MODULE_RONX
7700 bool "Set loadable kernel module data as NX and text as RO"
7701 - depends on MODULES
7702 + depends on MODULES && BROKEN
7703 ---help---
7704 This option helps catch unintended modifications to loadable
7705 kernel module's text and read-only data. It also prevents execution
7706 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7707 index b1c611e..2c1a823 100644
7708 --- a/arch/x86/Makefile
7709 +++ b/arch/x86/Makefile
7710 @@ -46,6 +46,7 @@ else
7711 UTS_MACHINE := x86_64
7712 CHECKFLAGS += -D__x86_64__ -m64
7713
7714 + biarch := $(call cc-option,-m64)
7715 KBUILD_AFLAGS += -m64
7716 KBUILD_CFLAGS += -m64
7717
7718 @@ -222,3 +223,12 @@ define archhelp
7719 echo ' FDARGS="..." arguments for the booted kernel'
7720 echo ' FDINITRD=file initrd for the booted kernel'
7721 endef
7722 +
7723 +define OLD_LD
7724 +
7725 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7726 +*** Please upgrade your binutils to 2.18 or newer
7727 +endef
7728 +
7729 +archprepare:
7730 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7731 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7732 index 5a747dd..ff7b12c 100644
7733 --- a/arch/x86/boot/Makefile
7734 +++ b/arch/x86/boot/Makefile
7735 @@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7736 $(call cc-option, -fno-stack-protector) \
7737 $(call cc-option, -mpreferred-stack-boundary=2)
7738 KBUILD_CFLAGS += $(call cc-option, -m32)
7739 +ifdef CONSTIFY_PLUGIN
7740 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7741 +endif
7742 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7743 GCOV_PROFILE := n
7744
7745 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7746 index 878e4b9..20537ab 100644
7747 --- a/arch/x86/boot/bitops.h
7748 +++ b/arch/x86/boot/bitops.h
7749 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7750 u8 v;
7751 const u32 *p = (const u32 *)addr;
7752
7753 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7754 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7755 return v;
7756 }
7757
7758 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7759
7760 static inline void set_bit(int nr, void *addr)
7761 {
7762 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7763 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7764 }
7765
7766 #endif /* BOOT_BITOPS_H */
7767 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7768 index 18997e5..83d9c67 100644
7769 --- a/arch/x86/boot/boot.h
7770 +++ b/arch/x86/boot/boot.h
7771 @@ -85,7 +85,7 @@ static inline void io_delay(void)
7772 static inline u16 ds(void)
7773 {
7774 u16 seg;
7775 - asm("movw %%ds,%0" : "=rm" (seg));
7776 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7777 return seg;
7778 }
7779
7780 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7781 static inline int memcmp(const void *s1, const void *s2, size_t len)
7782 {
7783 u8 diff;
7784 - asm("repe; cmpsb; setnz %0"
7785 + asm volatile("repe; cmpsb; setnz %0"
7786 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7787 return diff;
7788 }
7789 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7790 index e398bb5..3a382ca 100644
7791 --- a/arch/x86/boot/compressed/Makefile
7792 +++ b/arch/x86/boot/compressed/Makefile
7793 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7794 KBUILD_CFLAGS += $(cflags-y)
7795 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7796 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7797 +ifdef CONSTIFY_PLUGIN
7798 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7799 +endif
7800
7801 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7802 GCOV_PROFILE := n
7803 diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
7804 index 0cdfc0d..6e79437 100644
7805 --- a/arch/x86/boot/compressed/eboot.c
7806 +++ b/arch/x86/boot/compressed/eboot.c
7807 @@ -122,7 +122,6 @@ again:
7808 *addr = max_addr;
7809 }
7810
7811 -free_pool:
7812 efi_call_phys1(sys_table->boottime->free_pool, map);
7813
7814 fail:
7815 @@ -186,7 +185,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
7816 if (i == map_size / desc_size)
7817 status = EFI_NOT_FOUND;
7818
7819 -free_pool:
7820 efi_call_phys1(sys_table->boottime->free_pool, map);
7821 fail:
7822 return status;
7823 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7824 index c85e3ac..6f5aa80 100644
7825 --- a/arch/x86/boot/compressed/head_32.S
7826 +++ b/arch/x86/boot/compressed/head_32.S
7827 @@ -106,7 +106,7 @@ preferred_addr:
7828 notl %eax
7829 andl %eax, %ebx
7830 #else
7831 - movl $LOAD_PHYSICAL_ADDR, %ebx
7832 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7833 #endif
7834
7835 /* Target address to relocate to for decompression */
7836 @@ -192,7 +192,7 @@ relocated:
7837 * and where it was actually loaded.
7838 */
7839 movl %ebp, %ebx
7840 - subl $LOAD_PHYSICAL_ADDR, %ebx
7841 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7842 jz 2f /* Nothing to be done if loaded at compiled addr. */
7843 /*
7844 * Process relocations.
7845 @@ -200,8 +200,7 @@ relocated:
7846
7847 1: subl $4, %edi
7848 movl (%edi), %ecx
7849 - testl %ecx, %ecx
7850 - jz 2f
7851 + jecxz 2f
7852 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7853 jmp 1b
7854 2:
7855 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7856 index 87e03a1..0d94c76 100644
7857 --- a/arch/x86/boot/compressed/head_64.S
7858 +++ b/arch/x86/boot/compressed/head_64.S
7859 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7860 notl %eax
7861 andl %eax, %ebx
7862 #else
7863 - movl $LOAD_PHYSICAL_ADDR, %ebx
7864 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7865 #endif
7866
7867 /* Target address to relocate to for decompression */
7868 @@ -263,7 +263,7 @@ preferred_addr:
7869 notq %rax
7870 andq %rax, %rbp
7871 #else
7872 - movq $LOAD_PHYSICAL_ADDR, %rbp
7873 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7874 #endif
7875
7876 /* Target address to relocate to for decompression */
7877 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7878 index 7116dcb..d9ae1d7 100644
7879 --- a/arch/x86/boot/compressed/misc.c
7880 +++ b/arch/x86/boot/compressed/misc.c
7881 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
7882 case PT_LOAD:
7883 #ifdef CONFIG_RELOCATABLE
7884 dest = output;
7885 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7886 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7887 #else
7888 dest = (void *)(phdr->p_paddr);
7889 #endif
7890 @@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7891 error("Destination address too large");
7892 #endif
7893 #ifndef CONFIG_RELOCATABLE
7894 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7895 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7896 error("Wrong destination address");
7897 #endif
7898
7899 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7900 index 4d3ff03..e4972ff 100644
7901 --- a/arch/x86/boot/cpucheck.c
7902 +++ b/arch/x86/boot/cpucheck.c
7903 @@ -74,7 +74,7 @@ static int has_fpu(void)
7904 u16 fcw = -1, fsw = -1;
7905 u32 cr0;
7906
7907 - asm("movl %%cr0,%0" : "=r" (cr0));
7908 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7909 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7910 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7911 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7912 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7913 {
7914 u32 f0, f1;
7915
7916 - asm("pushfl ; "
7917 + asm volatile("pushfl ; "
7918 "pushfl ; "
7919 "popl %0 ; "
7920 "movl %0,%1 ; "
7921 @@ -115,7 +115,7 @@ static void get_flags(void)
7922 set_bit(X86_FEATURE_FPU, cpu.flags);
7923
7924 if (has_eflag(X86_EFLAGS_ID)) {
7925 - asm("cpuid"
7926 + asm volatile("cpuid"
7927 : "=a" (max_intel_level),
7928 "=b" (cpu_vendor[0]),
7929 "=d" (cpu_vendor[1]),
7930 @@ -124,7 +124,7 @@ static void get_flags(void)
7931
7932 if (max_intel_level >= 0x00000001 &&
7933 max_intel_level <= 0x0000ffff) {
7934 - asm("cpuid"
7935 + asm volatile("cpuid"
7936 : "=a" (tfms),
7937 "=c" (cpu.flags[4]),
7938 "=d" (cpu.flags[0])
7939 @@ -136,7 +136,7 @@ static void get_flags(void)
7940 cpu.model += ((tfms >> 16) & 0xf) << 4;
7941 }
7942
7943 - asm("cpuid"
7944 + asm volatile("cpuid"
7945 : "=a" (max_amd_level)
7946 : "a" (0x80000000)
7947 : "ebx", "ecx", "edx");
7948 @@ -144,7 +144,7 @@ static void get_flags(void)
7949 if (max_amd_level >= 0x80000001 &&
7950 max_amd_level <= 0x8000ffff) {
7951 u32 eax = 0x80000001;
7952 - asm("cpuid"
7953 + asm volatile("cpuid"
7954 : "+a" (eax),
7955 "=c" (cpu.flags[6]),
7956 "=d" (cpu.flags[1])
7957 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7958 u32 ecx = MSR_K7_HWCR;
7959 u32 eax, edx;
7960
7961 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7962 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7963 eax &= ~(1 << 15);
7964 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7965 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7966
7967 get_flags(); /* Make sure it really did something */
7968 err = check_flags();
7969 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7970 u32 ecx = MSR_VIA_FCR;
7971 u32 eax, edx;
7972
7973 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7974 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7975 eax |= (1<<1)|(1<<7);
7976 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7977 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7978
7979 set_bit(X86_FEATURE_CX8, cpu.flags);
7980 err = check_flags();
7981 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7982 u32 eax, edx;
7983 u32 level = 1;
7984
7985 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7986 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7987 - asm("cpuid"
7988 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7989 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7990 + asm volatile("cpuid"
7991 : "+a" (level), "=d" (cpu.flags[0])
7992 : : "ecx", "ebx");
7993 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7994 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7995
7996 err = check_flags();
7997 }
7998 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7999 index f1bbeeb..aff09cb 100644
8000 --- a/arch/x86/boot/header.S
8001 +++ b/arch/x86/boot/header.S
8002 @@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
8003 # single linked list of
8004 # struct setup_data
8005
8006 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8007 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8008
8009 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8010 #define VO_INIT_SIZE (VO__end - VO__text)
8011 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8012 index db75d07..8e6d0af 100644
8013 --- a/arch/x86/boot/memory.c
8014 +++ b/arch/x86/boot/memory.c
8015 @@ -19,7 +19,7 @@
8016
8017 static int detect_memory_e820(void)
8018 {
8019 - int count = 0;
8020 + unsigned int count = 0;
8021 struct biosregs ireg, oreg;
8022 struct e820entry *desc = boot_params.e820_map;
8023 static struct e820entry buf; /* static so it is zeroed */
8024 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8025 index 11e8c6e..fdbb1ed 100644
8026 --- a/arch/x86/boot/video-vesa.c
8027 +++ b/arch/x86/boot/video-vesa.c
8028 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8029
8030 boot_params.screen_info.vesapm_seg = oreg.es;
8031 boot_params.screen_info.vesapm_off = oreg.di;
8032 + boot_params.screen_info.vesapm_size = oreg.cx;
8033 }
8034
8035 /*
8036 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8037 index 43eda28..5ab5fdb 100644
8038 --- a/arch/x86/boot/video.c
8039 +++ b/arch/x86/boot/video.c
8040 @@ -96,7 +96,7 @@ static void store_mode_params(void)
8041 static unsigned int get_entry(void)
8042 {
8043 char entry_buf[4];
8044 - int i, len = 0;
8045 + unsigned int i, len = 0;
8046 int key;
8047 unsigned int v;
8048
8049 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8050 index 5b577d5..3c1fed4 100644
8051 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
8052 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8053 @@ -8,6 +8,8 @@
8054 * including this sentence is retained in full.
8055 */
8056
8057 +#include <asm/alternative-asm.h>
8058 +
8059 .extern crypto_ft_tab
8060 .extern crypto_it_tab
8061 .extern crypto_fl_tab
8062 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8063 je B192; \
8064 leaq 32(r9),r9;
8065
8066 +#define ret pax_force_retaddr 0, 1; ret
8067 +
8068 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8069 movq r1,r2; \
8070 movq r3,r4; \
8071 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8072 index be6d9e3..21fbbca 100644
8073 --- a/arch/x86/crypto/aesni-intel_asm.S
8074 +++ b/arch/x86/crypto/aesni-intel_asm.S
8075 @@ -31,6 +31,7 @@
8076
8077 #include <linux/linkage.h>
8078 #include <asm/inst.h>
8079 +#include <asm/alternative-asm.h>
8080
8081 #ifdef __x86_64__
8082 .data
8083 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
8084 pop %r14
8085 pop %r13
8086 pop %r12
8087 + pax_force_retaddr 0, 1
8088 ret
8089 +ENDPROC(aesni_gcm_dec)
8090
8091
8092 /*****************************************************************************
8093 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
8094 pop %r14
8095 pop %r13
8096 pop %r12
8097 + pax_force_retaddr 0, 1
8098 ret
8099 +ENDPROC(aesni_gcm_enc)
8100
8101 #endif
8102
8103 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
8104 pxor %xmm1, %xmm0
8105 movaps %xmm0, (TKEYP)
8106 add $0x10, TKEYP
8107 + pax_force_retaddr_bts
8108 ret
8109
8110 .align 4
8111 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
8112 shufps $0b01001110, %xmm2, %xmm1
8113 movaps %xmm1, 0x10(TKEYP)
8114 add $0x20, TKEYP
8115 + pax_force_retaddr_bts
8116 ret
8117
8118 .align 4
8119 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
8120
8121 movaps %xmm0, (TKEYP)
8122 add $0x10, TKEYP
8123 + pax_force_retaddr_bts
8124 ret
8125
8126 .align 4
8127 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
8128 pxor %xmm1, %xmm2
8129 movaps %xmm2, (TKEYP)
8130 add $0x10, TKEYP
8131 + pax_force_retaddr_bts
8132 ret
8133
8134 /*
8135 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
8136 #ifndef __x86_64__
8137 popl KEYP
8138 #endif
8139 + pax_force_retaddr 0, 1
8140 ret
8141 +ENDPROC(aesni_set_key)
8142
8143 /*
8144 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8145 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
8146 popl KLEN
8147 popl KEYP
8148 #endif
8149 + pax_force_retaddr 0, 1
8150 ret
8151 +ENDPROC(aesni_enc)
8152
8153 /*
8154 * _aesni_enc1: internal ABI
8155 @@ -1959,6 +1972,7 @@ _aesni_enc1:
8156 AESENC KEY STATE
8157 movaps 0x70(TKEYP), KEY
8158 AESENCLAST KEY STATE
8159 + pax_force_retaddr_bts
8160 ret
8161
8162 /*
8163 @@ -2067,6 +2081,7 @@ _aesni_enc4:
8164 AESENCLAST KEY STATE2
8165 AESENCLAST KEY STATE3
8166 AESENCLAST KEY STATE4
8167 + pax_force_retaddr_bts
8168 ret
8169
8170 /*
8171 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
8172 popl KLEN
8173 popl KEYP
8174 #endif
8175 + pax_force_retaddr 0, 1
8176 ret
8177 +ENDPROC(aesni_dec)
8178
8179 /*
8180 * _aesni_dec1: internal ABI
8181 @@ -2146,6 +2163,7 @@ _aesni_dec1:
8182 AESDEC KEY STATE
8183 movaps 0x70(TKEYP), KEY
8184 AESDECLAST KEY STATE
8185 + pax_force_retaddr_bts
8186 ret
8187
8188 /*
8189 @@ -2254,6 +2272,7 @@ _aesni_dec4:
8190 AESDECLAST KEY STATE2
8191 AESDECLAST KEY STATE3
8192 AESDECLAST KEY STATE4
8193 + pax_force_retaddr_bts
8194 ret
8195
8196 /*
8197 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
8198 popl KEYP
8199 popl LEN
8200 #endif
8201 + pax_force_retaddr 0, 1
8202 ret
8203 +ENDPROC(aesni_ecb_enc)
8204
8205 /*
8206 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8207 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
8208 popl KEYP
8209 popl LEN
8210 #endif
8211 + pax_force_retaddr 0, 1
8212 ret
8213 +ENDPROC(aesni_ecb_dec)
8214
8215 /*
8216 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8217 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
8218 popl LEN
8219 popl IVP
8220 #endif
8221 + pax_force_retaddr 0, 1
8222 ret
8223 +ENDPROC(aesni_cbc_enc)
8224
8225 /*
8226 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8227 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
8228 popl LEN
8229 popl IVP
8230 #endif
8231 + pax_force_retaddr 0, 1
8232 ret
8233 +ENDPROC(aesni_cbc_dec)
8234
8235 #ifdef __x86_64__
8236 .align 16
8237 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
8238 mov $1, TCTR_LOW
8239 MOVQ_R64_XMM TCTR_LOW INC
8240 MOVQ_R64_XMM CTR TCTR_LOW
8241 + pax_force_retaddr_bts
8242 ret
8243
8244 /*
8245 @@ -2552,6 +2580,7 @@ _aesni_inc:
8246 .Linc_low:
8247 movaps CTR, IV
8248 PSHUFB_XMM BSWAP_MASK IV
8249 + pax_force_retaddr_bts
8250 ret
8251
8252 /*
8253 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
8254 .Lctr_enc_ret:
8255 movups IV, (IVP)
8256 .Lctr_enc_just_ret:
8257 + pax_force_retaddr 0, 1
8258 ret
8259 +ENDPROC(aesni_ctr_enc)
8260 #endif
8261 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8262 index 391d245..67f35c2 100644
8263 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
8264 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8265 @@ -20,6 +20,8 @@
8266 *
8267 */
8268
8269 +#include <asm/alternative-asm.h>
8270 +
8271 .file "blowfish-x86_64-asm.S"
8272 .text
8273
8274 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
8275 jnz __enc_xor;
8276
8277 write_block();
8278 + pax_force_retaddr 0, 1
8279 ret;
8280 __enc_xor:
8281 xor_block();
8282 + pax_force_retaddr 0, 1
8283 ret;
8284
8285 .align 8
8286 @@ -188,6 +192,7 @@ blowfish_dec_blk:
8287
8288 movq %r11, %rbp;
8289
8290 + pax_force_retaddr 0, 1
8291 ret;
8292
8293 /**********************************************************************
8294 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
8295
8296 popq %rbx;
8297 popq %rbp;
8298 + pax_force_retaddr 0, 1
8299 ret;
8300
8301 __enc_xor4:
8302 @@ -349,6 +355,7 @@ __enc_xor4:
8303
8304 popq %rbx;
8305 popq %rbp;
8306 + pax_force_retaddr 0, 1
8307 ret;
8308
8309 .align 8
8310 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
8311 popq %rbx;
8312 popq %rbp;
8313
8314 + pax_force_retaddr 0, 1
8315 ret;
8316
8317 diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
8318 index 0b33743..7a56206 100644
8319 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S
8320 +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
8321 @@ -20,6 +20,8 @@
8322 *
8323 */
8324
8325 +#include <asm/alternative-asm.h>
8326 +
8327 .file "camellia-x86_64-asm_64.S"
8328 .text
8329
8330 @@ -229,12 +231,14 @@ __enc_done:
8331 enc_outunpack(mov, RT1);
8332
8333 movq RRBP, %rbp;
8334 + pax_force_retaddr 0, 1
8335 ret;
8336
8337 __enc_xor:
8338 enc_outunpack(xor, RT1);
8339
8340 movq RRBP, %rbp;
8341 + pax_force_retaddr 0, 1
8342 ret;
8343
8344 .global camellia_dec_blk;
8345 @@ -275,6 +279,7 @@ __dec_rounds16:
8346 dec_outunpack();
8347
8348 movq RRBP, %rbp;
8349 + pax_force_retaddr 0, 1
8350 ret;
8351
8352 /**********************************************************************
8353 @@ -468,6 +473,7 @@ __enc2_done:
8354
8355 movq RRBP, %rbp;
8356 popq %rbx;
8357 + pax_force_retaddr 0, 1
8358 ret;
8359
8360 __enc2_xor:
8361 @@ -475,6 +481,7 @@ __enc2_xor:
8362
8363 movq RRBP, %rbp;
8364 popq %rbx;
8365 + pax_force_retaddr 0, 1
8366 ret;
8367
8368 .global camellia_dec_blk_2way;
8369 @@ -517,4 +524,5 @@ __dec2_rounds16:
8370
8371 movq RRBP, %rbp;
8372 movq RXOR, %rbx;
8373 + pax_force_retaddr 0, 1
8374 ret;
8375 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8376 index 6214a9b..1f4fc9a 100644
8377 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8378 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8379 @@ -1,3 +1,5 @@
8380 +#include <asm/alternative-asm.h>
8381 +
8382 # enter ECRYPT_encrypt_bytes
8383 .text
8384 .p2align 5
8385 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8386 add %r11,%rsp
8387 mov %rdi,%rax
8388 mov %rsi,%rdx
8389 + pax_force_retaddr 0, 1
8390 ret
8391 # bytesatleast65:
8392 ._bytesatleast65:
8393 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
8394 add %r11,%rsp
8395 mov %rdi,%rax
8396 mov %rsi,%rdx
8397 + pax_force_retaddr
8398 ret
8399 # enter ECRYPT_ivsetup
8400 .text
8401 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8402 add %r11,%rsp
8403 mov %rdi,%rax
8404 mov %rsi,%rdx
8405 + pax_force_retaddr
8406 ret
8407 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8408 index 3ee1ff0..cbc568b 100644
8409 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8410 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8411 @@ -24,6 +24,8 @@
8412 *
8413 */
8414
8415 +#include <asm/alternative-asm.h>
8416 +
8417 .file "serpent-sse2-x86_64-asm_64.S"
8418 .text
8419
8420 @@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
8421 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8422 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8423
8424 + pax_force_retaddr
8425 ret;
8426
8427 __enc_xor8:
8428 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8429 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8430
8431 + pax_force_retaddr
8432 ret;
8433
8434 .align 8
8435 @@ -755,4 +759,5 @@ serpent_dec_blk_8way:
8436 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8437 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8438
8439 + pax_force_retaddr
8440 ret;
8441 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8442 index b2c2f57..8470cab 100644
8443 --- a/arch/x86/crypto/sha1_ssse3_asm.S
8444 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
8445 @@ -28,6 +28,8 @@
8446 * (at your option) any later version.
8447 */
8448
8449 +#include <asm/alternative-asm.h>
8450 +
8451 #define CTX %rdi // arg1
8452 #define BUF %rsi // arg2
8453 #define CNT %rdx // arg3
8454 @@ -104,6 +106,7 @@
8455 pop %r12
8456 pop %rbp
8457 pop %rbx
8458 + pax_force_retaddr 0, 1
8459 ret
8460
8461 .size \name, .-\name
8462 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8463 index 5b012a2..36d5364 100644
8464 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8465 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8466 @@ -20,6 +20,8 @@
8467 *
8468 */
8469
8470 +#include <asm/alternative-asm.h>
8471 +
8472 .file "twofish-x86_64-asm-3way.S"
8473 .text
8474
8475 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8476 popq %r13;
8477 popq %r14;
8478 popq %r15;
8479 + pax_force_retaddr 0, 1
8480 ret;
8481
8482 __enc_xor3:
8483 @@ -271,6 +274,7 @@ __enc_xor3:
8484 popq %r13;
8485 popq %r14;
8486 popq %r15;
8487 + pax_force_retaddr 0, 1
8488 ret;
8489
8490 .global twofish_dec_blk_3way
8491 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8492 popq %r13;
8493 popq %r14;
8494 popq %r15;
8495 + pax_force_retaddr 0, 1
8496 ret;
8497
8498 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8499 index 7bcf3fc..f53832f 100644
8500 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8501 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8502 @@ -21,6 +21,7 @@
8503 .text
8504
8505 #include <asm/asm-offsets.h>
8506 +#include <asm/alternative-asm.h>
8507
8508 #define a_offset 0
8509 #define b_offset 4
8510 @@ -268,6 +269,7 @@ twofish_enc_blk:
8511
8512 popq R1
8513 movq $1,%rax
8514 + pax_force_retaddr 0, 1
8515 ret
8516
8517 twofish_dec_blk:
8518 @@ -319,4 +321,5 @@ twofish_dec_blk:
8519
8520 popq R1
8521 movq $1,%rax
8522 + pax_force_retaddr 0, 1
8523 ret
8524 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8525 index 07b3a68..bd2a388 100644
8526 --- a/arch/x86/ia32/ia32_aout.c
8527 +++ b/arch/x86/ia32/ia32_aout.c
8528 @@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8529 unsigned long dump_start, dump_size;
8530 struct user32 dump;
8531
8532 + memset(&dump, 0, sizeof(dump));
8533 +
8534 fs = get_fs();
8535 set_fs(KERNEL_DS);
8536 has_dumped = 1;
8537 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8538 index a69245b..6d145f4 100644
8539 --- a/arch/x86/ia32/ia32_signal.c
8540 +++ b/arch/x86/ia32/ia32_signal.c
8541 @@ -168,7 +168,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8542 }
8543 seg = get_fs();
8544 set_fs(KERNEL_DS);
8545 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8546 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8547 set_fs(seg);
8548 if (ret >= 0 && uoss_ptr) {
8549 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8550 @@ -369,7 +369,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8551 */
8552 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8553 size_t frame_size,
8554 - void **fpstate)
8555 + void __user **fpstate)
8556 {
8557 unsigned long sp;
8558
8559 @@ -390,7 +390,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8560
8561 if (used_math()) {
8562 sp = sp - sig_xstate_ia32_size;
8563 - *fpstate = (struct _fpstate_ia32 *) sp;
8564 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8565 if (save_i387_xstate_ia32(*fpstate) < 0)
8566 return (void __user *) -1L;
8567 }
8568 @@ -398,7 +398,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8569 sp -= frame_size;
8570 /* Align the stack pointer according to the i386 ABI,
8571 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8572 - sp = ((sp + 4) & -16ul) - 4;
8573 + sp = ((sp - 12) & -16ul) - 4;
8574 return (void __user *) sp;
8575 }
8576
8577 @@ -456,7 +456,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8578 * These are actually not used anymore, but left because some
8579 * gdb versions depend on them as a marker.
8580 */
8581 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8582 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8583 } put_user_catch(err);
8584
8585 if (err)
8586 @@ -498,7 +498,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8587 0xb8,
8588 __NR_ia32_rt_sigreturn,
8589 0x80cd,
8590 - 0,
8591 + 0
8592 };
8593
8594 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8595 @@ -528,16 +528,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8596
8597 if (ka->sa.sa_flags & SA_RESTORER)
8598 restorer = ka->sa.sa_restorer;
8599 + else if (current->mm->context.vdso)
8600 + /* Return stub is in 32bit vsyscall page */
8601 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8602 else
8603 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8604 - rt_sigreturn);
8605 + restorer = &frame->retcode;
8606 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8607
8608 /*
8609 * Not actually used anymore, but left because some gdb
8610 * versions need it.
8611 */
8612 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8613 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8614 } put_user_catch(err);
8615
8616 if (err)
8617 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8618 index e3e7340..05ed805 100644
8619 --- a/arch/x86/ia32/ia32entry.S
8620 +++ b/arch/x86/ia32/ia32entry.S
8621 @@ -13,8 +13,10 @@
8622 #include <asm/thread_info.h>
8623 #include <asm/segment.h>
8624 #include <asm/irqflags.h>
8625 +#include <asm/pgtable.h>
8626 #include <linux/linkage.h>
8627 #include <linux/err.h>
8628 +#include <asm/alternative-asm.h>
8629
8630 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8631 #include <linux/elf-em.h>
8632 @@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8633 ENDPROC(native_irq_enable_sysexit)
8634 #endif
8635
8636 + .macro pax_enter_kernel_user
8637 + pax_set_fptr_mask
8638 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8639 + call pax_enter_kernel_user
8640 +#endif
8641 + .endm
8642 +
8643 + .macro pax_exit_kernel_user
8644 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8645 + call pax_exit_kernel_user
8646 +#endif
8647 +#ifdef CONFIG_PAX_RANDKSTACK
8648 + pushq %rax
8649 + pushq %r11
8650 + call pax_randomize_kstack
8651 + popq %r11
8652 + popq %rax
8653 +#endif
8654 + .endm
8655 +
8656 +.macro pax_erase_kstack
8657 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8658 + call pax_erase_kstack
8659 +#endif
8660 +.endm
8661 +
8662 /*
8663 * 32bit SYSENTER instruction entry.
8664 *
8665 @@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8666 CFI_REGISTER rsp,rbp
8667 SWAPGS_UNSAFE_STACK
8668 movq PER_CPU_VAR(kernel_stack), %rsp
8669 - addq $(KERNEL_STACK_OFFSET),%rsp
8670 - /*
8671 - * No need to follow this irqs on/off section: the syscall
8672 - * disabled irqs, here we enable it straight after entry:
8673 - */
8674 - ENABLE_INTERRUPTS(CLBR_NONE)
8675 movl %ebp,%ebp /* zero extension */
8676 pushq_cfi $__USER32_DS
8677 /*CFI_REL_OFFSET ss,0*/
8678 @@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8679 CFI_REL_OFFSET rsp,0
8680 pushfq_cfi
8681 /*CFI_REL_OFFSET rflags,0*/
8682 - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8683 - CFI_REGISTER rip,r10
8684 + orl $X86_EFLAGS_IF,(%rsp)
8685 + GET_THREAD_INFO(%r11)
8686 + movl TI_sysenter_return(%r11), %r11d
8687 + CFI_REGISTER rip,r11
8688 pushq_cfi $__USER32_CS
8689 /*CFI_REL_OFFSET cs,0*/
8690 movl %eax, %eax
8691 - pushq_cfi %r10
8692 + pushq_cfi %r11
8693 CFI_REL_OFFSET rip,0
8694 pushq_cfi %rax
8695 cld
8696 SAVE_ARGS 0,1,0
8697 + pax_enter_kernel_user
8698 + /*
8699 + * No need to follow this irqs on/off section: the syscall
8700 + * disabled irqs, here we enable it straight after entry:
8701 + */
8702 + ENABLE_INTERRUPTS(CLBR_NONE)
8703 /* no need to do an access_ok check here because rbp has been
8704 32bit zero extended */
8705 +
8706 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8707 + mov $PAX_USER_SHADOW_BASE,%r11
8708 + add %r11,%rbp
8709 +#endif
8710 +
8711 1: movl (%rbp),%ebp
8712 .section __ex_table,"a"
8713 .quad 1b,ia32_badarg
8714 .previous
8715 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8716 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8717 + GET_THREAD_INFO(%r11)
8718 + orl $TS_COMPAT,TI_status(%r11)
8719 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8720 CFI_REMEMBER_STATE
8721 jnz sysenter_tracesys
8722 cmpq $(IA32_NR_syscalls-1),%rax
8723 @@ -160,12 +197,15 @@ sysenter_do_call:
8724 sysenter_dispatch:
8725 call *ia32_sys_call_table(,%rax,8)
8726 movq %rax,RAX-ARGOFFSET(%rsp)
8727 + GET_THREAD_INFO(%r11)
8728 DISABLE_INTERRUPTS(CLBR_NONE)
8729 TRACE_IRQS_OFF
8730 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8731 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8732 jnz sysexit_audit
8733 sysexit_from_sys_call:
8734 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8735 + pax_exit_kernel_user
8736 + pax_erase_kstack
8737 + andl $~TS_COMPAT,TI_status(%r11)
8738 /* clear IF, that popfq doesn't enable interrupts early */
8739 andl $~0x200,EFLAGS-R11(%rsp)
8740 movl RIP-R11(%rsp),%edx /* User %eip */
8741 @@ -191,6 +231,9 @@ sysexit_from_sys_call:
8742 movl %eax,%esi /* 2nd arg: syscall number */
8743 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8744 call __audit_syscall_entry
8745 +
8746 + pax_erase_kstack
8747 +
8748 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8749 cmpq $(IA32_NR_syscalls-1),%rax
8750 ja ia32_badsys
8751 @@ -202,7 +245,7 @@ sysexit_from_sys_call:
8752 .endm
8753
8754 .macro auditsys_exit exit
8755 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8756 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8757 jnz ia32_ret_from_sys_call
8758 TRACE_IRQS_ON
8759 sti
8760 @@ -213,11 +256,12 @@ sysexit_from_sys_call:
8761 1: setbe %al /* 1 if error, 0 if not */
8762 movzbl %al,%edi /* zero-extend that into %edi */
8763 call __audit_syscall_exit
8764 + GET_THREAD_INFO(%r11)
8765 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8766 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8767 cli
8768 TRACE_IRQS_OFF
8769 - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8770 + testl %edi,TI_flags(%r11)
8771 jz \exit
8772 CLEAR_RREGS -ARGOFFSET
8773 jmp int_with_check
8774 @@ -235,7 +279,7 @@ sysexit_audit:
8775
8776 sysenter_tracesys:
8777 #ifdef CONFIG_AUDITSYSCALL
8778 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8779 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8780 jz sysenter_auditsys
8781 #endif
8782 SAVE_REST
8783 @@ -243,6 +287,9 @@ sysenter_tracesys:
8784 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8785 movq %rsp,%rdi /* &pt_regs -> arg1 */
8786 call syscall_trace_enter
8787 +
8788 + pax_erase_kstack
8789 +
8790 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8791 RESTORE_REST
8792 cmpq $(IA32_NR_syscalls-1),%rax
8793 @@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8794 ENTRY(ia32_cstar_target)
8795 CFI_STARTPROC32 simple
8796 CFI_SIGNAL_FRAME
8797 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8798 + CFI_DEF_CFA rsp,0
8799 CFI_REGISTER rip,rcx
8800 /*CFI_REGISTER rflags,r11*/
8801 SWAPGS_UNSAFE_STACK
8802 movl %esp,%r8d
8803 CFI_REGISTER rsp,r8
8804 movq PER_CPU_VAR(kernel_stack),%rsp
8805 + SAVE_ARGS 8*6,0,0
8806 + pax_enter_kernel_user
8807 /*
8808 * No need to follow this irqs on/off section: the syscall
8809 * disabled irqs and here we enable it straight after entry:
8810 */
8811 ENABLE_INTERRUPTS(CLBR_NONE)
8812 - SAVE_ARGS 8,0,0
8813 movl %eax,%eax /* zero extension */
8814 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8815 movq %rcx,RIP-ARGOFFSET(%rsp)
8816 @@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8817 /* no need to do an access_ok check here because r8 has been
8818 32bit zero extended */
8819 /* hardware stack frame is complete now */
8820 +
8821 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8822 + mov $PAX_USER_SHADOW_BASE,%r11
8823 + add %r11,%r8
8824 +#endif
8825 +
8826 1: movl (%r8),%r9d
8827 .section __ex_table,"a"
8828 .quad 1b,ia32_badarg
8829 .previous
8830 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8831 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8832 + GET_THREAD_INFO(%r11)
8833 + orl $TS_COMPAT,TI_status(%r11)
8834 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8835 CFI_REMEMBER_STATE
8836 jnz cstar_tracesys
8837 cmpq $IA32_NR_syscalls-1,%rax
8838 @@ -317,12 +372,15 @@ cstar_do_call:
8839 cstar_dispatch:
8840 call *ia32_sys_call_table(,%rax,8)
8841 movq %rax,RAX-ARGOFFSET(%rsp)
8842 + GET_THREAD_INFO(%r11)
8843 DISABLE_INTERRUPTS(CLBR_NONE)
8844 TRACE_IRQS_OFF
8845 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8846 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8847 jnz sysretl_audit
8848 sysretl_from_sys_call:
8849 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8850 + pax_exit_kernel_user
8851 + pax_erase_kstack
8852 + andl $~TS_COMPAT,TI_status(%r11)
8853 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8854 movl RIP-ARGOFFSET(%rsp),%ecx
8855 CFI_REGISTER rip,rcx
8856 @@ -350,7 +408,7 @@ sysretl_audit:
8857
8858 cstar_tracesys:
8859 #ifdef CONFIG_AUDITSYSCALL
8860 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8861 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8862 jz cstar_auditsys
8863 #endif
8864 xchgl %r9d,%ebp
8865 @@ -359,6 +417,9 @@ cstar_tracesys:
8866 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8867 movq %rsp,%rdi /* &pt_regs -> arg1 */
8868 call syscall_trace_enter
8869 +
8870 + pax_erase_kstack
8871 +
8872 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8873 RESTORE_REST
8874 xchgl %ebp,%r9d
8875 @@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
8876 CFI_REL_OFFSET rip,RIP-RIP
8877 PARAVIRT_ADJUST_EXCEPTION_FRAME
8878 SWAPGS
8879 - /*
8880 - * No need to follow this irqs on/off section: the syscall
8881 - * disabled irqs and here we enable it straight after entry:
8882 - */
8883 - ENABLE_INTERRUPTS(CLBR_NONE)
8884 movl %eax,%eax
8885 pushq_cfi %rax
8886 cld
8887 /* note the registers are not zero extended to the sf.
8888 this could be a problem. */
8889 SAVE_ARGS 0,1,0
8890 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8891 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8892 + pax_enter_kernel_user
8893 + /*
8894 + * No need to follow this irqs on/off section: the syscall
8895 + * disabled irqs and here we enable it straight after entry:
8896 + */
8897 + ENABLE_INTERRUPTS(CLBR_NONE)
8898 + GET_THREAD_INFO(%r11)
8899 + orl $TS_COMPAT,TI_status(%r11)
8900 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8901 jnz ia32_tracesys
8902 cmpq $(IA32_NR_syscalls-1),%rax
8903 ja ia32_badsys
8904 @@ -435,6 +498,9 @@ ia32_tracesys:
8905 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8906 movq %rsp,%rdi /* &pt_regs -> arg1 */
8907 call syscall_trace_enter
8908 +
8909 + pax_erase_kstack
8910 +
8911 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8912 RESTORE_REST
8913 cmpq $(IA32_NR_syscalls-1),%rax
8914 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8915 index aec2202..f76174e 100644
8916 --- a/arch/x86/ia32/sys_ia32.c
8917 +++ b/arch/x86/ia32/sys_ia32.c
8918 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8919 */
8920 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8921 {
8922 - typeof(ubuf->st_uid) uid = 0;
8923 - typeof(ubuf->st_gid) gid = 0;
8924 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8925 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8926 SET_UID(uid, stat->uid);
8927 SET_GID(gid, stat->gid);
8928 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8929 @@ -292,7 +292,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
8930 return alarm_setitimer(seconds);
8931 }
8932
8933 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
8934 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
8935 int options)
8936 {
8937 return compat_sys_wait4(pid, stat_addr, options, NULL);
8938 @@ -313,7 +313,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8939 mm_segment_t old_fs = get_fs();
8940
8941 set_fs(KERNEL_DS);
8942 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8943 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8944 set_fs(old_fs);
8945 if (put_compat_timespec(&t, interval))
8946 return -EFAULT;
8947 @@ -329,7 +329,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8948 mm_segment_t old_fs = get_fs();
8949
8950 set_fs(KERNEL_DS);
8951 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8952 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8953 set_fs(old_fs);
8954 if (!ret) {
8955 switch (_NSIG_WORDS) {
8956 @@ -354,7 +354,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8957 if (copy_siginfo_from_user32(&info, uinfo))
8958 return -EFAULT;
8959 set_fs(KERNEL_DS);
8960 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8961 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8962 set_fs(old_fs);
8963 return ret;
8964 }
8965 @@ -399,7 +399,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8966 return -EFAULT;
8967
8968 set_fs(KERNEL_DS);
8969 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8970 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8971 count);
8972 set_fs(old_fs);
8973
8974 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8975 index 952bd01..7692c6f 100644
8976 --- a/arch/x86/include/asm/alternative-asm.h
8977 +++ b/arch/x86/include/asm/alternative-asm.h
8978 @@ -15,6 +15,45 @@
8979 .endm
8980 #endif
8981
8982 +#ifdef KERNEXEC_PLUGIN
8983 + .macro pax_force_retaddr_bts rip=0
8984 + btsq $63,\rip(%rsp)
8985 + .endm
8986 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8987 + .macro pax_force_retaddr rip=0, reload=0
8988 + btsq $63,\rip(%rsp)
8989 + .endm
8990 + .macro pax_force_fptr ptr
8991 + btsq $63,\ptr
8992 + .endm
8993 + .macro pax_set_fptr_mask
8994 + .endm
8995 +#endif
8996 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8997 + .macro pax_force_retaddr rip=0, reload=0
8998 + .if \reload
8999 + pax_set_fptr_mask
9000 + .endif
9001 + orq %r10,\rip(%rsp)
9002 + .endm
9003 + .macro pax_force_fptr ptr
9004 + orq %r10,\ptr
9005 + .endm
9006 + .macro pax_set_fptr_mask
9007 + movabs $0x8000000000000000,%r10
9008 + .endm
9009 +#endif
9010 +#else
9011 + .macro pax_force_retaddr rip=0, reload=0
9012 + .endm
9013 + .macro pax_force_fptr ptr
9014 + .endm
9015 + .macro pax_force_retaddr_bts rip=0
9016 + .endm
9017 + .macro pax_set_fptr_mask
9018 + .endm
9019 +#endif
9020 +
9021 .macro altinstruction_entry orig alt feature orig_len alt_len
9022 .long \orig - .
9023 .long \alt - .
9024 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9025 index 49331be..9706065 100644
9026 --- a/arch/x86/include/asm/alternative.h
9027 +++ b/arch/x86/include/asm/alternative.h
9028 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
9029 ".section .discard,\"aw\",@progbits\n" \
9030 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
9031 ".previous\n" \
9032 - ".section .altinstr_replacement, \"ax\"\n" \
9033 + ".section .altinstr_replacement, \"a\"\n" \
9034 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9035 ".previous"
9036
9037 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9038 index d854101..f6ea947 100644
9039 --- a/arch/x86/include/asm/apic.h
9040 +++ b/arch/x86/include/asm/apic.h
9041 @@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
9042
9043 #ifdef CONFIG_X86_LOCAL_APIC
9044
9045 -extern unsigned int apic_verbosity;
9046 +extern int apic_verbosity;
9047 extern int local_apic_timer_c2_ok;
9048
9049 extern int disable_apic;
9050 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9051 index 20370c6..a2eb9b0 100644
9052 --- a/arch/x86/include/asm/apm.h
9053 +++ b/arch/x86/include/asm/apm.h
9054 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9055 __asm__ __volatile__(APM_DO_ZERO_SEGS
9056 "pushl %%edi\n\t"
9057 "pushl %%ebp\n\t"
9058 - "lcall *%%cs:apm_bios_entry\n\t"
9059 + "lcall *%%ss:apm_bios_entry\n\t"
9060 "setc %%al\n\t"
9061 "popl %%ebp\n\t"
9062 "popl %%edi\n\t"
9063 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9064 __asm__ __volatile__(APM_DO_ZERO_SEGS
9065 "pushl %%edi\n\t"
9066 "pushl %%ebp\n\t"
9067 - "lcall *%%cs:apm_bios_entry\n\t"
9068 + "lcall *%%ss:apm_bios_entry\n\t"
9069 "setc %%bl\n\t"
9070 "popl %%ebp\n\t"
9071 "popl %%edi\n\t"
9072 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
9073 index 58cb6d4..ca9010d 100644
9074 --- a/arch/x86/include/asm/atomic.h
9075 +++ b/arch/x86/include/asm/atomic.h
9076 @@ -22,7 +22,18 @@
9077 */
9078 static inline int atomic_read(const atomic_t *v)
9079 {
9080 - return (*(volatile int *)&(v)->counter);
9081 + return (*(volatile const int *)&(v)->counter);
9082 +}
9083 +
9084 +/**
9085 + * atomic_read_unchecked - read atomic variable
9086 + * @v: pointer of type atomic_unchecked_t
9087 + *
9088 + * Atomically reads the value of @v.
9089 + */
9090 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9091 +{
9092 + return (*(volatile const int *)&(v)->counter);
9093 }
9094
9095 /**
9096 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
9097 }
9098
9099 /**
9100 + * atomic_set_unchecked - set atomic variable
9101 + * @v: pointer of type atomic_unchecked_t
9102 + * @i: required value
9103 + *
9104 + * Atomically sets the value of @v to @i.
9105 + */
9106 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9107 +{
9108 + v->counter = i;
9109 +}
9110 +
9111 +/**
9112 * atomic_add - add integer to atomic variable
9113 * @i: integer value to add
9114 * @v: pointer of type atomic_t
9115 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
9116 */
9117 static inline void atomic_add(int i, atomic_t *v)
9118 {
9119 - asm volatile(LOCK_PREFIX "addl %1,%0"
9120 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9121 +
9122 +#ifdef CONFIG_PAX_REFCOUNT
9123 + "jno 0f\n"
9124 + LOCK_PREFIX "subl %1,%0\n"
9125 + "int $4\n0:\n"
9126 + _ASM_EXTABLE(0b, 0b)
9127 +#endif
9128 +
9129 + : "+m" (v->counter)
9130 + : "ir" (i));
9131 +}
9132 +
9133 +/**
9134 + * atomic_add_unchecked - add integer to atomic variable
9135 + * @i: integer value to add
9136 + * @v: pointer of type atomic_unchecked_t
9137 + *
9138 + * Atomically adds @i to @v.
9139 + */
9140 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9141 +{
9142 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9143 : "+m" (v->counter)
9144 : "ir" (i));
9145 }
9146 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
9147 */
9148 static inline void atomic_sub(int i, atomic_t *v)
9149 {
9150 - asm volatile(LOCK_PREFIX "subl %1,%0"
9151 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9152 +
9153 +#ifdef CONFIG_PAX_REFCOUNT
9154 + "jno 0f\n"
9155 + LOCK_PREFIX "addl %1,%0\n"
9156 + "int $4\n0:\n"
9157 + _ASM_EXTABLE(0b, 0b)
9158 +#endif
9159 +
9160 + : "+m" (v->counter)
9161 + : "ir" (i));
9162 +}
9163 +
9164 +/**
9165 + * atomic_sub_unchecked - subtract integer from atomic variable
9166 + * @i: integer value to subtract
9167 + * @v: pointer of type atomic_unchecked_t
9168 + *
9169 + * Atomically subtracts @i from @v.
9170 + */
9171 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9172 +{
9173 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9174 : "+m" (v->counter)
9175 : "ir" (i));
9176 }
9177 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9178 {
9179 unsigned char c;
9180
9181 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9182 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
9183 +
9184 +#ifdef CONFIG_PAX_REFCOUNT
9185 + "jno 0f\n"
9186 + LOCK_PREFIX "addl %2,%0\n"
9187 + "int $4\n0:\n"
9188 + _ASM_EXTABLE(0b, 0b)
9189 +#endif
9190 +
9191 + "sete %1\n"
9192 : "+m" (v->counter), "=qm" (c)
9193 : "ir" (i) : "memory");
9194 return c;
9195 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9196 */
9197 static inline void atomic_inc(atomic_t *v)
9198 {
9199 - asm volatile(LOCK_PREFIX "incl %0"
9200 + asm volatile(LOCK_PREFIX "incl %0\n"
9201 +
9202 +#ifdef CONFIG_PAX_REFCOUNT
9203 + "jno 0f\n"
9204 + LOCK_PREFIX "decl %0\n"
9205 + "int $4\n0:\n"
9206 + _ASM_EXTABLE(0b, 0b)
9207 +#endif
9208 +
9209 + : "+m" (v->counter));
9210 +}
9211 +
9212 +/**
9213 + * atomic_inc_unchecked - increment atomic variable
9214 + * @v: pointer of type atomic_unchecked_t
9215 + *
9216 + * Atomically increments @v by 1.
9217 + */
9218 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9219 +{
9220 + asm volatile(LOCK_PREFIX "incl %0\n"
9221 : "+m" (v->counter));
9222 }
9223
9224 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
9225 */
9226 static inline void atomic_dec(atomic_t *v)
9227 {
9228 - asm volatile(LOCK_PREFIX "decl %0"
9229 + asm volatile(LOCK_PREFIX "decl %0\n"
9230 +
9231 +#ifdef CONFIG_PAX_REFCOUNT
9232 + "jno 0f\n"
9233 + LOCK_PREFIX "incl %0\n"
9234 + "int $4\n0:\n"
9235 + _ASM_EXTABLE(0b, 0b)
9236 +#endif
9237 +
9238 + : "+m" (v->counter));
9239 +}
9240 +
9241 +/**
9242 + * atomic_dec_unchecked - decrement atomic variable
9243 + * @v: pointer of type atomic_unchecked_t
9244 + *
9245 + * Atomically decrements @v by 1.
9246 + */
9247 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9248 +{
9249 + asm volatile(LOCK_PREFIX "decl %0\n"
9250 : "+m" (v->counter));
9251 }
9252
9253 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9254 {
9255 unsigned char c;
9256
9257 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9258 + asm volatile(LOCK_PREFIX "decl %0\n"
9259 +
9260 +#ifdef CONFIG_PAX_REFCOUNT
9261 + "jno 0f\n"
9262 + LOCK_PREFIX "incl %0\n"
9263 + "int $4\n0:\n"
9264 + _ASM_EXTABLE(0b, 0b)
9265 +#endif
9266 +
9267 + "sete %1\n"
9268 : "+m" (v->counter), "=qm" (c)
9269 : : "memory");
9270 return c != 0;
9271 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9272 {
9273 unsigned char c;
9274
9275 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9276 + asm volatile(LOCK_PREFIX "incl %0\n"
9277 +
9278 +#ifdef CONFIG_PAX_REFCOUNT
9279 + "jno 0f\n"
9280 + LOCK_PREFIX "decl %0\n"
9281 + "int $4\n0:\n"
9282 + _ASM_EXTABLE(0b, 0b)
9283 +#endif
9284 +
9285 + "sete %1\n"
9286 + : "+m" (v->counter), "=qm" (c)
9287 + : : "memory");
9288 + return c != 0;
9289 +}
9290 +
9291 +/**
9292 + * atomic_inc_and_test_unchecked - increment and test
9293 + * @v: pointer of type atomic_unchecked_t
9294 + *
9295 + * Atomically increments @v by 1
9296 + * and returns true if the result is zero, or false for all
9297 + * other cases.
9298 + */
9299 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9300 +{
9301 + unsigned char c;
9302 +
9303 + asm volatile(LOCK_PREFIX "incl %0\n"
9304 + "sete %1\n"
9305 : "+m" (v->counter), "=qm" (c)
9306 : : "memory");
9307 return c != 0;
9308 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9309 {
9310 unsigned char c;
9311
9312 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9313 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9314 +
9315 +#ifdef CONFIG_PAX_REFCOUNT
9316 + "jno 0f\n"
9317 + LOCK_PREFIX "subl %2,%0\n"
9318 + "int $4\n0:\n"
9319 + _ASM_EXTABLE(0b, 0b)
9320 +#endif
9321 +
9322 + "sets %1\n"
9323 : "+m" (v->counter), "=qm" (c)
9324 : "ir" (i) : "memory");
9325 return c;
9326 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
9327 goto no_xadd;
9328 #endif
9329 /* Modern 486+ processor */
9330 - return i + xadd(&v->counter, i);
9331 + return i + xadd_check_overflow(&v->counter, i);
9332
9333 #ifdef CONFIG_M386
9334 no_xadd: /* Legacy 386 processor */
9335 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
9336 }
9337
9338 /**
9339 + * atomic_add_return_unchecked - add integer and return
9340 + * @i: integer value to add
9341 + * @v: pointer of type atomic_unchecked_t
9342 + *
9343 + * Atomically adds @i to @v and returns @i + @v
9344 + */
9345 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9346 +{
9347 +#ifdef CONFIG_M386
9348 + int __i;
9349 + unsigned long flags;
9350 + if (unlikely(boot_cpu_data.x86 <= 3))
9351 + goto no_xadd;
9352 +#endif
9353 + /* Modern 486+ processor */
9354 + return i + xadd(&v->counter, i);
9355 +
9356 +#ifdef CONFIG_M386
9357 +no_xadd: /* Legacy 386 processor */
9358 + raw_local_irq_save(flags);
9359 + __i = atomic_read_unchecked(v);
9360 + atomic_set_unchecked(v, i + __i);
9361 + raw_local_irq_restore(flags);
9362 + return i + __i;
9363 +#endif
9364 +}
9365 +
9366 +/**
9367 * atomic_sub_return - subtract integer and return
9368 * @v: pointer of type atomic_t
9369 * @i: integer value to subtract
9370 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9371 }
9372
9373 #define atomic_inc_return(v) (atomic_add_return(1, v))
9374 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9375 +{
9376 + return atomic_add_return_unchecked(1, v);
9377 +}
9378 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9379
9380 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9381 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9382 return cmpxchg(&v->counter, old, new);
9383 }
9384
9385 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9386 +{
9387 + return cmpxchg(&v->counter, old, new);
9388 +}
9389 +
9390 static inline int atomic_xchg(atomic_t *v, int new)
9391 {
9392 return xchg(&v->counter, new);
9393 }
9394
9395 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9396 +{
9397 + return xchg(&v->counter, new);
9398 +}
9399 +
9400 /**
9401 * __atomic_add_unless - add unless the number is already a given value
9402 * @v: pointer of type atomic_t
9403 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9404 */
9405 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9406 {
9407 - int c, old;
9408 + int c, old, new;
9409 c = atomic_read(v);
9410 for (;;) {
9411 - if (unlikely(c == (u)))
9412 + if (unlikely(c == u))
9413 break;
9414 - old = atomic_cmpxchg((v), c, c + (a));
9415 +
9416 + asm volatile("addl %2,%0\n"
9417 +
9418 +#ifdef CONFIG_PAX_REFCOUNT
9419 + "jno 0f\n"
9420 + "subl %2,%0\n"
9421 + "int $4\n0:\n"
9422 + _ASM_EXTABLE(0b, 0b)
9423 +#endif
9424 +
9425 + : "=r" (new)
9426 + : "0" (c), "ir" (a));
9427 +
9428 + old = atomic_cmpxchg(v, c, new);
9429 if (likely(old == c))
9430 break;
9431 c = old;
9432 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9433 return c;
9434 }
9435
9436 +/**
9437 + * atomic_inc_not_zero_hint - increment if not null
9438 + * @v: pointer of type atomic_t
9439 + * @hint: probable value of the atomic before the increment
9440 + *
9441 + * This version of atomic_inc_not_zero() gives a hint of probable
9442 + * value of the atomic. This helps processor to not read the memory
9443 + * before doing the atomic read/modify/write cycle, lowering
9444 + * number of bus transactions on some arches.
9445 + *
9446 + * Returns: 0 if increment was not done, 1 otherwise.
9447 + */
9448 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9449 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9450 +{
9451 + int val, c = hint, new;
9452 +
9453 + /* sanity test, should be removed by compiler if hint is a constant */
9454 + if (!hint)
9455 + return __atomic_add_unless(v, 1, 0);
9456 +
9457 + do {
9458 + asm volatile("incl %0\n"
9459 +
9460 +#ifdef CONFIG_PAX_REFCOUNT
9461 + "jno 0f\n"
9462 + "decl %0\n"
9463 + "int $4\n0:\n"
9464 + _ASM_EXTABLE(0b, 0b)
9465 +#endif
9466 +
9467 + : "=r" (new)
9468 + : "0" (c));
9469 +
9470 + val = atomic_cmpxchg(v, c, new);
9471 + if (val == c)
9472 + return 1;
9473 + c = val;
9474 + } while (c);
9475 +
9476 + return 0;
9477 +}
9478
9479 /*
9480 * atomic_dec_if_positive - decrement by 1 if old value positive
9481 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9482 index 1981199..36b9dfb 100644
9483 --- a/arch/x86/include/asm/atomic64_32.h
9484 +++ b/arch/x86/include/asm/atomic64_32.h
9485 @@ -12,6 +12,14 @@ typedef struct {
9486 u64 __aligned(8) counter;
9487 } atomic64_t;
9488
9489 +#ifdef CONFIG_PAX_REFCOUNT
9490 +typedef struct {
9491 + u64 __aligned(8) counter;
9492 +} atomic64_unchecked_t;
9493 +#else
9494 +typedef atomic64_t atomic64_unchecked_t;
9495 +#endif
9496 +
9497 #define ATOMIC64_INIT(val) { (val) }
9498
9499 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
9500 @@ -37,21 +45,31 @@ typedef struct {
9501 ATOMIC64_DECL_ONE(sym##_386)
9502
9503 ATOMIC64_DECL_ONE(add_386);
9504 +ATOMIC64_DECL_ONE(add_unchecked_386);
9505 ATOMIC64_DECL_ONE(sub_386);
9506 +ATOMIC64_DECL_ONE(sub_unchecked_386);
9507 ATOMIC64_DECL_ONE(inc_386);
9508 +ATOMIC64_DECL_ONE(inc_unchecked_386);
9509 ATOMIC64_DECL_ONE(dec_386);
9510 +ATOMIC64_DECL_ONE(dec_unchecked_386);
9511 #endif
9512
9513 #define alternative_atomic64(f, out, in...) \
9514 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
9515
9516 ATOMIC64_DECL(read);
9517 +ATOMIC64_DECL(read_unchecked);
9518 ATOMIC64_DECL(set);
9519 +ATOMIC64_DECL(set_unchecked);
9520 ATOMIC64_DECL(xchg);
9521 ATOMIC64_DECL(add_return);
9522 +ATOMIC64_DECL(add_return_unchecked);
9523 ATOMIC64_DECL(sub_return);
9524 +ATOMIC64_DECL(sub_return_unchecked);
9525 ATOMIC64_DECL(inc_return);
9526 +ATOMIC64_DECL(inc_return_unchecked);
9527 ATOMIC64_DECL(dec_return);
9528 +ATOMIC64_DECL(dec_return_unchecked);
9529 ATOMIC64_DECL(dec_if_positive);
9530 ATOMIC64_DECL(inc_not_zero);
9531 ATOMIC64_DECL(add_unless);
9532 @@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9533 }
9534
9535 /**
9536 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9537 + * @p: pointer to type atomic64_unchecked_t
9538 + * @o: expected value
9539 + * @n: new value
9540 + *
9541 + * Atomically sets @v to @n if it was equal to @o and returns
9542 + * the old value.
9543 + */
9544 +
9545 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9546 +{
9547 + return cmpxchg64(&v->counter, o, n);
9548 +}
9549 +
9550 +/**
9551 * atomic64_xchg - xchg atomic64 variable
9552 * @v: pointer to type atomic64_t
9553 * @n: value to assign
9554 @@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9555 }
9556
9557 /**
9558 + * atomic64_set_unchecked - set atomic64 variable
9559 + * @v: pointer to type atomic64_unchecked_t
9560 + * @n: value to assign
9561 + *
9562 + * Atomically sets the value of @v to @n.
9563 + */
9564 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9565 +{
9566 + unsigned high = (unsigned)(i >> 32);
9567 + unsigned low = (unsigned)i;
9568 + alternative_atomic64(set, /* no output */,
9569 + "S" (v), "b" (low), "c" (high)
9570 + : "eax", "edx", "memory");
9571 +}
9572 +
9573 +/**
9574 * atomic64_read - read atomic64 variable
9575 * @v: pointer to type atomic64_t
9576 *
9577 @@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
9578 }
9579
9580 /**
9581 + * atomic64_read_unchecked - read atomic64 variable
9582 + * @v: pointer to type atomic64_unchecked_t
9583 + *
9584 + * Atomically reads the value of @v and returns it.
9585 + */
9586 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9587 +{
9588 + long long r;
9589 + alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
9590 + return r;
9591 + }
9592 +
9593 +/**
9594 * atomic64_add_return - add and return
9595 * @i: integer value to add
9596 * @v: pointer to type atomic64_t
9597 @@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9598 return i;
9599 }
9600
9601 +/**
9602 + * atomic64_add_return_unchecked - add and return
9603 + * @i: integer value to add
9604 + * @v: pointer to type atomic64_unchecked_t
9605 + *
9606 + * Atomically adds @i to @v and returns @i + *@v
9607 + */
9608 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9609 +{
9610 + alternative_atomic64(add_return_unchecked,
9611 + ASM_OUTPUT2("+A" (i), "+c" (v)),
9612 + ASM_NO_INPUT_CLOBBER("memory"));
9613 + return i;
9614 +}
9615 +
9616 /*
9617 * Other variants with different arithmetic operators:
9618 */
9619 @@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9620 return a;
9621 }
9622
9623 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9624 +{
9625 + long long a;
9626 + alternative_atomic64(inc_return_unchecked, "=&A" (a),
9627 + "S" (v) : "memory", "ecx");
9628 + return a;
9629 +}
9630 +
9631 static inline long long atomic64_dec_return(atomic64_t *v)
9632 {
9633 long long a;
9634 @@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9635 }
9636
9637 /**
9638 + * atomic64_add_unchecked - add integer to atomic64 variable
9639 + * @i: integer value to add
9640 + * @v: pointer to type atomic64_unchecked_t
9641 + *
9642 + * Atomically adds @i to @v.
9643 + */
9644 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9645 +{
9646 + __alternative_atomic64(add_unchecked, add_return_unchecked,
9647 + ASM_OUTPUT2("+A" (i), "+c" (v)),
9648 + ASM_NO_INPUT_CLOBBER("memory"));
9649 + return i;
9650 +}
9651 +
9652 +/**
9653 * atomic64_sub - subtract the atomic64 variable
9654 * @i: integer value to subtract
9655 * @v: pointer to type atomic64_t
9656 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9657 index 0e1cbfc..5623683 100644
9658 --- a/arch/x86/include/asm/atomic64_64.h
9659 +++ b/arch/x86/include/asm/atomic64_64.h
9660 @@ -18,7 +18,19 @@
9661 */
9662 static inline long atomic64_read(const atomic64_t *v)
9663 {
9664 - return (*(volatile long *)&(v)->counter);
9665 + return (*(volatile const long *)&(v)->counter);
9666 +}
9667 +
9668 +/**
9669 + * atomic64_read_unchecked - read atomic64 variable
9670 + * @v: pointer of type atomic64_unchecked_t
9671 + *
9672 + * Atomically reads the value of @v.
9673 + * Doesn't imply a read memory barrier.
9674 + */
9675 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9676 +{
9677 + return (*(volatile const long *)&(v)->counter);
9678 }
9679
9680 /**
9681 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9682 }
9683
9684 /**
9685 + * atomic64_set_unchecked - set atomic64 variable
9686 + * @v: pointer to type atomic64_unchecked_t
9687 + * @i: required value
9688 + *
9689 + * Atomically sets the value of @v to @i.
9690 + */
9691 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9692 +{
9693 + v->counter = i;
9694 +}
9695 +
9696 +/**
9697 * atomic64_add - add integer to atomic64 variable
9698 * @i: integer value to add
9699 * @v: pointer to type atomic64_t
9700 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9701 */
9702 static inline void atomic64_add(long i, atomic64_t *v)
9703 {
9704 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9705 +
9706 +#ifdef CONFIG_PAX_REFCOUNT
9707 + "jno 0f\n"
9708 + LOCK_PREFIX "subq %1,%0\n"
9709 + "int $4\n0:\n"
9710 + _ASM_EXTABLE(0b, 0b)
9711 +#endif
9712 +
9713 + : "=m" (v->counter)
9714 + : "er" (i), "m" (v->counter));
9715 +}
9716 +
9717 +/**
9718 + * atomic64_add_unchecked - add integer to atomic64 variable
9719 + * @i: integer value to add
9720 + * @v: pointer to type atomic64_unchecked_t
9721 + *
9722 + * Atomically adds @i to @v.
9723 + */
9724 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9725 +{
9726 asm volatile(LOCK_PREFIX "addq %1,%0"
9727 : "=m" (v->counter)
9728 : "er" (i), "m" (v->counter));
9729 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9730 */
9731 static inline void atomic64_sub(long i, atomic64_t *v)
9732 {
9733 - asm volatile(LOCK_PREFIX "subq %1,%0"
9734 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9735 +
9736 +#ifdef CONFIG_PAX_REFCOUNT
9737 + "jno 0f\n"
9738 + LOCK_PREFIX "addq %1,%0\n"
9739 + "int $4\n0:\n"
9740 + _ASM_EXTABLE(0b, 0b)
9741 +#endif
9742 +
9743 + : "=m" (v->counter)
9744 + : "er" (i), "m" (v->counter));
9745 +}
9746 +
9747 +/**
9748 + * atomic64_sub_unchecked - subtract the atomic64 variable
9749 + * @i: integer value to subtract
9750 + * @v: pointer to type atomic64_unchecked_t
9751 + *
9752 + * Atomically subtracts @i from @v.
9753 + */
9754 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9755 +{
9756 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9757 : "=m" (v->counter)
9758 : "er" (i), "m" (v->counter));
9759 }
9760 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9761 {
9762 unsigned char c;
9763
9764 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9765 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9766 +
9767 +#ifdef CONFIG_PAX_REFCOUNT
9768 + "jno 0f\n"
9769 + LOCK_PREFIX "addq %2,%0\n"
9770 + "int $4\n0:\n"
9771 + _ASM_EXTABLE(0b, 0b)
9772 +#endif
9773 +
9774 + "sete %1\n"
9775 : "=m" (v->counter), "=qm" (c)
9776 : "er" (i), "m" (v->counter) : "memory");
9777 return c;
9778 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9779 */
9780 static inline void atomic64_inc(atomic64_t *v)
9781 {
9782 + asm volatile(LOCK_PREFIX "incq %0\n"
9783 +
9784 +#ifdef CONFIG_PAX_REFCOUNT
9785 + "jno 0f\n"
9786 + LOCK_PREFIX "decq %0\n"
9787 + "int $4\n0:\n"
9788 + _ASM_EXTABLE(0b, 0b)
9789 +#endif
9790 +
9791 + : "=m" (v->counter)
9792 + : "m" (v->counter));
9793 +}
9794 +
9795 +/**
9796 + * atomic64_inc_unchecked - increment atomic64 variable
9797 + * @v: pointer to type atomic64_unchecked_t
9798 + *
9799 + * Atomically increments @v by 1.
9800 + */
9801 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9802 +{
9803 asm volatile(LOCK_PREFIX "incq %0"
9804 : "=m" (v->counter)
9805 : "m" (v->counter));
9806 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9807 */
9808 static inline void atomic64_dec(atomic64_t *v)
9809 {
9810 - asm volatile(LOCK_PREFIX "decq %0"
9811 + asm volatile(LOCK_PREFIX "decq %0\n"
9812 +
9813 +#ifdef CONFIG_PAX_REFCOUNT
9814 + "jno 0f\n"
9815 + LOCK_PREFIX "incq %0\n"
9816 + "int $4\n0:\n"
9817 + _ASM_EXTABLE(0b, 0b)
9818 +#endif
9819 +
9820 + : "=m" (v->counter)
9821 + : "m" (v->counter));
9822 +}
9823 +
9824 +/**
9825 + * atomic64_dec_unchecked - decrement atomic64 variable
9826 + * @v: pointer to type atomic64_t
9827 + *
9828 + * Atomically decrements @v by 1.
9829 + */
9830 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9831 +{
9832 + asm volatile(LOCK_PREFIX "decq %0\n"
9833 : "=m" (v->counter)
9834 : "m" (v->counter));
9835 }
9836 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9837 {
9838 unsigned char c;
9839
9840 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9841 + asm volatile(LOCK_PREFIX "decq %0\n"
9842 +
9843 +#ifdef CONFIG_PAX_REFCOUNT
9844 + "jno 0f\n"
9845 + LOCK_PREFIX "incq %0\n"
9846 + "int $4\n0:\n"
9847 + _ASM_EXTABLE(0b, 0b)
9848 +#endif
9849 +
9850 + "sete %1\n"
9851 : "=m" (v->counter), "=qm" (c)
9852 : "m" (v->counter) : "memory");
9853 return c != 0;
9854 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9855 {
9856 unsigned char c;
9857
9858 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9859 + asm volatile(LOCK_PREFIX "incq %0\n"
9860 +
9861 +#ifdef CONFIG_PAX_REFCOUNT
9862 + "jno 0f\n"
9863 + LOCK_PREFIX "decq %0\n"
9864 + "int $4\n0:\n"
9865 + _ASM_EXTABLE(0b, 0b)
9866 +#endif
9867 +
9868 + "sete %1\n"
9869 : "=m" (v->counter), "=qm" (c)
9870 : "m" (v->counter) : "memory");
9871 return c != 0;
9872 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9873 {
9874 unsigned char c;
9875
9876 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9877 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9878 +
9879 +#ifdef CONFIG_PAX_REFCOUNT
9880 + "jno 0f\n"
9881 + LOCK_PREFIX "subq %2,%0\n"
9882 + "int $4\n0:\n"
9883 + _ASM_EXTABLE(0b, 0b)
9884 +#endif
9885 +
9886 + "sets %1\n"
9887 : "=m" (v->counter), "=qm" (c)
9888 : "er" (i), "m" (v->counter) : "memory");
9889 return c;
9890 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9891 */
9892 static inline long atomic64_add_return(long i, atomic64_t *v)
9893 {
9894 + return i + xadd_check_overflow(&v->counter, i);
9895 +}
9896 +
9897 +/**
9898 + * atomic64_add_return_unchecked - add and return
9899 + * @i: integer value to add
9900 + * @v: pointer to type atomic64_unchecked_t
9901 + *
9902 + * Atomically adds @i to @v and returns @i + @v
9903 + */
9904 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9905 +{
9906 return i + xadd(&v->counter, i);
9907 }
9908
9909 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9910 }
9911
9912 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9913 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9914 +{
9915 + return atomic64_add_return_unchecked(1, v);
9916 +}
9917 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9918
9919 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9920 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9921 return cmpxchg(&v->counter, old, new);
9922 }
9923
9924 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9925 +{
9926 + return cmpxchg(&v->counter, old, new);
9927 +}
9928 +
9929 static inline long atomic64_xchg(atomic64_t *v, long new)
9930 {
9931 return xchg(&v->counter, new);
9932 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
9933 */
9934 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9935 {
9936 - long c, old;
9937 + long c, old, new;
9938 c = atomic64_read(v);
9939 for (;;) {
9940 - if (unlikely(c == (u)))
9941 + if (unlikely(c == u))
9942 break;
9943 - old = atomic64_cmpxchg((v), c, c + (a));
9944 +
9945 + asm volatile("add %2,%0\n"
9946 +
9947 +#ifdef CONFIG_PAX_REFCOUNT
9948 + "jno 0f\n"
9949 + "sub %2,%0\n"
9950 + "int $4\n0:\n"
9951 + _ASM_EXTABLE(0b, 0b)
9952 +#endif
9953 +
9954 + : "=r" (new)
9955 + : "0" (c), "ir" (a));
9956 +
9957 + old = atomic64_cmpxchg(v, c, new);
9958 if (likely(old == c))
9959 break;
9960 c = old;
9961 }
9962 - return c != (u);
9963 + return c != u;
9964 }
9965
9966 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9967 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9968 index b97596e..9bd48b06 100644
9969 --- a/arch/x86/include/asm/bitops.h
9970 +++ b/arch/x86/include/asm/bitops.h
9971 @@ -38,7 +38,7 @@
9972 * a mask operation on a byte.
9973 */
9974 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9975 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9976 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9977 #define CONST_MASK(nr) (1 << ((nr) & 7))
9978
9979 /**
9980 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9981 index 5e1a2ee..c9f9533 100644
9982 --- a/arch/x86/include/asm/boot.h
9983 +++ b/arch/x86/include/asm/boot.h
9984 @@ -11,10 +11,15 @@
9985 #include <asm/pgtable_types.h>
9986
9987 /* Physical address where kernel should be loaded. */
9988 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9989 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9990 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9991 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9992
9993 +#ifndef __ASSEMBLY__
9994 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
9995 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9996 +#endif
9997 +
9998 /* Minimum kernel alignment, as a power of two */
9999 #ifdef CONFIG_X86_64
10000 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10001 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10002 index 48f99f1..d78ebf9 100644
10003 --- a/arch/x86/include/asm/cache.h
10004 +++ b/arch/x86/include/asm/cache.h
10005 @@ -5,12 +5,13 @@
10006
10007 /* L1 cache line size */
10008 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10009 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10010 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10011
10012 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10013 +#define __read_only __attribute__((__section__(".data..read_only")))
10014
10015 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
10016 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
10017 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
10018
10019 #ifdef CONFIG_X86_VSMP
10020 #ifdef CONFIG_SMP
10021 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10022 index 9863ee3..4a1f8e1 100644
10023 --- a/arch/x86/include/asm/cacheflush.h
10024 +++ b/arch/x86/include/asm/cacheflush.h
10025 @@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
10026 unsigned long pg_flags = pg->flags & _PGMT_MASK;
10027
10028 if (pg_flags == _PGMT_DEFAULT)
10029 - return -1;
10030 + return ~0UL;
10031 else if (pg_flags == _PGMT_WC)
10032 return _PAGE_CACHE_WC;
10033 else if (pg_flags == _PGMT_UC_MINUS)
10034 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10035 index 46fc474..b02b0f9 100644
10036 --- a/arch/x86/include/asm/checksum_32.h
10037 +++ b/arch/x86/include/asm/checksum_32.h
10038 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10039 int len, __wsum sum,
10040 int *src_err_ptr, int *dst_err_ptr);
10041
10042 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10043 + int len, __wsum sum,
10044 + int *src_err_ptr, int *dst_err_ptr);
10045 +
10046 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10047 + int len, __wsum sum,
10048 + int *src_err_ptr, int *dst_err_ptr);
10049 +
10050 /*
10051 * Note: when you get a NULL pointer exception here this means someone
10052 * passed in an incorrect kernel address to one of these functions.
10053 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10054 int *err_ptr)
10055 {
10056 might_sleep();
10057 - return csum_partial_copy_generic((__force void *)src, dst,
10058 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
10059 len, sum, err_ptr, NULL);
10060 }
10061
10062 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10063 {
10064 might_sleep();
10065 if (access_ok(VERIFY_WRITE, dst, len))
10066 - return csum_partial_copy_generic(src, (__force void *)dst,
10067 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10068 len, sum, NULL, err_ptr);
10069
10070 if (len)
10071 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
10072 index 99480e5..d81165b 100644
10073 --- a/arch/x86/include/asm/cmpxchg.h
10074 +++ b/arch/x86/include/asm/cmpxchg.h
10075 @@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
10076 __compiletime_error("Bad argument size for cmpxchg");
10077 extern void __xadd_wrong_size(void)
10078 __compiletime_error("Bad argument size for xadd");
10079 +extern void __xadd_check_overflow_wrong_size(void)
10080 + __compiletime_error("Bad argument size for xadd_check_overflow");
10081 extern void __add_wrong_size(void)
10082 __compiletime_error("Bad argument size for add");
10083 +extern void __add_check_overflow_wrong_size(void)
10084 + __compiletime_error("Bad argument size for add_check_overflow");
10085
10086 /*
10087 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
10088 @@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
10089 __ret; \
10090 })
10091
10092 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
10093 + ({ \
10094 + __typeof__ (*(ptr)) __ret = (arg); \
10095 + switch (sizeof(*(ptr))) { \
10096 + case __X86_CASE_L: \
10097 + asm volatile (lock #op "l %0, %1\n" \
10098 + "jno 0f\n" \
10099 + "mov %0,%1\n" \
10100 + "int $4\n0:\n" \
10101 + _ASM_EXTABLE(0b, 0b) \
10102 + : "+r" (__ret), "+m" (*(ptr)) \
10103 + : : "memory", "cc"); \
10104 + break; \
10105 + case __X86_CASE_Q: \
10106 + asm volatile (lock #op "q %q0, %1\n" \
10107 + "jno 0f\n" \
10108 + "mov %0,%1\n" \
10109 + "int $4\n0:\n" \
10110 + _ASM_EXTABLE(0b, 0b) \
10111 + : "+r" (__ret), "+m" (*(ptr)) \
10112 + : : "memory", "cc"); \
10113 + break; \
10114 + default: \
10115 + __ ## op ## _check_overflow_wrong_size(); \
10116 + } \
10117 + __ret; \
10118 + })
10119 +
10120 /*
10121 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
10122 * Since this is generally used to protect other memory information, we
10123 @@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
10124 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
10125 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
10126
10127 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
10128 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
10129 +
10130 #define __add(ptr, inc, lock) \
10131 ({ \
10132 __typeof__ (*(ptr)) __ret = (inc); \
10133 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
10134 index 340ee49..4238ced 100644
10135 --- a/arch/x86/include/asm/cpufeature.h
10136 +++ b/arch/x86/include/asm/cpufeature.h
10137 @@ -371,7 +371,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
10138 ".section .discard,\"aw\",@progbits\n"
10139 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
10140 ".previous\n"
10141 - ".section .altinstr_replacement,\"ax\"\n"
10142 + ".section .altinstr_replacement,\"a\"\n"
10143 "3: movb $1,%0\n"
10144 "4:\n"
10145 ".previous\n"
10146 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10147 index e95822d..a90010e 100644
10148 --- a/arch/x86/include/asm/desc.h
10149 +++ b/arch/x86/include/asm/desc.h
10150 @@ -4,6 +4,7 @@
10151 #include <asm/desc_defs.h>
10152 #include <asm/ldt.h>
10153 #include <asm/mmu.h>
10154 +#include <asm/pgtable.h>
10155
10156 #include <linux/smp.h>
10157
10158 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10159
10160 desc->type = (info->read_exec_only ^ 1) << 1;
10161 desc->type |= info->contents << 2;
10162 + desc->type |= info->seg_not_present ^ 1;
10163
10164 desc->s = 1;
10165 desc->dpl = 0x3;
10166 @@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10167 }
10168
10169 extern struct desc_ptr idt_descr;
10170 -extern gate_desc idt_table[];
10171 extern struct desc_ptr nmi_idt_descr;
10172 -extern gate_desc nmi_idt_table[];
10173 -
10174 -struct gdt_page {
10175 - struct desc_struct gdt[GDT_ENTRIES];
10176 -} __attribute__((aligned(PAGE_SIZE)));
10177 -
10178 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10179 +extern gate_desc idt_table[256];
10180 +extern gate_desc nmi_idt_table[256];
10181
10182 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10183 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10184 {
10185 - return per_cpu(gdt_page, cpu).gdt;
10186 + return cpu_gdt_table[cpu];
10187 }
10188
10189 #ifdef CONFIG_X86_64
10190 @@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10191 unsigned long base, unsigned dpl, unsigned flags,
10192 unsigned short seg)
10193 {
10194 - gate->a = (seg << 16) | (base & 0xffff);
10195 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10196 + gate->gate.offset_low = base;
10197 + gate->gate.seg = seg;
10198 + gate->gate.reserved = 0;
10199 + gate->gate.type = type;
10200 + gate->gate.s = 0;
10201 + gate->gate.dpl = dpl;
10202 + gate->gate.p = 1;
10203 + gate->gate.offset_high = base >> 16;
10204 }
10205
10206 #endif
10207 @@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10208
10209 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
10210 {
10211 + pax_open_kernel();
10212 memcpy(&idt[entry], gate, sizeof(*gate));
10213 + pax_close_kernel();
10214 }
10215
10216 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
10217 {
10218 + pax_open_kernel();
10219 memcpy(&ldt[entry], desc, 8);
10220 + pax_close_kernel();
10221 }
10222
10223 static inline void
10224 @@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
10225 default: size = sizeof(*gdt); break;
10226 }
10227
10228 + pax_open_kernel();
10229 memcpy(&gdt[entry], desc, size);
10230 + pax_close_kernel();
10231 }
10232
10233 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10234 @@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10235
10236 static inline void native_load_tr_desc(void)
10237 {
10238 + pax_open_kernel();
10239 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10240 + pax_close_kernel();
10241 }
10242
10243 static inline void native_load_gdt(const struct desc_ptr *dtr)
10244 @@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10245 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10246 unsigned int i;
10247
10248 + pax_open_kernel();
10249 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10250 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10251 + pax_close_kernel();
10252 }
10253
10254 #define _LDT_empty(info) \
10255 @@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10256 }
10257
10258 #ifdef CONFIG_X86_64
10259 -static inline void set_nmi_gate(int gate, void *addr)
10260 +static inline void set_nmi_gate(int gate, const void *addr)
10261 {
10262 gate_desc s;
10263
10264 @@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
10265 }
10266 #endif
10267
10268 -static inline void _set_gate(int gate, unsigned type, void *addr,
10269 +static inline void _set_gate(int gate, unsigned type, const void *addr,
10270 unsigned dpl, unsigned ist, unsigned seg)
10271 {
10272 gate_desc s;
10273 @@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10274 * Pentium F0 0F bugfix can have resulted in the mapped
10275 * IDT being write-protected.
10276 */
10277 -static inline void set_intr_gate(unsigned int n, void *addr)
10278 +static inline void set_intr_gate(unsigned int n, const void *addr)
10279 {
10280 BUG_ON((unsigned)n > 0xFF);
10281 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10282 @@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10283 /*
10284 * This routine sets up an interrupt gate at directory privilege level 3.
10285 */
10286 -static inline void set_system_intr_gate(unsigned int n, void *addr)
10287 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
10288 {
10289 BUG_ON((unsigned)n > 0xFF);
10290 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10291 }
10292
10293 -static inline void set_system_trap_gate(unsigned int n, void *addr)
10294 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
10295 {
10296 BUG_ON((unsigned)n > 0xFF);
10297 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10298 }
10299
10300 -static inline void set_trap_gate(unsigned int n, void *addr)
10301 +static inline void set_trap_gate(unsigned int n, const void *addr)
10302 {
10303 BUG_ON((unsigned)n > 0xFF);
10304 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10305 @@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10306 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10307 {
10308 BUG_ON((unsigned)n > 0xFF);
10309 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10310 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10311 }
10312
10313 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10314 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10315 {
10316 BUG_ON((unsigned)n > 0xFF);
10317 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10318 }
10319
10320 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10321 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10322 {
10323 BUG_ON((unsigned)n > 0xFF);
10324 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10325 }
10326
10327 +#ifdef CONFIG_X86_32
10328 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10329 +{
10330 + struct desc_struct d;
10331 +
10332 + if (likely(limit))
10333 + limit = (limit - 1UL) >> PAGE_SHIFT;
10334 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
10335 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10336 +}
10337 +#endif
10338 +
10339 #endif /* _ASM_X86_DESC_H */
10340 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10341 index 278441f..b95a174 100644
10342 --- a/arch/x86/include/asm/desc_defs.h
10343 +++ b/arch/x86/include/asm/desc_defs.h
10344 @@ -31,6 +31,12 @@ struct desc_struct {
10345 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10346 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10347 };
10348 + struct {
10349 + u16 offset_low;
10350 + u16 seg;
10351 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10352 + unsigned offset_high: 16;
10353 + } gate;
10354 };
10355 } __attribute__((packed));
10356
10357 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10358 index 3778256..c5d4fce 100644
10359 --- a/arch/x86/include/asm/e820.h
10360 +++ b/arch/x86/include/asm/e820.h
10361 @@ -69,7 +69,7 @@ struct e820map {
10362 #define ISA_START_ADDRESS 0xa0000
10363 #define ISA_END_ADDRESS 0x100000
10364
10365 -#define BIOS_BEGIN 0x000a0000
10366 +#define BIOS_BEGIN 0x000c0000
10367 #define BIOS_END 0x00100000
10368
10369 #define BIOS_ROM_BASE 0xffe00000
10370 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10371 index 5939f44..193f4a7 100644
10372 --- a/arch/x86/include/asm/elf.h
10373 +++ b/arch/x86/include/asm/elf.h
10374 @@ -243,7 +243,25 @@ extern int force_personality32;
10375 the loader. We need to make sure that it is out of the way of the program
10376 that it will "exec", and that there is sufficient room for the brk. */
10377
10378 +#ifdef CONFIG_PAX_SEGMEXEC
10379 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10380 +#else
10381 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10382 +#endif
10383 +
10384 +#ifdef CONFIG_PAX_ASLR
10385 +#ifdef CONFIG_X86_32
10386 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10387 +
10388 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10389 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10390 +#else
10391 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
10392 +
10393 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10394 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10395 +#endif
10396 +#endif
10397
10398 /* This yields a mask that user programs can use to figure out what
10399 instruction set this CPU supports. This could be done in user space,
10400 @@ -296,16 +314,12 @@ do { \
10401
10402 #define ARCH_DLINFO \
10403 do { \
10404 - if (vdso_enabled) \
10405 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10406 - (unsigned long)current->mm->context.vdso); \
10407 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10408 } while (0)
10409
10410 #define ARCH_DLINFO_X32 \
10411 do { \
10412 - if (vdso_enabled) \
10413 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10414 - (unsigned long)current->mm->context.vdso); \
10415 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10416 } while (0)
10417
10418 #define AT_SYSINFO 32
10419 @@ -320,7 +334,7 @@ else \
10420
10421 #endif /* !CONFIG_X86_32 */
10422
10423 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10424 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10425
10426 #define VDSO_ENTRY \
10427 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10428 @@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
10429 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10430 #define compat_arch_setup_additional_pages syscall32_setup_pages
10431
10432 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10433 -#define arch_randomize_brk arch_randomize_brk
10434 -
10435 /*
10436 * True on X86_32 or when emulating IA32 on X86_64
10437 */
10438 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10439 index cc70c1c..d96d011 100644
10440 --- a/arch/x86/include/asm/emergency-restart.h
10441 +++ b/arch/x86/include/asm/emergency-restart.h
10442 @@ -15,6 +15,6 @@ enum reboot_type {
10443
10444 extern enum reboot_type reboot_type;
10445
10446 -extern void machine_emergency_restart(void);
10447 +extern void machine_emergency_restart(void) __noreturn;
10448
10449 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10450 diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
10451 index 4fa8815..71b121a 100644
10452 --- a/arch/x86/include/asm/fpu-internal.h
10453 +++ b/arch/x86/include/asm/fpu-internal.h
10454 @@ -86,6 +86,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10455 {
10456 int err;
10457
10458 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10459 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10460 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10461 +#endif
10462 +
10463 /* See comment in fxsave() below. */
10464 #ifdef CONFIG_AS_FXSAVEQ
10465 asm volatile("1: fxrstorq %[fx]\n\t"
10466 @@ -115,6 +120,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10467 {
10468 int err;
10469
10470 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10471 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10472 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10473 +#endif
10474 +
10475 /*
10476 * Clear the bytes not touched by the fxsave and reserved
10477 * for the SW usage.
10478 @@ -271,7 +281,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10479 "emms\n\t" /* clear stack tags */
10480 "fildl %P[addr]", /* set F?P to defined value */
10481 X86_FEATURE_FXSAVE_LEAK,
10482 - [addr] "m" (tsk->thread.fpu.has_fpu));
10483 + [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10484
10485 return fpu_restore_checking(&tsk->thread.fpu);
10486 }
10487 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10488 index 71ecbcb..bac10b7 100644
10489 --- a/arch/x86/include/asm/futex.h
10490 +++ b/arch/x86/include/asm/futex.h
10491 @@ -11,16 +11,18 @@
10492 #include <asm/processor.h>
10493
10494 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10495 + typecheck(u32 __user *, uaddr); \
10496 asm volatile("1:\t" insn "\n" \
10497 "2:\t.section .fixup,\"ax\"\n" \
10498 "3:\tmov\t%3, %1\n" \
10499 "\tjmp\t2b\n" \
10500 "\t.previous\n" \
10501 _ASM_EXTABLE(1b, 3b) \
10502 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10503 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10504 : "i" (-EFAULT), "0" (oparg), "1" (0))
10505
10506 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10507 + typecheck(u32 __user *, uaddr); \
10508 asm volatile("1:\tmovl %2, %0\n" \
10509 "\tmovl\t%0, %3\n" \
10510 "\t" insn "\n" \
10511 @@ -33,7 +35,7 @@
10512 _ASM_EXTABLE(1b, 4b) \
10513 _ASM_EXTABLE(2b, 4b) \
10514 : "=&a" (oldval), "=&r" (ret), \
10515 - "+m" (*uaddr), "=&r" (tem) \
10516 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10517 : "r" (oparg), "i" (-EFAULT), "1" (0))
10518
10519 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10520 @@ -60,10 +62,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10521
10522 switch (op) {
10523 case FUTEX_OP_SET:
10524 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10525 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10526 break;
10527 case FUTEX_OP_ADD:
10528 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10529 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10530 uaddr, oparg);
10531 break;
10532 case FUTEX_OP_OR:
10533 @@ -122,13 +124,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10534 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10535 return -EFAULT;
10536
10537 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10538 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10539 "2:\t.section .fixup, \"ax\"\n"
10540 "3:\tmov %3, %0\n"
10541 "\tjmp 2b\n"
10542 "\t.previous\n"
10543 _ASM_EXTABLE(1b, 3b)
10544 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10545 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10546 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10547 : "memory"
10548 );
10549 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10550 index eb92a6e..b98b2f4 100644
10551 --- a/arch/x86/include/asm/hw_irq.h
10552 +++ b/arch/x86/include/asm/hw_irq.h
10553 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10554 extern void enable_IO_APIC(void);
10555
10556 /* Statistics */
10557 -extern atomic_t irq_err_count;
10558 -extern atomic_t irq_mis_count;
10559 +extern atomic_unchecked_t irq_err_count;
10560 +extern atomic_unchecked_t irq_mis_count;
10561
10562 /* EISA */
10563 extern void eisa_set_level_irq(unsigned int irq);
10564 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10565 index d8e8eef..99f81ae 100644
10566 --- a/arch/x86/include/asm/io.h
10567 +++ b/arch/x86/include/asm/io.h
10568 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10569
10570 #include <linux/vmalloc.h>
10571
10572 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10573 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10574 +{
10575 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10576 +}
10577 +
10578 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10579 +{
10580 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10581 +}
10582 +
10583 /*
10584 * Convert a virtual cached pointer to an uncached pointer
10585 */
10586 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10587 index bba3cf8..06bc8da 100644
10588 --- a/arch/x86/include/asm/irqflags.h
10589 +++ b/arch/x86/include/asm/irqflags.h
10590 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10591 sti; \
10592 sysexit
10593
10594 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10595 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10596 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10597 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10598 +
10599 #else
10600 #define INTERRUPT_RETURN iret
10601 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10602 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10603 index 5478825..839e88c 100644
10604 --- a/arch/x86/include/asm/kprobes.h
10605 +++ b/arch/x86/include/asm/kprobes.h
10606 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10607 #define RELATIVEJUMP_SIZE 5
10608 #define RELATIVECALL_OPCODE 0xe8
10609 #define RELATIVE_ADDR_SIZE 4
10610 -#define MAX_STACK_SIZE 64
10611 -#define MIN_STACK_SIZE(ADDR) \
10612 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10613 - THREAD_SIZE - (unsigned long)(ADDR))) \
10614 - ? (MAX_STACK_SIZE) \
10615 - : (((unsigned long)current_thread_info()) + \
10616 - THREAD_SIZE - (unsigned long)(ADDR)))
10617 +#define MAX_STACK_SIZE 64UL
10618 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10619
10620 #define flush_insn_slot(p) do { } while (0)
10621
10622 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10623 index e216ba0..453f6ec 100644
10624 --- a/arch/x86/include/asm/kvm_host.h
10625 +++ b/arch/x86/include/asm/kvm_host.h
10626 @@ -679,7 +679,7 @@ struct kvm_x86_ops {
10627 int (*check_intercept)(struct kvm_vcpu *vcpu,
10628 struct x86_instruction_info *info,
10629 enum x86_intercept_stage stage);
10630 -};
10631 +} __do_const;
10632
10633 struct kvm_arch_async_pf {
10634 u32 token;
10635 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10636 index c8bed0d..e5721fa 100644
10637 --- a/arch/x86/include/asm/local.h
10638 +++ b/arch/x86/include/asm/local.h
10639 @@ -17,26 +17,58 @@ typedef struct {
10640
10641 static inline void local_inc(local_t *l)
10642 {
10643 - asm volatile(_ASM_INC "%0"
10644 + asm volatile(_ASM_INC "%0\n"
10645 +
10646 +#ifdef CONFIG_PAX_REFCOUNT
10647 + "jno 0f\n"
10648 + _ASM_DEC "%0\n"
10649 + "int $4\n0:\n"
10650 + _ASM_EXTABLE(0b, 0b)
10651 +#endif
10652 +
10653 : "+m" (l->a.counter));
10654 }
10655
10656 static inline void local_dec(local_t *l)
10657 {
10658 - asm volatile(_ASM_DEC "%0"
10659 + asm volatile(_ASM_DEC "%0\n"
10660 +
10661 +#ifdef CONFIG_PAX_REFCOUNT
10662 + "jno 0f\n"
10663 + _ASM_INC "%0\n"
10664 + "int $4\n0:\n"
10665 + _ASM_EXTABLE(0b, 0b)
10666 +#endif
10667 +
10668 : "+m" (l->a.counter));
10669 }
10670
10671 static inline void local_add(long i, local_t *l)
10672 {
10673 - asm volatile(_ASM_ADD "%1,%0"
10674 + asm volatile(_ASM_ADD "%1,%0\n"
10675 +
10676 +#ifdef CONFIG_PAX_REFCOUNT
10677 + "jno 0f\n"
10678 + _ASM_SUB "%1,%0\n"
10679 + "int $4\n0:\n"
10680 + _ASM_EXTABLE(0b, 0b)
10681 +#endif
10682 +
10683 : "+m" (l->a.counter)
10684 : "ir" (i));
10685 }
10686
10687 static inline void local_sub(long i, local_t *l)
10688 {
10689 - asm volatile(_ASM_SUB "%1,%0"
10690 + asm volatile(_ASM_SUB "%1,%0\n"
10691 +
10692 +#ifdef CONFIG_PAX_REFCOUNT
10693 + "jno 0f\n"
10694 + _ASM_ADD "%1,%0\n"
10695 + "int $4\n0:\n"
10696 + _ASM_EXTABLE(0b, 0b)
10697 +#endif
10698 +
10699 : "+m" (l->a.counter)
10700 : "ir" (i));
10701 }
10702 @@ -54,7 +86,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10703 {
10704 unsigned char c;
10705
10706 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10707 + asm volatile(_ASM_SUB "%2,%0\n"
10708 +
10709 +#ifdef CONFIG_PAX_REFCOUNT
10710 + "jno 0f\n"
10711 + _ASM_ADD "%2,%0\n"
10712 + "int $4\n0:\n"
10713 + _ASM_EXTABLE(0b, 0b)
10714 +#endif
10715 +
10716 + "sete %1\n"
10717 : "+m" (l->a.counter), "=qm" (c)
10718 : "ir" (i) : "memory");
10719 return c;
10720 @@ -72,7 +113,16 @@ static inline int local_dec_and_test(local_t *l)
10721 {
10722 unsigned char c;
10723
10724 - asm volatile(_ASM_DEC "%0; sete %1"
10725 + asm volatile(_ASM_DEC "%0\n"
10726 +
10727 +#ifdef CONFIG_PAX_REFCOUNT
10728 + "jno 0f\n"
10729 + _ASM_INC "%0\n"
10730 + "int $4\n0:\n"
10731 + _ASM_EXTABLE(0b, 0b)
10732 +#endif
10733 +
10734 + "sete %1\n"
10735 : "+m" (l->a.counter), "=qm" (c)
10736 : : "memory");
10737 return c != 0;
10738 @@ -90,7 +140,16 @@ static inline int local_inc_and_test(local_t *l)
10739 {
10740 unsigned char c;
10741
10742 - asm volatile(_ASM_INC "%0; sete %1"
10743 + asm volatile(_ASM_INC "%0\n"
10744 +
10745 +#ifdef CONFIG_PAX_REFCOUNT
10746 + "jno 0f\n"
10747 + _ASM_DEC "%0\n"
10748 + "int $4\n0:\n"
10749 + _ASM_EXTABLE(0b, 0b)
10750 +#endif
10751 +
10752 + "sete %1\n"
10753 : "+m" (l->a.counter), "=qm" (c)
10754 : : "memory");
10755 return c != 0;
10756 @@ -109,7 +168,16 @@ static inline int local_add_negative(long i, local_t *l)
10757 {
10758 unsigned char c;
10759
10760 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10761 + asm volatile(_ASM_ADD "%2,%0\n"
10762 +
10763 +#ifdef CONFIG_PAX_REFCOUNT
10764 + "jno 0f\n"
10765 + _ASM_SUB "%2,%0\n"
10766 + "int $4\n0:\n"
10767 + _ASM_EXTABLE(0b, 0b)
10768 +#endif
10769 +
10770 + "sets %1\n"
10771 : "+m" (l->a.counter), "=qm" (c)
10772 : "ir" (i) : "memory");
10773 return c;
10774 @@ -132,7 +200,15 @@ static inline long local_add_return(long i, local_t *l)
10775 #endif
10776 /* Modern 486+ processor */
10777 __i = i;
10778 - asm volatile(_ASM_XADD "%0, %1;"
10779 + asm volatile(_ASM_XADD "%0, %1\n"
10780 +
10781 +#ifdef CONFIG_PAX_REFCOUNT
10782 + "jno 0f\n"
10783 + _ASM_MOV "%0,%1\n"
10784 + "int $4\n0:\n"
10785 + _ASM_EXTABLE(0b, 0b)
10786 +#endif
10787 +
10788 : "+r" (i), "+m" (l->a.counter)
10789 : : "memory");
10790 return i + __i;
10791 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10792 index 593e51d..fa69c9a 100644
10793 --- a/arch/x86/include/asm/mman.h
10794 +++ b/arch/x86/include/asm/mman.h
10795 @@ -5,4 +5,14 @@
10796
10797 #include <asm-generic/mman.h>
10798
10799 +#ifdef __KERNEL__
10800 +#ifndef __ASSEMBLY__
10801 +#ifdef CONFIG_X86_32
10802 +#define arch_mmap_check i386_mmap_check
10803 +int i386_mmap_check(unsigned long addr, unsigned long len,
10804 + unsigned long flags);
10805 +#endif
10806 +#endif
10807 +#endif
10808 +
10809 #endif /* _ASM_X86_MMAN_H */
10810 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10811 index 5f55e69..e20bfb1 100644
10812 --- a/arch/x86/include/asm/mmu.h
10813 +++ b/arch/x86/include/asm/mmu.h
10814 @@ -9,7 +9,7 @@
10815 * we put the segment information here.
10816 */
10817 typedef struct {
10818 - void *ldt;
10819 + struct desc_struct *ldt;
10820 int size;
10821
10822 #ifdef CONFIG_X86_64
10823 @@ -18,7 +18,19 @@ typedef struct {
10824 #endif
10825
10826 struct mutex lock;
10827 - void *vdso;
10828 + unsigned long vdso;
10829 +
10830 +#ifdef CONFIG_X86_32
10831 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10832 + unsigned long user_cs_base;
10833 + unsigned long user_cs_limit;
10834 +
10835 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10836 + cpumask_t cpu_user_cs_mask;
10837 +#endif
10838 +
10839 +#endif
10840 +#endif
10841 } mm_context_t;
10842
10843 #ifdef CONFIG_SMP
10844 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10845 index 6902152..da4283a 100644
10846 --- a/arch/x86/include/asm/mmu_context.h
10847 +++ b/arch/x86/include/asm/mmu_context.h
10848 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10849
10850 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10851 {
10852 +
10853 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10854 + unsigned int i;
10855 + pgd_t *pgd;
10856 +
10857 + pax_open_kernel();
10858 + pgd = get_cpu_pgd(smp_processor_id());
10859 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10860 + set_pgd_batched(pgd+i, native_make_pgd(0));
10861 + pax_close_kernel();
10862 +#endif
10863 +
10864 #ifdef CONFIG_SMP
10865 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10866 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10867 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10868 struct task_struct *tsk)
10869 {
10870 unsigned cpu = smp_processor_id();
10871 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10872 + int tlbstate = TLBSTATE_OK;
10873 +#endif
10874
10875 if (likely(prev != next)) {
10876 #ifdef CONFIG_SMP
10877 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10878 + tlbstate = percpu_read(cpu_tlbstate.state);
10879 +#endif
10880 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10881 percpu_write(cpu_tlbstate.active_mm, next);
10882 #endif
10883 cpumask_set_cpu(cpu, mm_cpumask(next));
10884
10885 /* Re-load page tables */
10886 +#ifdef CONFIG_PAX_PER_CPU_PGD
10887 + pax_open_kernel();
10888 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
10889 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
10890 + pax_close_kernel();
10891 + load_cr3(get_cpu_pgd(cpu));
10892 +#else
10893 load_cr3(next->pgd);
10894 +#endif
10895
10896 /* stop flush ipis for the previous mm */
10897 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10898 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10899 */
10900 if (unlikely(prev->context.ldt != next->context.ldt))
10901 load_LDT_nolock(&next->context);
10902 - }
10903 +
10904 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10905 + if (!(__supported_pte_mask & _PAGE_NX)) {
10906 + smp_mb__before_clear_bit();
10907 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10908 + smp_mb__after_clear_bit();
10909 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10910 + }
10911 +#endif
10912 +
10913 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10914 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10915 + prev->context.user_cs_limit != next->context.user_cs_limit))
10916 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10917 #ifdef CONFIG_SMP
10918 + else if (unlikely(tlbstate != TLBSTATE_OK))
10919 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10920 +#endif
10921 +#endif
10922 +
10923 + }
10924 else {
10925 +
10926 +#ifdef CONFIG_PAX_PER_CPU_PGD
10927 + pax_open_kernel();
10928 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
10929 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
10930 + pax_close_kernel();
10931 + load_cr3(get_cpu_pgd(cpu));
10932 +#endif
10933 +
10934 +#ifdef CONFIG_SMP
10935 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10936 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10937
10938 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10939 * tlb flush IPI delivery. We must reload CR3
10940 * to make sure to use no freed page tables.
10941 */
10942 +
10943 +#ifndef CONFIG_PAX_PER_CPU_PGD
10944 load_cr3(next->pgd);
10945 +#endif
10946 +
10947 load_LDT_nolock(&next->context);
10948 +
10949 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10950 + if (!(__supported_pte_mask & _PAGE_NX))
10951 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10952 +#endif
10953 +
10954 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10955 +#ifdef CONFIG_PAX_PAGEEXEC
10956 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
10957 +#endif
10958 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10959 +#endif
10960 +
10961 }
10962 +#endif
10963 }
10964 -#endif
10965 }
10966
10967 #define activate_mm(prev, next) \
10968 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10969 index 9eae775..c914fea 100644
10970 --- a/arch/x86/include/asm/module.h
10971 +++ b/arch/x86/include/asm/module.h
10972 @@ -5,6 +5,7 @@
10973
10974 #ifdef CONFIG_X86_64
10975 /* X86_64 does not define MODULE_PROC_FAMILY */
10976 +#define MODULE_PROC_FAMILY ""
10977 #elif defined CONFIG_M386
10978 #define MODULE_PROC_FAMILY "386 "
10979 #elif defined CONFIG_M486
10980 @@ -59,8 +60,20 @@
10981 #error unknown processor family
10982 #endif
10983
10984 -#ifdef CONFIG_X86_32
10985 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
10986 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10987 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10988 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10989 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10990 +#else
10991 +#define MODULE_PAX_KERNEXEC ""
10992 #endif
10993
10994 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10995 +#define MODULE_PAX_UDEREF "UDEREF "
10996 +#else
10997 +#define MODULE_PAX_UDEREF ""
10998 +#endif
10999 +
11000 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
11001 +
11002 #endif /* _ASM_X86_MODULE_H */
11003 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11004 index 7639dbf..e08a58c 100644
11005 --- a/arch/x86/include/asm/page_64_types.h
11006 +++ b/arch/x86/include/asm/page_64_types.h
11007 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11008
11009 /* duplicated to the one in bootmem.h */
11010 extern unsigned long max_pfn;
11011 -extern unsigned long phys_base;
11012 +extern const unsigned long phys_base;
11013
11014 extern unsigned long __phys_addr(unsigned long);
11015 #define __phys_reloc_hide(x) (x)
11016 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11017 index aa0f913..0c5bc6a 100644
11018 --- a/arch/x86/include/asm/paravirt.h
11019 +++ b/arch/x86/include/asm/paravirt.h
11020 @@ -668,6 +668,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
11021 val);
11022 }
11023
11024 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11025 +{
11026 + pgdval_t val = native_pgd_val(pgd);
11027 +
11028 + if (sizeof(pgdval_t) > sizeof(long))
11029 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11030 + val, (u64)val >> 32);
11031 + else
11032 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11033 + val);
11034 +}
11035 +
11036 static inline void pgd_clear(pgd_t *pgdp)
11037 {
11038 set_pgd(pgdp, __pgd(0));
11039 @@ -749,6 +761,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11040 pv_mmu_ops.set_fixmap(idx, phys, flags);
11041 }
11042
11043 +#ifdef CONFIG_PAX_KERNEXEC
11044 +static inline unsigned long pax_open_kernel(void)
11045 +{
11046 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11047 +}
11048 +
11049 +static inline unsigned long pax_close_kernel(void)
11050 +{
11051 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11052 +}
11053 +#else
11054 +static inline unsigned long pax_open_kernel(void) { return 0; }
11055 +static inline unsigned long pax_close_kernel(void) { return 0; }
11056 +#endif
11057 +
11058 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11059
11060 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
11061 @@ -965,7 +992,7 @@ extern void default_banner(void);
11062
11063 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11064 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11065 -#define PARA_INDIRECT(addr) *%cs:addr
11066 +#define PARA_INDIRECT(addr) *%ss:addr
11067 #endif
11068
11069 #define INTERRUPT_RETURN \
11070 @@ -1042,6 +1069,21 @@ extern void default_banner(void);
11071 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11072 CLBR_NONE, \
11073 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11074 +
11075 +#define GET_CR0_INTO_RDI \
11076 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11077 + mov %rax,%rdi
11078 +
11079 +#define SET_RDI_INTO_CR0 \
11080 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11081 +
11082 +#define GET_CR3_INTO_RDI \
11083 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11084 + mov %rax,%rdi
11085 +
11086 +#define SET_RDI_INTO_CR3 \
11087 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11088 +
11089 #endif /* CONFIG_X86_32 */
11090
11091 #endif /* __ASSEMBLY__ */
11092 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11093 index 8e8b9a4..f07d725 100644
11094 --- a/arch/x86/include/asm/paravirt_types.h
11095 +++ b/arch/x86/include/asm/paravirt_types.h
11096 @@ -84,20 +84,20 @@ struct pv_init_ops {
11097 */
11098 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11099 unsigned long addr, unsigned len);
11100 -};
11101 +} __no_const;
11102
11103
11104 struct pv_lazy_ops {
11105 /* Set deferred update mode, used for batching operations. */
11106 void (*enter)(void);
11107 void (*leave)(void);
11108 -};
11109 +} __no_const;
11110
11111 struct pv_time_ops {
11112 unsigned long long (*sched_clock)(void);
11113 unsigned long long (*steal_clock)(int cpu);
11114 unsigned long (*get_tsc_khz)(void);
11115 -};
11116 +} __no_const;
11117
11118 struct pv_cpu_ops {
11119 /* hooks for various privileged instructions */
11120 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
11121
11122 void (*start_context_switch)(struct task_struct *prev);
11123 void (*end_context_switch)(struct task_struct *next);
11124 -};
11125 +} __no_const;
11126
11127 struct pv_irq_ops {
11128 /*
11129 @@ -224,7 +224,7 @@ struct pv_apic_ops {
11130 unsigned long start_eip,
11131 unsigned long start_esp);
11132 #endif
11133 -};
11134 +} __no_const;
11135
11136 struct pv_mmu_ops {
11137 unsigned long (*read_cr2)(void);
11138 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
11139 struct paravirt_callee_save make_pud;
11140
11141 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11142 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11143 #endif /* PAGETABLE_LEVELS == 4 */
11144 #endif /* PAGETABLE_LEVELS >= 3 */
11145
11146 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
11147 an mfn. We can tell which is which from the index. */
11148 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11149 phys_addr_t phys, pgprot_t flags);
11150 +
11151 +#ifdef CONFIG_PAX_KERNEXEC
11152 + unsigned long (*pax_open_kernel)(void);
11153 + unsigned long (*pax_close_kernel)(void);
11154 +#endif
11155 +
11156 };
11157
11158 struct arch_spinlock;
11159 @@ -334,7 +341,7 @@ struct pv_lock_ops {
11160 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
11161 int (*spin_trylock)(struct arch_spinlock *lock);
11162 void (*spin_unlock)(struct arch_spinlock *lock);
11163 -};
11164 +} __no_const;
11165
11166 /* This contains all the paravirt structures: we get a convenient
11167 * number for each function using the offset which we use to indicate
11168 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11169 index b4389a4..7024269 100644
11170 --- a/arch/x86/include/asm/pgalloc.h
11171 +++ b/arch/x86/include/asm/pgalloc.h
11172 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11173 pmd_t *pmd, pte_t *pte)
11174 {
11175 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11176 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11177 +}
11178 +
11179 +static inline void pmd_populate_user(struct mm_struct *mm,
11180 + pmd_t *pmd, pte_t *pte)
11181 +{
11182 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11183 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11184 }
11185
11186 @@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
11187
11188 #ifdef CONFIG_X86_PAE
11189 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
11190 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
11191 +{
11192 + pud_populate(mm, pudp, pmd);
11193 +}
11194 #else /* !CONFIG_X86_PAE */
11195 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11196 {
11197 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11198 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
11199 }
11200 +
11201 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11202 +{
11203 + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11204 + set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
11205 +}
11206 #endif /* CONFIG_X86_PAE */
11207
11208 #if PAGETABLE_LEVELS > 3
11209 @@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11210 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
11211 }
11212
11213 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11214 +{
11215 + paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
11216 + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
11217 +}
11218 +
11219 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
11220 {
11221 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
11222 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11223 index 98391db..8f6984e 100644
11224 --- a/arch/x86/include/asm/pgtable-2level.h
11225 +++ b/arch/x86/include/asm/pgtable-2level.h
11226 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11227
11228 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11229 {
11230 + pax_open_kernel();
11231 *pmdp = pmd;
11232 + pax_close_kernel();
11233 }
11234
11235 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11236 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11237 index effff47..bbb8295 100644
11238 --- a/arch/x86/include/asm/pgtable-3level.h
11239 +++ b/arch/x86/include/asm/pgtable-3level.h
11240 @@ -31,6 +31,56 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
11241 ptep->pte_low = pte.pte_low;
11242 }
11243
11244 +#define __HAVE_ARCH_READ_PMD_ATOMIC
11245 +/*
11246 + * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
11247 + * a "*pmdp" dereference done by gcc. Problem is, in certain places
11248 + * where pte_offset_map_lock is called, concurrent page faults are
11249 + * allowed, if the mmap_sem is hold for reading. An example is mincore
11250 + * vs page faults vs MADV_DONTNEED. On the page fault side
11251 + * pmd_populate rightfully does a set_64bit, but if we're reading the
11252 + * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
11253 + * because gcc will not read the 64bit of the pmd atomically. To fix
11254 + * this all places running pmd_offset_map_lock() while holding the
11255 + * mmap_sem in read mode, shall read the pmdp pointer using this
11256 + * function to know if the pmd is null nor not, and in turn to know if
11257 + * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
11258 + * operations.
11259 + *
11260 + * Without THP if the mmap_sem is hold for reading, the
11261 + * pmd can only transition from null to not null while read_pmd_atomic runs.
11262 + * So there's no need of literally reading it atomically.
11263 + *
11264 + * With THP if the mmap_sem is hold for reading, the pmd can become
11265 + * THP or null or point to a pte (and in turn become "stable") at any
11266 + * time under read_pmd_atomic, so it's mandatory to read it atomically
11267 + * with cmpxchg8b.
11268 + */
11269 +#ifndef CONFIG_TRANSPARENT_HUGEPAGE
11270 +static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
11271 +{
11272 + pmdval_t ret;
11273 + u32 *tmp = (u32 *)pmdp;
11274 +
11275 + ret = (pmdval_t) (*tmp);
11276 + if (ret) {
11277 + /*
11278 + * If the low part is null, we must not read the high part
11279 + * or we can end up with a partial pmd.
11280 + */
11281 + smp_rmb();
11282 + ret |= ((pmdval_t)*(tmp + 1)) << 32;
11283 + }
11284 +
11285 + return __pmd(ret);
11286 +}
11287 +#else /* CONFIG_TRANSPARENT_HUGEPAGE */
11288 +static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
11289 +{
11290 + return __pmd(atomic64_read((atomic64_t *)pmdp));
11291 +}
11292 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
11293 +
11294 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11295 {
11296 set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
11297 @@ -38,12 +88,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11298
11299 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11300 {
11301 + pax_open_kernel();
11302 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11303 + pax_close_kernel();
11304 }
11305
11306 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11307 {
11308 + pax_open_kernel();
11309 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11310 + pax_close_kernel();
11311 }
11312
11313 /*
11314 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11315 index 49afb3f..91a8c63 100644
11316 --- a/arch/x86/include/asm/pgtable.h
11317 +++ b/arch/x86/include/asm/pgtable.h
11318 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11319
11320 #ifndef __PAGETABLE_PUD_FOLDED
11321 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11322 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11323 #define pgd_clear(pgd) native_pgd_clear(pgd)
11324 #endif
11325
11326 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11327
11328 #define arch_end_context_switch(prev) do {} while(0)
11329
11330 +#define pax_open_kernel() native_pax_open_kernel()
11331 +#define pax_close_kernel() native_pax_close_kernel()
11332 #endif /* CONFIG_PARAVIRT */
11333
11334 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
11335 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11336 +
11337 +#ifdef CONFIG_PAX_KERNEXEC
11338 +static inline unsigned long native_pax_open_kernel(void)
11339 +{
11340 + unsigned long cr0;
11341 +
11342 + preempt_disable();
11343 + barrier();
11344 + cr0 = read_cr0() ^ X86_CR0_WP;
11345 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
11346 + write_cr0(cr0);
11347 + return cr0 ^ X86_CR0_WP;
11348 +}
11349 +
11350 +static inline unsigned long native_pax_close_kernel(void)
11351 +{
11352 + unsigned long cr0;
11353 +
11354 + cr0 = read_cr0() ^ X86_CR0_WP;
11355 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11356 + write_cr0(cr0);
11357 + barrier();
11358 + preempt_enable_no_resched();
11359 + return cr0 ^ X86_CR0_WP;
11360 +}
11361 +#else
11362 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
11363 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
11364 +#endif
11365 +
11366 /*
11367 * The following only work if pte_present() is true.
11368 * Undefined behaviour if not..
11369 */
11370 +static inline int pte_user(pte_t pte)
11371 +{
11372 + return pte_val(pte) & _PAGE_USER;
11373 +}
11374 +
11375 static inline int pte_dirty(pte_t pte)
11376 {
11377 return pte_flags(pte) & _PAGE_DIRTY;
11378 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11379 return pte_clear_flags(pte, _PAGE_RW);
11380 }
11381
11382 +static inline pte_t pte_mkread(pte_t pte)
11383 +{
11384 + return __pte(pte_val(pte) | _PAGE_USER);
11385 +}
11386 +
11387 static inline pte_t pte_mkexec(pte_t pte)
11388 {
11389 - return pte_clear_flags(pte, _PAGE_NX);
11390 +#ifdef CONFIG_X86_PAE
11391 + if (__supported_pte_mask & _PAGE_NX)
11392 + return pte_clear_flags(pte, _PAGE_NX);
11393 + else
11394 +#endif
11395 + return pte_set_flags(pte, _PAGE_USER);
11396 +}
11397 +
11398 +static inline pte_t pte_exprotect(pte_t pte)
11399 +{
11400 +#ifdef CONFIG_X86_PAE
11401 + if (__supported_pte_mask & _PAGE_NX)
11402 + return pte_set_flags(pte, _PAGE_NX);
11403 + else
11404 +#endif
11405 + return pte_clear_flags(pte, _PAGE_USER);
11406 }
11407
11408 static inline pte_t pte_mkdirty(pte_t pte)
11409 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11410 #endif
11411
11412 #ifndef __ASSEMBLY__
11413 +
11414 +#ifdef CONFIG_PAX_PER_CPU_PGD
11415 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11416 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11417 +{
11418 + return cpu_pgd[cpu];
11419 +}
11420 +#endif
11421 +
11422 #include <linux/mm_types.h>
11423
11424 static inline int pte_none(pte_t pte)
11425 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11426
11427 static inline int pgd_bad(pgd_t pgd)
11428 {
11429 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11430 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11431 }
11432
11433 static inline int pgd_none(pgd_t pgd)
11434 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
11435 * pgd_offset() returns a (pgd_t *)
11436 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11437 */
11438 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11439 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11440 +
11441 +#ifdef CONFIG_PAX_PER_CPU_PGD
11442 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11443 +#endif
11444 +
11445 /*
11446 * a shortcut which implies the use of the kernel's pgd, instead
11447 * of a process's
11448 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
11449 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11450 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11451
11452 +#ifdef CONFIG_X86_32
11453 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11454 +#else
11455 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11456 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11457 +
11458 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11459 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11460 +#else
11461 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11462 +#endif
11463 +
11464 +#endif
11465 +
11466 #ifndef __ASSEMBLY__
11467
11468 extern int direct_gbpages;
11469 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
11470 * dst and src can be on the same page, but the range must not overlap,
11471 * and must not cross a page boundary.
11472 */
11473 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11474 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11475 {
11476 - memcpy(dst, src, count * sizeof(pgd_t));
11477 + pax_open_kernel();
11478 + while (count--)
11479 + *dst++ = *src++;
11480 + pax_close_kernel();
11481 }
11482
11483 +#ifdef CONFIG_PAX_PER_CPU_PGD
11484 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
11485 +#endif
11486 +
11487 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11488 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
11489 +#else
11490 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
11491 +#endif
11492
11493 #include <asm-generic/pgtable.h>
11494 #endif /* __ASSEMBLY__ */
11495 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11496 index 0c92113..34a77c6 100644
11497 --- a/arch/x86/include/asm/pgtable_32.h
11498 +++ b/arch/x86/include/asm/pgtable_32.h
11499 @@ -25,9 +25,6 @@
11500 struct mm_struct;
11501 struct vm_area_struct;
11502
11503 -extern pgd_t swapper_pg_dir[1024];
11504 -extern pgd_t initial_page_table[1024];
11505 -
11506 static inline void pgtable_cache_init(void) { }
11507 static inline void check_pgt_cache(void) { }
11508 void paging_init(void);
11509 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11510 # include <asm/pgtable-2level.h>
11511 #endif
11512
11513 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11514 +extern pgd_t initial_page_table[PTRS_PER_PGD];
11515 +#ifdef CONFIG_X86_PAE
11516 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11517 +#endif
11518 +
11519 #if defined(CONFIG_HIGHPTE)
11520 #define pte_offset_map(dir, address) \
11521 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11522 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11523 /* Clear a kernel PTE and flush it from the TLB */
11524 #define kpte_clear_flush(ptep, vaddr) \
11525 do { \
11526 + pax_open_kernel(); \
11527 pte_clear(&init_mm, (vaddr), (ptep)); \
11528 + pax_close_kernel(); \
11529 __flush_tlb_one((vaddr)); \
11530 } while (0)
11531
11532 @@ -74,6 +79,9 @@ do { \
11533
11534 #endif /* !__ASSEMBLY__ */
11535
11536 +#define HAVE_ARCH_UNMAPPED_AREA
11537 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11538 +
11539 /*
11540 * kern_addr_valid() is (1) for FLATMEM and (0) for
11541 * SPARSEMEM and DISCONTIGMEM
11542 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11543 index ed5903b..c7fe163 100644
11544 --- a/arch/x86/include/asm/pgtable_32_types.h
11545 +++ b/arch/x86/include/asm/pgtable_32_types.h
11546 @@ -8,7 +8,7 @@
11547 */
11548 #ifdef CONFIG_X86_PAE
11549 # include <asm/pgtable-3level_types.h>
11550 -# define PMD_SIZE (1UL << PMD_SHIFT)
11551 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11552 # define PMD_MASK (~(PMD_SIZE - 1))
11553 #else
11554 # include <asm/pgtable-2level_types.h>
11555 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11556 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11557 #endif
11558
11559 +#ifdef CONFIG_PAX_KERNEXEC
11560 +#ifndef __ASSEMBLY__
11561 +extern unsigned char MODULES_EXEC_VADDR[];
11562 +extern unsigned char MODULES_EXEC_END[];
11563 +#endif
11564 +#include <asm/boot.h>
11565 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11566 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11567 +#else
11568 +#define ktla_ktva(addr) (addr)
11569 +#define ktva_ktla(addr) (addr)
11570 +#endif
11571 +
11572 #define MODULES_VADDR VMALLOC_START
11573 #define MODULES_END VMALLOC_END
11574 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11575 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11576 index 975f709..9f779c9 100644
11577 --- a/arch/x86/include/asm/pgtable_64.h
11578 +++ b/arch/x86/include/asm/pgtable_64.h
11579 @@ -16,10 +16,14 @@
11580
11581 extern pud_t level3_kernel_pgt[512];
11582 extern pud_t level3_ident_pgt[512];
11583 +extern pud_t level3_vmalloc_start_pgt[512];
11584 +extern pud_t level3_vmalloc_end_pgt[512];
11585 +extern pud_t level3_vmemmap_pgt[512];
11586 +extern pud_t level2_vmemmap_pgt[512];
11587 extern pmd_t level2_kernel_pgt[512];
11588 extern pmd_t level2_fixmap_pgt[512];
11589 -extern pmd_t level2_ident_pgt[512];
11590 -extern pgd_t init_level4_pgt[];
11591 +extern pmd_t level2_ident_pgt[512*2];
11592 +extern pgd_t init_level4_pgt[512];
11593
11594 #define swapper_pg_dir init_level4_pgt
11595
11596 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11597
11598 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11599 {
11600 + pax_open_kernel();
11601 *pmdp = pmd;
11602 + pax_close_kernel();
11603 }
11604
11605 static inline void native_pmd_clear(pmd_t *pmd)
11606 @@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
11607
11608 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11609 {
11610 + pax_open_kernel();
11611 *pudp = pud;
11612 + pax_close_kernel();
11613 }
11614
11615 static inline void native_pud_clear(pud_t *pud)
11616 @@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
11617
11618 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11619 {
11620 + pax_open_kernel();
11621 + *pgdp = pgd;
11622 + pax_close_kernel();
11623 +}
11624 +
11625 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11626 +{
11627 *pgdp = pgd;
11628 }
11629
11630 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11631 index 766ea16..5b96cb3 100644
11632 --- a/arch/x86/include/asm/pgtable_64_types.h
11633 +++ b/arch/x86/include/asm/pgtable_64_types.h
11634 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11635 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11636 #define MODULES_END _AC(0xffffffffff000000, UL)
11637 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11638 +#define MODULES_EXEC_VADDR MODULES_VADDR
11639 +#define MODULES_EXEC_END MODULES_END
11640 +
11641 +#define ktla_ktva(addr) (addr)
11642 +#define ktva_ktla(addr) (addr)
11643
11644 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11645 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11646 index 013286a..8b42f4f 100644
11647 --- a/arch/x86/include/asm/pgtable_types.h
11648 +++ b/arch/x86/include/asm/pgtable_types.h
11649 @@ -16,13 +16,12 @@
11650 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11651 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11652 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11653 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11654 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11655 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11656 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11657 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11658 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11659 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11660 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11661 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11662 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11663 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11664
11665 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11666 @@ -40,7 +39,6 @@
11667 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11668 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11669 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11670 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11671 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11672 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11673 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11674 @@ -57,8 +55,10 @@
11675
11676 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11677 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11678 -#else
11679 +#elif defined(CONFIG_KMEMCHECK)
11680 #define _PAGE_NX (_AT(pteval_t, 0))
11681 +#else
11682 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11683 #endif
11684
11685 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11686 @@ -96,6 +96,9 @@
11687 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11688 _PAGE_ACCESSED)
11689
11690 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11691 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11692 +
11693 #define __PAGE_KERNEL_EXEC \
11694 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11695 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11696 @@ -106,7 +109,7 @@
11697 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11698 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11699 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11700 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11701 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11702 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11703 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11704 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11705 @@ -168,8 +171,8 @@
11706 * bits are combined, this will alow user to access the high address mapped
11707 * VDSO in the presence of CONFIG_COMPAT_VDSO
11708 */
11709 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11710 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11711 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11712 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11713 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11714 #endif
11715
11716 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11717 {
11718 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11719 }
11720 +#endif
11721
11722 +#if PAGETABLE_LEVELS == 3
11723 +#include <asm-generic/pgtable-nopud.h>
11724 +#endif
11725 +
11726 +#if PAGETABLE_LEVELS == 2
11727 +#include <asm-generic/pgtable-nopmd.h>
11728 +#endif
11729 +
11730 +#ifndef __ASSEMBLY__
11731 #if PAGETABLE_LEVELS > 3
11732 typedef struct { pudval_t pud; } pud_t;
11733
11734 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11735 return pud.pud;
11736 }
11737 #else
11738 -#include <asm-generic/pgtable-nopud.h>
11739 -
11740 static inline pudval_t native_pud_val(pud_t pud)
11741 {
11742 return native_pgd_val(pud.pgd);
11743 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11744 return pmd.pmd;
11745 }
11746 #else
11747 -#include <asm-generic/pgtable-nopmd.h>
11748 -
11749 static inline pmdval_t native_pmd_val(pmd_t pmd)
11750 {
11751 return native_pgd_val(pmd.pud.pgd);
11752 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11753
11754 extern pteval_t __supported_pte_mask;
11755 extern void set_nx(void);
11756 -extern int nx_enabled;
11757
11758 #define pgprot_writecombine pgprot_writecombine
11759 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11760 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11761 index 4fa7dcc..764e33a 100644
11762 --- a/arch/x86/include/asm/processor.h
11763 +++ b/arch/x86/include/asm/processor.h
11764 @@ -276,7 +276,7 @@ struct tss_struct {
11765
11766 } ____cacheline_aligned;
11767
11768 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11769 +extern struct tss_struct init_tss[NR_CPUS];
11770
11771 /*
11772 * Save the original ist values for checking stack pointers during debugging
11773 @@ -807,11 +807,18 @@ static inline void spin_lock_prefetch(const void *x)
11774 */
11775 #define TASK_SIZE PAGE_OFFSET
11776 #define TASK_SIZE_MAX TASK_SIZE
11777 +
11778 +#ifdef CONFIG_PAX_SEGMEXEC
11779 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11780 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11781 +#else
11782 #define STACK_TOP TASK_SIZE
11783 -#define STACK_TOP_MAX STACK_TOP
11784 +#endif
11785 +
11786 +#define STACK_TOP_MAX TASK_SIZE
11787
11788 #define INIT_THREAD { \
11789 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11790 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11791 .vm86_info = NULL, \
11792 .sysenter_cs = __KERNEL_CS, \
11793 .io_bitmap_ptr = NULL, \
11794 @@ -825,7 +832,7 @@ static inline void spin_lock_prefetch(const void *x)
11795 */
11796 #define INIT_TSS { \
11797 .x86_tss = { \
11798 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11799 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11800 .ss0 = __KERNEL_DS, \
11801 .ss1 = __KERNEL_CS, \
11802 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11803 @@ -836,11 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
11804 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11805
11806 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11807 -#define KSTK_TOP(info) \
11808 -({ \
11809 - unsigned long *__ptr = (unsigned long *)(info); \
11810 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11811 -})
11812 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11813
11814 /*
11815 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11816 @@ -855,7 +858,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11817 #define task_pt_regs(task) \
11818 ({ \
11819 struct pt_regs *__regs__; \
11820 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11821 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11822 __regs__ - 1; \
11823 })
11824
11825 @@ -865,13 +868,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11826 /*
11827 * User space process size. 47bits minus one guard page.
11828 */
11829 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11830 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11831
11832 /* This decides where the kernel will search for a free chunk of vm
11833 * space during mmap's.
11834 */
11835 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11836 - 0xc0000000 : 0xFFFFe000)
11837 + 0xc0000000 : 0xFFFFf000)
11838
11839 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
11840 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11841 @@ -882,11 +885,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11842 #define STACK_TOP_MAX TASK_SIZE_MAX
11843
11844 #define INIT_THREAD { \
11845 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11846 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11847 }
11848
11849 #define INIT_TSS { \
11850 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11851 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11852 }
11853
11854 /*
11855 @@ -914,6 +917,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11856 */
11857 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11858
11859 +#ifdef CONFIG_PAX_SEGMEXEC
11860 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11861 +#endif
11862 +
11863 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11864
11865 /* Get/set a process' ability to use the timestamp counter instruction */
11866 @@ -976,12 +983,12 @@ extern bool cpu_has_amd_erratum(const int *);
11867
11868 void cpu_idle_wait(void);
11869
11870 -extern unsigned long arch_align_stack(unsigned long sp);
11871 +#define arch_align_stack(x) ((x) & ~0xfUL)
11872 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11873
11874 void default_idle(void);
11875 bool set_pm_idle_to_default(void);
11876
11877 -void stop_this_cpu(void *dummy);
11878 +void stop_this_cpu(void *dummy) __noreturn;
11879
11880 #endif /* _ASM_X86_PROCESSOR_H */
11881 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11882 index dcfde52..dbfea06 100644
11883 --- a/arch/x86/include/asm/ptrace.h
11884 +++ b/arch/x86/include/asm/ptrace.h
11885 @@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11886 }
11887
11888 /*
11889 - * user_mode_vm(regs) determines whether a register set came from user mode.
11890 + * user_mode(regs) determines whether a register set came from user mode.
11891 * This is true if V8086 mode was enabled OR if the register set was from
11892 * protected mode with RPL-3 CS value. This tricky test checks that with
11893 * one comparison. Many places in the kernel can bypass this full check
11894 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11895 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11896 + * be used.
11897 */
11898 -static inline int user_mode(struct pt_regs *regs)
11899 +static inline int user_mode_novm(struct pt_regs *regs)
11900 {
11901 #ifdef CONFIG_X86_32
11902 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11903 #else
11904 - return !!(regs->cs & 3);
11905 + return !!(regs->cs & SEGMENT_RPL_MASK);
11906 #endif
11907 }
11908
11909 -static inline int user_mode_vm(struct pt_regs *regs)
11910 +static inline int user_mode(struct pt_regs *regs)
11911 {
11912 #ifdef CONFIG_X86_32
11913 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11914 USER_RPL;
11915 #else
11916 - return user_mode(regs);
11917 + return user_mode_novm(regs);
11918 #endif
11919 }
11920
11921 @@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11922 #ifdef CONFIG_X86_64
11923 static inline bool user_64bit_mode(struct pt_regs *regs)
11924 {
11925 + unsigned long cs = regs->cs & 0xffff;
11926 #ifndef CONFIG_PARAVIRT
11927 /*
11928 * On non-paravirt systems, this is the only long mode CPL 3
11929 * selector. We do not allow long mode selectors in the LDT.
11930 */
11931 - return regs->cs == __USER_CS;
11932 + return cs == __USER_CS;
11933 #else
11934 /* Headers are too twisted for this to go in paravirt.h. */
11935 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11936 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11937 #endif
11938 }
11939 #endif
11940 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11941 index 92f29706..a79cbbb 100644
11942 --- a/arch/x86/include/asm/reboot.h
11943 +++ b/arch/x86/include/asm/reboot.h
11944 @@ -6,19 +6,19 @@
11945 struct pt_regs;
11946
11947 struct machine_ops {
11948 - void (*restart)(char *cmd);
11949 - void (*halt)(void);
11950 - void (*power_off)(void);
11951 + void (* __noreturn restart)(char *cmd);
11952 + void (* __noreturn halt)(void);
11953 + void (* __noreturn power_off)(void);
11954 void (*shutdown)(void);
11955 void (*crash_shutdown)(struct pt_regs *);
11956 - void (*emergency_restart)(void);
11957 -};
11958 + void (* __noreturn emergency_restart)(void);
11959 +} __no_const;
11960
11961 extern struct machine_ops machine_ops;
11962
11963 void native_machine_crash_shutdown(struct pt_regs *regs);
11964 void native_machine_shutdown(void);
11965 -void machine_real_restart(unsigned int type);
11966 +void machine_real_restart(unsigned int type) __noreturn;
11967 /* These must match dispatch_table in reboot_32.S */
11968 #define MRR_BIOS 0
11969 #define MRR_APM 1
11970 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11971 index 2dbe4a7..ce1db00 100644
11972 --- a/arch/x86/include/asm/rwsem.h
11973 +++ b/arch/x86/include/asm/rwsem.h
11974 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11975 {
11976 asm volatile("# beginning down_read\n\t"
11977 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11978 +
11979 +#ifdef CONFIG_PAX_REFCOUNT
11980 + "jno 0f\n"
11981 + LOCK_PREFIX _ASM_DEC "(%1)\n"
11982 + "int $4\n0:\n"
11983 + _ASM_EXTABLE(0b, 0b)
11984 +#endif
11985 +
11986 /* adds 0x00000001 */
11987 " jns 1f\n"
11988 " call call_rwsem_down_read_failed\n"
11989 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11990 "1:\n\t"
11991 " mov %1,%2\n\t"
11992 " add %3,%2\n\t"
11993 +
11994 +#ifdef CONFIG_PAX_REFCOUNT
11995 + "jno 0f\n"
11996 + "sub %3,%2\n"
11997 + "int $4\n0:\n"
11998 + _ASM_EXTABLE(0b, 0b)
11999 +#endif
12000 +
12001 " jle 2f\n\t"
12002 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12003 " jnz 1b\n\t"
12004 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12005 long tmp;
12006 asm volatile("# beginning down_write\n\t"
12007 LOCK_PREFIX " xadd %1,(%2)\n\t"
12008 +
12009 +#ifdef CONFIG_PAX_REFCOUNT
12010 + "jno 0f\n"
12011 + "mov %1,(%2)\n"
12012 + "int $4\n0:\n"
12013 + _ASM_EXTABLE(0b, 0b)
12014 +#endif
12015 +
12016 /* adds 0xffff0001, returns the old value */
12017 " test %1,%1\n\t"
12018 /* was the count 0 before? */
12019 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12020 long tmp;
12021 asm volatile("# beginning __up_read\n\t"
12022 LOCK_PREFIX " xadd %1,(%2)\n\t"
12023 +
12024 +#ifdef CONFIG_PAX_REFCOUNT
12025 + "jno 0f\n"
12026 + "mov %1,(%2)\n"
12027 + "int $4\n0:\n"
12028 + _ASM_EXTABLE(0b, 0b)
12029 +#endif
12030 +
12031 /* subtracts 1, returns the old value */
12032 " jns 1f\n\t"
12033 " call call_rwsem_wake\n" /* expects old value in %edx */
12034 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12035 long tmp;
12036 asm volatile("# beginning __up_write\n\t"
12037 LOCK_PREFIX " xadd %1,(%2)\n\t"
12038 +
12039 +#ifdef CONFIG_PAX_REFCOUNT
12040 + "jno 0f\n"
12041 + "mov %1,(%2)\n"
12042 + "int $4\n0:\n"
12043 + _ASM_EXTABLE(0b, 0b)
12044 +#endif
12045 +
12046 /* subtracts 0xffff0001, returns the old value */
12047 " jns 1f\n\t"
12048 " call call_rwsem_wake\n" /* expects old value in %edx */
12049 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12050 {
12051 asm volatile("# beginning __downgrade_write\n\t"
12052 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12053 +
12054 +#ifdef CONFIG_PAX_REFCOUNT
12055 + "jno 0f\n"
12056 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12057 + "int $4\n0:\n"
12058 + _ASM_EXTABLE(0b, 0b)
12059 +#endif
12060 +
12061 /*
12062 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12063 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12064 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12065 */
12066 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12067 {
12068 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12069 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12070 +
12071 +#ifdef CONFIG_PAX_REFCOUNT
12072 + "jno 0f\n"
12073 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
12074 + "int $4\n0:\n"
12075 + _ASM_EXTABLE(0b, 0b)
12076 +#endif
12077 +
12078 : "+m" (sem->count)
12079 : "er" (delta));
12080 }
12081 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12082 */
12083 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
12084 {
12085 - return delta + xadd(&sem->count, delta);
12086 + return delta + xadd_check_overflow(&sem->count, delta);
12087 }
12088
12089 #endif /* __KERNEL__ */
12090 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12091 index 1654662..5af4157 100644
12092 --- a/arch/x86/include/asm/segment.h
12093 +++ b/arch/x86/include/asm/segment.h
12094 @@ -64,10 +64,15 @@
12095 * 26 - ESPFIX small SS
12096 * 27 - per-cpu [ offset to per-cpu data area ]
12097 * 28 - stack_canary-20 [ for stack protector ]
12098 - * 29 - unused
12099 - * 30 - unused
12100 + * 29 - PCI BIOS CS
12101 + * 30 - PCI BIOS DS
12102 * 31 - TSS for double fault handler
12103 */
12104 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12105 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12106 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12107 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12108 +
12109 #define GDT_ENTRY_TLS_MIN 6
12110 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12111
12112 @@ -79,6 +84,8 @@
12113
12114 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
12115
12116 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12117 +
12118 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
12119
12120 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
12121 @@ -104,6 +111,12 @@
12122 #define __KERNEL_STACK_CANARY 0
12123 #endif
12124
12125 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
12126 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12127 +
12128 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
12129 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12130 +
12131 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12132
12133 /*
12134 @@ -141,7 +154,7 @@
12135 */
12136
12137 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12138 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12139 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12140
12141
12142 #else
12143 @@ -165,6 +178,8 @@
12144 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
12145 #define __USER32_DS __USER_DS
12146
12147 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12148 +
12149 #define GDT_ENTRY_TSS 8 /* needs two entries */
12150 #define GDT_ENTRY_LDT 10 /* needs two entries */
12151 #define GDT_ENTRY_TLS_MIN 12
12152 @@ -185,6 +200,7 @@
12153 #endif
12154
12155 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
12156 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
12157 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
12158 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
12159 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
12160 @@ -263,7 +279,7 @@ static inline unsigned long get_limit(unsigned long segment)
12161 {
12162 unsigned long __limit;
12163 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12164 - return __limit + 1;
12165 + return __limit;
12166 }
12167
12168 #endif /* !__ASSEMBLY__ */
12169 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12170 index 0434c40..1714bf0 100644
12171 --- a/arch/x86/include/asm/smp.h
12172 +++ b/arch/x86/include/asm/smp.h
12173 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12174 /* cpus sharing the last level cache: */
12175 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
12176 DECLARE_PER_CPU(u16, cpu_llc_id);
12177 -DECLARE_PER_CPU(int, cpu_number);
12178 +DECLARE_PER_CPU(unsigned int, cpu_number);
12179
12180 static inline struct cpumask *cpu_sibling_mask(int cpu)
12181 {
12182 @@ -77,7 +77,7 @@ struct smp_ops {
12183
12184 void (*send_call_func_ipi)(const struct cpumask *mask);
12185 void (*send_call_func_single_ipi)(int cpu);
12186 -};
12187 +} __no_const;
12188
12189 /* Globals due to paravirt */
12190 extern void set_cpu_sibling_map(int cpu);
12191 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12192 extern int safe_smp_processor_id(void);
12193
12194 #elif defined(CONFIG_X86_64_SMP)
12195 -#define raw_smp_processor_id() (percpu_read(cpu_number))
12196 -
12197 -#define stack_smp_processor_id() \
12198 -({ \
12199 - struct thread_info *ti; \
12200 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12201 - ti->cpu; \
12202 -})
12203 +#define raw_smp_processor_id() (percpu_read(cpu_number))
12204 +#define stack_smp_processor_id() raw_smp_processor_id()
12205 #define safe_smp_processor_id() smp_processor_id()
12206
12207 #endif
12208 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12209 index 76bfa2c..12d3fe7 100644
12210 --- a/arch/x86/include/asm/spinlock.h
12211 +++ b/arch/x86/include/asm/spinlock.h
12212 @@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
12213 static inline void arch_read_lock(arch_rwlock_t *rw)
12214 {
12215 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
12216 +
12217 +#ifdef CONFIG_PAX_REFCOUNT
12218 + "jno 0f\n"
12219 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
12220 + "int $4\n0:\n"
12221 + _ASM_EXTABLE(0b, 0b)
12222 +#endif
12223 +
12224 "jns 1f\n"
12225 "call __read_lock_failed\n\t"
12226 "1:\n"
12227 @@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
12228 static inline void arch_write_lock(arch_rwlock_t *rw)
12229 {
12230 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
12231 +
12232 +#ifdef CONFIG_PAX_REFCOUNT
12233 + "jno 0f\n"
12234 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
12235 + "int $4\n0:\n"
12236 + _ASM_EXTABLE(0b, 0b)
12237 +#endif
12238 +
12239 "jz 1f\n"
12240 "call __write_lock_failed\n\t"
12241 "1:\n"
12242 @@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
12243
12244 static inline void arch_read_unlock(arch_rwlock_t *rw)
12245 {
12246 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
12247 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
12248 +
12249 +#ifdef CONFIG_PAX_REFCOUNT
12250 + "jno 0f\n"
12251 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
12252 + "int $4\n0:\n"
12253 + _ASM_EXTABLE(0b, 0b)
12254 +#endif
12255 +
12256 :"+m" (rw->lock) : : "memory");
12257 }
12258
12259 static inline void arch_write_unlock(arch_rwlock_t *rw)
12260 {
12261 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
12262 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
12263 +
12264 +#ifdef CONFIG_PAX_REFCOUNT
12265 + "jno 0f\n"
12266 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
12267 + "int $4\n0:\n"
12268 + _ASM_EXTABLE(0b, 0b)
12269 +#endif
12270 +
12271 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
12272 }
12273
12274 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12275 index b5d9533..41655fa 100644
12276 --- a/arch/x86/include/asm/stackprotector.h
12277 +++ b/arch/x86/include/asm/stackprotector.h
12278 @@ -47,7 +47,7 @@
12279 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12280 */
12281 #define GDT_STACK_CANARY_INIT \
12282 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12283 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12284
12285 /*
12286 * Initialize the stackprotector canary value.
12287 @@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
12288
12289 static inline void load_stack_canary_segment(void)
12290 {
12291 -#ifdef CONFIG_X86_32
12292 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12293 asm volatile ("mov %0, %%gs" : : "r" (0));
12294 #endif
12295 }
12296 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
12297 index 70bbe39..4ae2bd4 100644
12298 --- a/arch/x86/include/asm/stacktrace.h
12299 +++ b/arch/x86/include/asm/stacktrace.h
12300 @@ -11,28 +11,20 @@
12301
12302 extern int kstack_depth_to_print;
12303
12304 -struct thread_info;
12305 +struct task_struct;
12306 struct stacktrace_ops;
12307
12308 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
12309 - unsigned long *stack,
12310 - unsigned long bp,
12311 - const struct stacktrace_ops *ops,
12312 - void *data,
12313 - unsigned long *end,
12314 - int *graph);
12315 +typedef unsigned long walk_stack_t(struct task_struct *task,
12316 + void *stack_start,
12317 + unsigned long *stack,
12318 + unsigned long bp,
12319 + const struct stacktrace_ops *ops,
12320 + void *data,
12321 + unsigned long *end,
12322 + int *graph);
12323
12324 -extern unsigned long
12325 -print_context_stack(struct thread_info *tinfo,
12326 - unsigned long *stack, unsigned long bp,
12327 - const struct stacktrace_ops *ops, void *data,
12328 - unsigned long *end, int *graph);
12329 -
12330 -extern unsigned long
12331 -print_context_stack_bp(struct thread_info *tinfo,
12332 - unsigned long *stack, unsigned long bp,
12333 - const struct stacktrace_ops *ops, void *data,
12334 - unsigned long *end, int *graph);
12335 +extern walk_stack_t print_context_stack;
12336 +extern walk_stack_t print_context_stack_bp;
12337
12338 /* Generic stack tracer with callbacks */
12339
12340 @@ -40,7 +32,7 @@ struct stacktrace_ops {
12341 void (*address)(void *data, unsigned long address, int reliable);
12342 /* On negative return stop dumping */
12343 int (*stack)(void *data, char *name);
12344 - walk_stack_t walk_stack;
12345 + walk_stack_t *walk_stack;
12346 };
12347
12348 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
12349 diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
12350 index 4ec45b3..a4f0a8a 100644
12351 --- a/arch/x86/include/asm/switch_to.h
12352 +++ b/arch/x86/include/asm/switch_to.h
12353 @@ -108,7 +108,7 @@ do { \
12354 "call __switch_to\n\t" \
12355 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12356 __switch_canary \
12357 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
12358 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12359 "movq %%rax,%%rdi\n\t" \
12360 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12361 "jnz ret_from_fork\n\t" \
12362 @@ -119,7 +119,7 @@ do { \
12363 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12364 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12365 [_tif_fork] "i" (_TIF_FORK), \
12366 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
12367 + [thread_info] "m" (current_tinfo), \
12368 [current_task] "m" (current_task) \
12369 __switch_canary_iparam \
12370 : "memory", "cc" __EXTRA_CLOBBER)
12371 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
12372 index 3fda9db4..4ca1c61 100644
12373 --- a/arch/x86/include/asm/sys_ia32.h
12374 +++ b/arch/x86/include/asm/sys_ia32.h
12375 @@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
12376 struct old_sigaction32 __user *);
12377 asmlinkage long sys32_alarm(unsigned int);
12378
12379 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
12380 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
12381 asmlinkage long sys32_sysfs(int, u32, u32);
12382
12383 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
12384 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12385 index ad6df8c..5e0cf6e 100644
12386 --- a/arch/x86/include/asm/thread_info.h
12387 +++ b/arch/x86/include/asm/thread_info.h
12388 @@ -10,6 +10,7 @@
12389 #include <linux/compiler.h>
12390 #include <asm/page.h>
12391 #include <asm/types.h>
12392 +#include <asm/percpu.h>
12393
12394 /*
12395 * low level task data that entry.S needs immediate access to
12396 @@ -24,7 +25,6 @@ struct exec_domain;
12397 #include <linux/atomic.h>
12398
12399 struct thread_info {
12400 - struct task_struct *task; /* main task structure */
12401 struct exec_domain *exec_domain; /* execution domain */
12402 __u32 flags; /* low level flags */
12403 __u32 status; /* thread synchronous flags */
12404 @@ -34,19 +34,13 @@ struct thread_info {
12405 mm_segment_t addr_limit;
12406 struct restart_block restart_block;
12407 void __user *sysenter_return;
12408 -#ifdef CONFIG_X86_32
12409 - unsigned long previous_esp; /* ESP of the previous stack in
12410 - case of nested (IRQ) stacks
12411 - */
12412 - __u8 supervisor_stack[0];
12413 -#endif
12414 + unsigned long lowest_stack;
12415 unsigned int sig_on_uaccess_error:1;
12416 unsigned int uaccess_err:1; /* uaccess failed */
12417 };
12418
12419 -#define INIT_THREAD_INFO(tsk) \
12420 +#define INIT_THREAD_INFO \
12421 { \
12422 - .task = &tsk, \
12423 .exec_domain = &default_exec_domain, \
12424 .flags = 0, \
12425 .cpu = 0, \
12426 @@ -57,7 +51,7 @@ struct thread_info {
12427 }, \
12428 }
12429
12430 -#define init_thread_info (init_thread_union.thread_info)
12431 +#define init_thread_info (init_thread_union.stack)
12432 #define init_stack (init_thread_union.stack)
12433
12434 #else /* !__ASSEMBLY__ */
12435 @@ -97,6 +91,7 @@ struct thread_info {
12436 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
12437 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
12438 #define TIF_X32 30 /* 32-bit native x86-64 binary */
12439 +#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
12440
12441 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
12442 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
12443 @@ -120,16 +115,18 @@ struct thread_info {
12444 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
12445 #define _TIF_ADDR32 (1 << TIF_ADDR32)
12446 #define _TIF_X32 (1 << TIF_X32)
12447 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
12448
12449 /* work to do in syscall_trace_enter() */
12450 #define _TIF_WORK_SYSCALL_ENTRY \
12451 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
12452 - _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
12453 + _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
12454 + _TIF_GRSEC_SETXID)
12455
12456 /* work to do in syscall_trace_leave() */
12457 #define _TIF_WORK_SYSCALL_EXIT \
12458 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
12459 - _TIF_SYSCALL_TRACEPOINT)
12460 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12461
12462 /* work to do on interrupt/exception return */
12463 #define _TIF_WORK_MASK \
12464 @@ -139,7 +136,8 @@ struct thread_info {
12465
12466 /* work to do on any return to user space */
12467 #define _TIF_ALLWORK_MASK \
12468 - ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
12469 + ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
12470 + _TIF_GRSEC_SETXID)
12471
12472 /* Only used for 64 bit */
12473 #define _TIF_DO_NOTIFY_MASK \
12474 @@ -173,45 +171,40 @@ struct thread_info {
12475 ret; \
12476 })
12477
12478 -#ifdef CONFIG_X86_32
12479 -
12480 -#define STACK_WARN (THREAD_SIZE/8)
12481 -/*
12482 - * macros/functions for gaining access to the thread information structure
12483 - *
12484 - * preempt_count needs to be 1 initially, until the scheduler is functional.
12485 - */
12486 -#ifndef __ASSEMBLY__
12487 -
12488 -
12489 -/* how to get the current stack pointer from C */
12490 -register unsigned long current_stack_pointer asm("esp") __used;
12491 -
12492 -/* how to get the thread information struct from C */
12493 -static inline struct thread_info *current_thread_info(void)
12494 -{
12495 - return (struct thread_info *)
12496 - (current_stack_pointer & ~(THREAD_SIZE - 1));
12497 -}
12498 -
12499 -#else /* !__ASSEMBLY__ */
12500 -
12501 +#ifdef __ASSEMBLY__
12502 /* how to get the thread information struct from ASM */
12503 #define GET_THREAD_INFO(reg) \
12504 - movl $-THREAD_SIZE, reg; \
12505 - andl %esp, reg
12506 + mov PER_CPU_VAR(current_tinfo), reg
12507
12508 /* use this one if reg already contains %esp */
12509 -#define GET_THREAD_INFO_WITH_ESP(reg) \
12510 - andl $-THREAD_SIZE, reg
12511 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12512 +#else
12513 +/* how to get the thread information struct from C */
12514 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12515 +
12516 +static __always_inline struct thread_info *current_thread_info(void)
12517 +{
12518 + return percpu_read_stable(current_tinfo);
12519 +}
12520 +#endif
12521 +
12522 +#ifdef CONFIG_X86_32
12523 +
12524 +#define STACK_WARN (THREAD_SIZE/8)
12525 +/*
12526 + * macros/functions for gaining access to the thread information structure
12527 + *
12528 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12529 + */
12530 +#ifndef __ASSEMBLY__
12531 +
12532 +/* how to get the current stack pointer from C */
12533 +register unsigned long current_stack_pointer asm("esp") __used;
12534
12535 #endif
12536
12537 #else /* X86_32 */
12538
12539 -#include <asm/percpu.h>
12540 -#define KERNEL_STACK_OFFSET (5*8)
12541 -
12542 /*
12543 * macros/functions for gaining access to the thread information structure
12544 * preempt_count needs to be 1 initially, until the scheduler is functional.
12545 @@ -219,27 +212,8 @@ static inline struct thread_info *current_thread_info(void)
12546 #ifndef __ASSEMBLY__
12547 DECLARE_PER_CPU(unsigned long, kernel_stack);
12548
12549 -static inline struct thread_info *current_thread_info(void)
12550 -{
12551 - struct thread_info *ti;
12552 - ti = (void *)(percpu_read_stable(kernel_stack) +
12553 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12554 - return ti;
12555 -}
12556 -
12557 -#else /* !__ASSEMBLY__ */
12558 -
12559 -/* how to get the thread information struct from ASM */
12560 -#define GET_THREAD_INFO(reg) \
12561 - movq PER_CPU_VAR(kernel_stack),reg ; \
12562 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12563 -
12564 -/*
12565 - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12566 - * a certain register (to be used in assembler memory operands).
12567 - */
12568 -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12569 -
12570 +/* how to get the current stack pointer from C */
12571 +register unsigned long current_stack_pointer asm("rsp") __used;
12572 #endif
12573
12574 #endif /* !X86_32 */
12575 @@ -285,5 +259,16 @@ extern void arch_task_cache_init(void);
12576 extern void free_thread_info(struct thread_info *ti);
12577 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12578 #define arch_task_cache_init arch_task_cache_init
12579 +
12580 +#define __HAVE_THREAD_FUNCTIONS
12581 +#define task_thread_info(task) (&(task)->tinfo)
12582 +#define task_stack_page(task) ((task)->stack)
12583 +#define setup_thread_stack(p, org) do {} while (0)
12584 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12585 +
12586 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12587 +extern struct task_struct *alloc_task_struct_node(int node);
12588 +extern void free_task_struct(struct task_struct *);
12589 +
12590 #endif
12591 #endif /* _ASM_X86_THREAD_INFO_H */
12592 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12593 index e054459..14bc8a7 100644
12594 --- a/arch/x86/include/asm/uaccess.h
12595 +++ b/arch/x86/include/asm/uaccess.h
12596 @@ -7,12 +7,15 @@
12597 #include <linux/compiler.h>
12598 #include <linux/thread_info.h>
12599 #include <linux/string.h>
12600 +#include <linux/sched.h>
12601 #include <asm/asm.h>
12602 #include <asm/page.h>
12603
12604 #define VERIFY_READ 0
12605 #define VERIFY_WRITE 1
12606
12607 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12608 +
12609 /*
12610 * The fs value determines whether argument validity checking should be
12611 * performed or not. If get_fs() == USER_DS, checking is performed, with
12612 @@ -28,7 +31,12 @@
12613
12614 #define get_ds() (KERNEL_DS)
12615 #define get_fs() (current_thread_info()->addr_limit)
12616 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12617 +void __set_fs(mm_segment_t x);
12618 +void set_fs(mm_segment_t x);
12619 +#else
12620 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12621 +#endif
12622
12623 #define segment_eq(a, b) ((a).seg == (b).seg)
12624
12625 @@ -76,7 +84,33 @@
12626 * checks that the pointer is in the user space range - after calling
12627 * this function, memory access functions may still return -EFAULT.
12628 */
12629 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12630 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12631 +#define access_ok(type, addr, size) \
12632 +({ \
12633 + long __size = size; \
12634 + unsigned long __addr = (unsigned long)addr; \
12635 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12636 + unsigned long __end_ao = __addr + __size - 1; \
12637 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12638 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12639 + while(__addr_ao <= __end_ao) { \
12640 + char __c_ao; \
12641 + __addr_ao += PAGE_SIZE; \
12642 + if (__size > PAGE_SIZE) \
12643 + cond_resched(); \
12644 + if (__get_user(__c_ao, (char __user *)__addr)) \
12645 + break; \
12646 + if (type != VERIFY_WRITE) { \
12647 + __addr = __addr_ao; \
12648 + continue; \
12649 + } \
12650 + if (__put_user(__c_ao, (char __user *)__addr)) \
12651 + break; \
12652 + __addr = __addr_ao; \
12653 + } \
12654 + } \
12655 + __ret_ao; \
12656 +})
12657
12658 /*
12659 * The exception table consists of pairs of addresses: the first is the
12660 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12661 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12662 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12663
12664 -
12665 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12666 +#define __copyuser_seg "gs;"
12667 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12668 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12669 +#else
12670 +#define __copyuser_seg
12671 +#define __COPYUSER_SET_ES
12672 +#define __COPYUSER_RESTORE_ES
12673 +#endif
12674
12675 #ifdef CONFIG_X86_32
12676 #define __put_user_asm_u64(x, addr, err, errret) \
12677 - asm volatile("1: movl %%eax,0(%2)\n" \
12678 - "2: movl %%edx,4(%2)\n" \
12679 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12680 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12681 "3:\n" \
12682 ".section .fixup,\"ax\"\n" \
12683 "4: movl %3,%0\n" \
12684 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12685 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12686
12687 #define __put_user_asm_ex_u64(x, addr) \
12688 - asm volatile("1: movl %%eax,0(%1)\n" \
12689 - "2: movl %%edx,4(%1)\n" \
12690 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12691 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12692 "3:\n" \
12693 _ASM_EXTABLE(1b, 2b - 1b) \
12694 _ASM_EXTABLE(2b, 3b - 2b) \
12695 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
12696 __typeof__(*(ptr)) __pu_val; \
12697 __chk_user_ptr(ptr); \
12698 might_fault(); \
12699 - __pu_val = x; \
12700 + __pu_val = (x); \
12701 switch (sizeof(*(ptr))) { \
12702 case 1: \
12703 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12704 @@ -373,7 +415,7 @@ do { \
12705 } while (0)
12706
12707 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12708 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12709 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12710 "2:\n" \
12711 ".section .fixup,\"ax\"\n" \
12712 "3: mov %3,%0\n" \
12713 @@ -381,7 +423,7 @@ do { \
12714 " jmp 2b\n" \
12715 ".previous\n" \
12716 _ASM_EXTABLE(1b, 3b) \
12717 - : "=r" (err), ltype(x) \
12718 + : "=r" (err), ltype (x) \
12719 : "m" (__m(addr)), "i" (errret), "0" (err))
12720
12721 #define __get_user_size_ex(x, ptr, size) \
12722 @@ -406,7 +448,7 @@ do { \
12723 } while (0)
12724
12725 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12726 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12727 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12728 "2:\n" \
12729 _ASM_EXTABLE(1b, 2b - 1b) \
12730 : ltype(x) : "m" (__m(addr)))
12731 @@ -423,13 +465,24 @@ do { \
12732 int __gu_err; \
12733 unsigned long __gu_val; \
12734 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12735 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12736 + (x) = (__typeof__(*(ptr)))__gu_val; \
12737 __gu_err; \
12738 })
12739
12740 /* FIXME: this hack is definitely wrong -AK */
12741 struct __large_struct { unsigned long buf[100]; };
12742 -#define __m(x) (*(struct __large_struct __user *)(x))
12743 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12744 +#define ____m(x) \
12745 +({ \
12746 + unsigned long ____x = (unsigned long)(x); \
12747 + if (____x < PAX_USER_SHADOW_BASE) \
12748 + ____x += PAX_USER_SHADOW_BASE; \
12749 + (void __user *)____x; \
12750 +})
12751 +#else
12752 +#define ____m(x) (x)
12753 +#endif
12754 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12755
12756 /*
12757 * Tell gcc we read from memory instead of writing: this is because
12758 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12759 * aliasing issues.
12760 */
12761 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12762 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12763 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12764 "2:\n" \
12765 ".section .fixup,\"ax\"\n" \
12766 "3: mov %3,%0\n" \
12767 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12768 ".previous\n" \
12769 _ASM_EXTABLE(1b, 3b) \
12770 : "=r"(err) \
12771 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12772 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12773
12774 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12775 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12776 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12777 "2:\n" \
12778 _ASM_EXTABLE(1b, 2b - 1b) \
12779 : : ltype(x), "m" (__m(addr)))
12780 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12781 * On error, the variable @x is set to zero.
12782 */
12783
12784 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12785 +#define __get_user(x, ptr) get_user((x), (ptr))
12786 +#else
12787 #define __get_user(x, ptr) \
12788 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12789 +#endif
12790
12791 /**
12792 * __put_user: - Write a simple value into user space, with less checking.
12793 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12794 * Returns zero on success, or -EFAULT on error.
12795 */
12796
12797 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12798 +#define __put_user(x, ptr) put_user((x), (ptr))
12799 +#else
12800 #define __put_user(x, ptr) \
12801 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12802 +#endif
12803
12804 #define __get_user_unaligned __get_user
12805 #define __put_user_unaligned __put_user
12806 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12807 #define get_user_ex(x, ptr) do { \
12808 unsigned long __gue_val; \
12809 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12810 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12811 + (x) = (__typeof__(*(ptr)))__gue_val; \
12812 } while (0)
12813
12814 #ifdef CONFIG_X86_WP_WORKS_OK
12815 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12816 index 8084bc7..cc139cb 100644
12817 --- a/arch/x86/include/asm/uaccess_32.h
12818 +++ b/arch/x86/include/asm/uaccess_32.h
12819 @@ -11,15 +11,15 @@
12820 #include <asm/page.h>
12821
12822 unsigned long __must_check __copy_to_user_ll
12823 - (void __user *to, const void *from, unsigned long n);
12824 + (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12825 unsigned long __must_check __copy_from_user_ll
12826 - (void *to, const void __user *from, unsigned long n);
12827 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12828 unsigned long __must_check __copy_from_user_ll_nozero
12829 - (void *to, const void __user *from, unsigned long n);
12830 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12831 unsigned long __must_check __copy_from_user_ll_nocache
12832 - (void *to, const void __user *from, unsigned long n);
12833 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12834 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12835 - (void *to, const void __user *from, unsigned long n);
12836 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12837
12838 /**
12839 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
12840 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12841 static __always_inline unsigned long __must_check
12842 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12843 {
12844 + if ((long)n < 0)
12845 + return n;
12846 +
12847 if (__builtin_constant_p(n)) {
12848 unsigned long ret;
12849
12850 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12851 return ret;
12852 }
12853 }
12854 + if (!__builtin_constant_p(n))
12855 + check_object_size(from, n, true);
12856 return __copy_to_user_ll(to, from, n);
12857 }
12858
12859 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
12860 __copy_to_user(void __user *to, const void *from, unsigned long n)
12861 {
12862 might_fault();
12863 +
12864 return __copy_to_user_inatomic(to, from, n);
12865 }
12866
12867 static __always_inline unsigned long
12868 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12869 {
12870 + if ((long)n < 0)
12871 + return n;
12872 +
12873 /* Avoid zeroing the tail if the copy fails..
12874 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12875 * but as the zeroing behaviour is only significant when n is not
12876 @@ -137,6 +146,10 @@ static __always_inline unsigned long
12877 __copy_from_user(void *to, const void __user *from, unsigned long n)
12878 {
12879 might_fault();
12880 +
12881 + if ((long)n < 0)
12882 + return n;
12883 +
12884 if (__builtin_constant_p(n)) {
12885 unsigned long ret;
12886
12887 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12888 return ret;
12889 }
12890 }
12891 + if (!__builtin_constant_p(n))
12892 + check_object_size(to, n, false);
12893 return __copy_from_user_ll(to, from, n);
12894 }
12895
12896 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12897 const void __user *from, unsigned long n)
12898 {
12899 might_fault();
12900 +
12901 + if ((long)n < 0)
12902 + return n;
12903 +
12904 if (__builtin_constant_p(n)) {
12905 unsigned long ret;
12906
12907 @@ -181,15 +200,19 @@ static __always_inline unsigned long
12908 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12909 unsigned long n)
12910 {
12911 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12912 + if ((long)n < 0)
12913 + return n;
12914 +
12915 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12916 }
12917
12918 -unsigned long __must_check copy_to_user(void __user *to,
12919 - const void *from, unsigned long n);
12920 -unsigned long __must_check _copy_from_user(void *to,
12921 - const void __user *from,
12922 - unsigned long n);
12923 -
12924 +extern void copy_to_user_overflow(void)
12925 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12926 + __compiletime_error("copy_to_user() buffer size is not provably correct")
12927 +#else
12928 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
12929 +#endif
12930 +;
12931
12932 extern void copy_from_user_overflow(void)
12933 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12934 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
12935 #endif
12936 ;
12937
12938 -static inline unsigned long __must_check copy_from_user(void *to,
12939 - const void __user *from,
12940 - unsigned long n)
12941 +/**
12942 + * copy_to_user: - Copy a block of data into user space.
12943 + * @to: Destination address, in user space.
12944 + * @from: Source address, in kernel space.
12945 + * @n: Number of bytes to copy.
12946 + *
12947 + * Context: User context only. This function may sleep.
12948 + *
12949 + * Copy data from kernel space to user space.
12950 + *
12951 + * Returns number of bytes that could not be copied.
12952 + * On success, this will be zero.
12953 + */
12954 +static inline unsigned long __must_check
12955 +copy_to_user(void __user *to, const void *from, unsigned long n)
12956 +{
12957 + int sz = __compiletime_object_size(from);
12958 +
12959 + if (unlikely(sz != -1 && sz < n))
12960 + copy_to_user_overflow();
12961 + else if (access_ok(VERIFY_WRITE, to, n))
12962 + n = __copy_to_user(to, from, n);
12963 + return n;
12964 +}
12965 +
12966 +/**
12967 + * copy_from_user: - Copy a block of data from user space.
12968 + * @to: Destination address, in kernel space.
12969 + * @from: Source address, in user space.
12970 + * @n: Number of bytes to copy.
12971 + *
12972 + * Context: User context only. This function may sleep.
12973 + *
12974 + * Copy data from user space to kernel space.
12975 + *
12976 + * Returns number of bytes that could not be copied.
12977 + * On success, this will be zero.
12978 + *
12979 + * If some data could not be copied, this function will pad the copied
12980 + * data to the requested size using zero bytes.
12981 + */
12982 +static inline unsigned long __must_check
12983 +copy_from_user(void *to, const void __user *from, unsigned long n)
12984 {
12985 int sz = __compiletime_object_size(to);
12986
12987 - if (likely(sz == -1 || sz >= n))
12988 - n = _copy_from_user(to, from, n);
12989 - else
12990 + if (unlikely(sz != -1 && sz < n))
12991 copy_from_user_overflow();
12992 -
12993 + else if (access_ok(VERIFY_READ, from, n))
12994 + n = __copy_from_user(to, from, n);
12995 + else if ((long)n > 0) {
12996 + if (!__builtin_constant_p(n))
12997 + check_object_size(to, n, false);
12998 + memset(to, 0, n);
12999 + }
13000 return n;
13001 }
13002
13003 @@ -230,7 +297,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
13004 #define strlen_user(str) strnlen_user(str, LONG_MAX)
13005
13006 long strnlen_user(const char __user *str, long n);
13007 -unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13008 -unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13009 +unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13010 +unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13011
13012 #endif /* _ASM_X86_UACCESS_32_H */
13013 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13014 index fcd4b6f..f4631a0 100644
13015 --- a/arch/x86/include/asm/uaccess_64.h
13016 +++ b/arch/x86/include/asm/uaccess_64.h
13017 @@ -10,6 +10,9 @@
13018 #include <asm/alternative.h>
13019 #include <asm/cpufeature.h>
13020 #include <asm/page.h>
13021 +#include <asm/pgtable.h>
13022 +
13023 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
13024
13025 /*
13026 * Copy To/From Userspace
13027 @@ -17,12 +20,14 @@
13028
13029 /* Handles exceptions in both to and from, but doesn't do access_ok */
13030 __must_check unsigned long
13031 -copy_user_generic_string(void *to, const void *from, unsigned len);
13032 +copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
13033 __must_check unsigned long
13034 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
13035 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
13036
13037 static __always_inline __must_check unsigned long
13038 -copy_user_generic(void *to, const void *from, unsigned len)
13039 +copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
13040 +static __always_inline __must_check unsigned long
13041 +copy_user_generic(void *to, const void *from, unsigned long len)
13042 {
13043 unsigned ret;
13044
13045 @@ -32,142 +37,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
13046 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
13047 "=d" (len)),
13048 "1" (to), "2" (from), "3" (len)
13049 - : "memory", "rcx", "r8", "r9", "r10", "r11");
13050 + : "memory", "rcx", "r8", "r9", "r11");
13051 return ret;
13052 }
13053
13054 +static __always_inline __must_check unsigned long
13055 +__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
13056 +static __always_inline __must_check unsigned long
13057 +__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
13058 __must_check unsigned long
13059 -_copy_to_user(void __user *to, const void *from, unsigned len);
13060 -__must_check unsigned long
13061 -_copy_from_user(void *to, const void __user *from, unsigned len);
13062 -__must_check unsigned long
13063 -copy_in_user(void __user *to, const void __user *from, unsigned len);
13064 +copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
13065
13066 static inline unsigned long __must_check copy_from_user(void *to,
13067 const void __user *from,
13068 unsigned long n)
13069 {
13070 - int sz = __compiletime_object_size(to);
13071 -
13072 might_fault();
13073 - if (likely(sz == -1 || sz >= n))
13074 - n = _copy_from_user(to, from, n);
13075 -#ifdef CONFIG_DEBUG_VM
13076 - else
13077 - WARN(1, "Buffer overflow detected!\n");
13078 -#endif
13079 +
13080 + if (access_ok(VERIFY_READ, from, n))
13081 + n = __copy_from_user(to, from, n);
13082 + else if (n < INT_MAX) {
13083 + if (!__builtin_constant_p(n))
13084 + check_object_size(to, n, false);
13085 + memset(to, 0, n);
13086 + }
13087 return n;
13088 }
13089
13090 static __always_inline __must_check
13091 -int copy_to_user(void __user *dst, const void *src, unsigned size)
13092 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
13093 {
13094 might_fault();
13095
13096 - return _copy_to_user(dst, src, size);
13097 + if (access_ok(VERIFY_WRITE, dst, size))
13098 + size = __copy_to_user(dst, src, size);
13099 + return size;
13100 }
13101
13102 static __always_inline __must_check
13103 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
13104 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13105 {
13106 - int ret = 0;
13107 + int sz = __compiletime_object_size(dst);
13108 + unsigned ret = 0;
13109
13110 might_fault();
13111 - if (!__builtin_constant_p(size))
13112 - return copy_user_generic(dst, (__force void *)src, size);
13113 +
13114 + if (size > INT_MAX)
13115 + return size;
13116 +
13117 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13118 + if (!__access_ok(VERIFY_READ, src, size))
13119 + return size;
13120 +#endif
13121 +
13122 + if (unlikely(sz != -1 && sz < size)) {
13123 +#ifdef CONFIG_DEBUG_VM
13124 + WARN(1, "Buffer overflow detected!\n");
13125 +#endif
13126 + return size;
13127 + }
13128 +
13129 + if (!__builtin_constant_p(size)) {
13130 + check_object_size(dst, size, false);
13131 +
13132 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13133 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13134 + src += PAX_USER_SHADOW_BASE;
13135 +#endif
13136 +
13137 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13138 + }
13139 switch (size) {
13140 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13141 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13142 ret, "b", "b", "=q", 1);
13143 return ret;
13144 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13145 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13146 ret, "w", "w", "=r", 2);
13147 return ret;
13148 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13149 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13150 ret, "l", "k", "=r", 4);
13151 return ret;
13152 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13153 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13154 ret, "q", "", "=r", 8);
13155 return ret;
13156 case 10:
13157 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13158 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13159 ret, "q", "", "=r", 10);
13160 if (unlikely(ret))
13161 return ret;
13162 __get_user_asm(*(u16 *)(8 + (char *)dst),
13163 - (u16 __user *)(8 + (char __user *)src),
13164 + (const u16 __user *)(8 + (const char __user *)src),
13165 ret, "w", "w", "=r", 2);
13166 return ret;
13167 case 16:
13168 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13169 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13170 ret, "q", "", "=r", 16);
13171 if (unlikely(ret))
13172 return ret;
13173 __get_user_asm(*(u64 *)(8 + (char *)dst),
13174 - (u64 __user *)(8 + (char __user *)src),
13175 + (const u64 __user *)(8 + (const char __user *)src),
13176 ret, "q", "", "=r", 8);
13177 return ret;
13178 default:
13179 - return copy_user_generic(dst, (__force void *)src, size);
13180 +
13181 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13182 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13183 + src += PAX_USER_SHADOW_BASE;
13184 +#endif
13185 +
13186 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13187 }
13188 }
13189
13190 static __always_inline __must_check
13191 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
13192 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13193 {
13194 - int ret = 0;
13195 + int sz = __compiletime_object_size(src);
13196 + unsigned ret = 0;
13197
13198 might_fault();
13199 - if (!__builtin_constant_p(size))
13200 - return copy_user_generic((__force void *)dst, src, size);
13201 +
13202 + if (size > INT_MAX)
13203 + return size;
13204 +
13205 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13206 + if (!__access_ok(VERIFY_WRITE, dst, size))
13207 + return size;
13208 +#endif
13209 +
13210 + if (unlikely(sz != -1 && sz < size)) {
13211 +#ifdef CONFIG_DEBUG_VM
13212 + WARN(1, "Buffer overflow detected!\n");
13213 +#endif
13214 + return size;
13215 + }
13216 +
13217 + if (!__builtin_constant_p(size)) {
13218 + check_object_size(src, size, true);
13219 +
13220 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13221 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13222 + dst += PAX_USER_SHADOW_BASE;
13223 +#endif
13224 +
13225 + return copy_user_generic((__force_kernel void *)dst, src, size);
13226 + }
13227 switch (size) {
13228 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13229 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13230 ret, "b", "b", "iq", 1);
13231 return ret;
13232 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13233 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13234 ret, "w", "w", "ir", 2);
13235 return ret;
13236 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13237 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13238 ret, "l", "k", "ir", 4);
13239 return ret;
13240 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13241 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13242 ret, "q", "", "er", 8);
13243 return ret;
13244 case 10:
13245 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13246 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13247 ret, "q", "", "er", 10);
13248 if (unlikely(ret))
13249 return ret;
13250 asm("":::"memory");
13251 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13252 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13253 ret, "w", "w", "ir", 2);
13254 return ret;
13255 case 16:
13256 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13257 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13258 ret, "q", "", "er", 16);
13259 if (unlikely(ret))
13260 return ret;
13261 asm("":::"memory");
13262 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13263 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13264 ret, "q", "", "er", 8);
13265 return ret;
13266 default:
13267 - return copy_user_generic((__force void *)dst, src, size);
13268 +
13269 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13270 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13271 + dst += PAX_USER_SHADOW_BASE;
13272 +#endif
13273 +
13274 + return copy_user_generic((__force_kernel void *)dst, src, size);
13275 }
13276 }
13277
13278 static __always_inline __must_check
13279 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13280 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13281 {
13282 - int ret = 0;
13283 + unsigned ret = 0;
13284
13285 might_fault();
13286 - if (!__builtin_constant_p(size))
13287 - return copy_user_generic((__force void *)dst,
13288 - (__force void *)src, size);
13289 +
13290 + if (size > INT_MAX)
13291 + return size;
13292 +
13293 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13294 + if (!__access_ok(VERIFY_READ, src, size))
13295 + return size;
13296 + if (!__access_ok(VERIFY_WRITE, dst, size))
13297 + return size;
13298 +#endif
13299 +
13300 + if (!__builtin_constant_p(size)) {
13301 +
13302 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13303 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13304 + src += PAX_USER_SHADOW_BASE;
13305 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13306 + dst += PAX_USER_SHADOW_BASE;
13307 +#endif
13308 +
13309 + return copy_user_generic((__force_kernel void *)dst,
13310 + (__force_kernel const void *)src, size);
13311 + }
13312 switch (size) {
13313 case 1: {
13314 u8 tmp;
13315 - __get_user_asm(tmp, (u8 __user *)src,
13316 + __get_user_asm(tmp, (const u8 __user *)src,
13317 ret, "b", "b", "=q", 1);
13318 if (likely(!ret))
13319 __put_user_asm(tmp, (u8 __user *)dst,
13320 @@ -176,7 +265,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13321 }
13322 case 2: {
13323 u16 tmp;
13324 - __get_user_asm(tmp, (u16 __user *)src,
13325 + __get_user_asm(tmp, (const u16 __user *)src,
13326 ret, "w", "w", "=r", 2);
13327 if (likely(!ret))
13328 __put_user_asm(tmp, (u16 __user *)dst,
13329 @@ -186,7 +275,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13330
13331 case 4: {
13332 u32 tmp;
13333 - __get_user_asm(tmp, (u32 __user *)src,
13334 + __get_user_asm(tmp, (const u32 __user *)src,
13335 ret, "l", "k", "=r", 4);
13336 if (likely(!ret))
13337 __put_user_asm(tmp, (u32 __user *)dst,
13338 @@ -195,7 +284,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13339 }
13340 case 8: {
13341 u64 tmp;
13342 - __get_user_asm(tmp, (u64 __user *)src,
13343 + __get_user_asm(tmp, (const u64 __user *)src,
13344 ret, "q", "", "=r", 8);
13345 if (likely(!ret))
13346 __put_user_asm(tmp, (u64 __user *)dst,
13347 @@ -203,47 +292,92 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13348 return ret;
13349 }
13350 default:
13351 - return copy_user_generic((__force void *)dst,
13352 - (__force void *)src, size);
13353 +
13354 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13355 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13356 + src += PAX_USER_SHADOW_BASE;
13357 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13358 + dst += PAX_USER_SHADOW_BASE;
13359 +#endif
13360 +
13361 + return copy_user_generic((__force_kernel void *)dst,
13362 + (__force_kernel const void *)src, size);
13363 }
13364 }
13365
13366 __must_check long strnlen_user(const char __user *str, long n);
13367 __must_check long __strnlen_user(const char __user *str, long n);
13368 __must_check long strlen_user(const char __user *str);
13369 -__must_check unsigned long clear_user(void __user *mem, unsigned long len);
13370 -__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13371 +__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13372 +__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13373
13374 static __must_check __always_inline int
13375 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
13376 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13377 {
13378 - return copy_user_generic(dst, (__force const void *)src, size);
13379 + if (size > INT_MAX)
13380 + return size;
13381 +
13382 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13383 + if (!__access_ok(VERIFY_READ, src, size))
13384 + return size;
13385 +
13386 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13387 + src += PAX_USER_SHADOW_BASE;
13388 +#endif
13389 +
13390 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13391 }
13392
13393 -static __must_check __always_inline int
13394 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13395 +static __must_check __always_inline unsigned long
13396 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13397 {
13398 - return copy_user_generic((__force void *)dst, src, size);
13399 + if (size > INT_MAX)
13400 + return size;
13401 +
13402 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13403 + if (!__access_ok(VERIFY_WRITE, dst, size))
13404 + return size;
13405 +
13406 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13407 + dst += PAX_USER_SHADOW_BASE;
13408 +#endif
13409 +
13410 + return copy_user_generic((__force_kernel void *)dst, src, size);
13411 }
13412
13413 -extern long __copy_user_nocache(void *dst, const void __user *src,
13414 - unsigned size, int zerorest);
13415 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13416 + unsigned long size, int zerorest) __size_overflow(3);
13417
13418 -static inline int
13419 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13420 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13421 {
13422 might_sleep();
13423 +
13424 + if (size > INT_MAX)
13425 + return size;
13426 +
13427 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13428 + if (!__access_ok(VERIFY_READ, src, size))
13429 + return size;
13430 +#endif
13431 +
13432 return __copy_user_nocache(dst, src, size, 1);
13433 }
13434
13435 -static inline int
13436 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13437 - unsigned size)
13438 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13439 + unsigned long size)
13440 {
13441 + if (size > INT_MAX)
13442 + return size;
13443 +
13444 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13445 + if (!__access_ok(VERIFY_READ, src, size))
13446 + return size;
13447 +#endif
13448 +
13449 return __copy_user_nocache(dst, src, size, 0);
13450 }
13451
13452 -unsigned long
13453 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13454 +extern unsigned long
13455 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
13456
13457 #endif /* _ASM_X86_UACCESS_64_H */
13458 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13459 index bb05228..d763d5b 100644
13460 --- a/arch/x86/include/asm/vdso.h
13461 +++ b/arch/x86/include/asm/vdso.h
13462 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
13463 #define VDSO32_SYMBOL(base, name) \
13464 ({ \
13465 extern const char VDSO32_##name[]; \
13466 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13467 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13468 })
13469 #endif
13470
13471 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13472 index 764b66a..ad3cfc8 100644
13473 --- a/arch/x86/include/asm/x86_init.h
13474 +++ b/arch/x86/include/asm/x86_init.h
13475 @@ -29,7 +29,7 @@ struct x86_init_mpparse {
13476 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13477 void (*find_smp_config)(void);
13478 void (*get_smp_config)(unsigned int early);
13479 -};
13480 +} __no_const;
13481
13482 /**
13483 * struct x86_init_resources - platform specific resource related ops
13484 @@ -43,7 +43,7 @@ struct x86_init_resources {
13485 void (*probe_roms)(void);
13486 void (*reserve_resources)(void);
13487 char *(*memory_setup)(void);
13488 -};
13489 +} __no_const;
13490
13491 /**
13492 * struct x86_init_irqs - platform specific interrupt setup
13493 @@ -56,7 +56,7 @@ struct x86_init_irqs {
13494 void (*pre_vector_init)(void);
13495 void (*intr_init)(void);
13496 void (*trap_init)(void);
13497 -};
13498 +} __no_const;
13499
13500 /**
13501 * struct x86_init_oem - oem platform specific customizing functions
13502 @@ -66,7 +66,7 @@ struct x86_init_irqs {
13503 struct x86_init_oem {
13504 void (*arch_setup)(void);
13505 void (*banner)(void);
13506 -};
13507 +} __no_const;
13508
13509 /**
13510 * struct x86_init_mapping - platform specific initial kernel pagetable setup
13511 @@ -77,7 +77,7 @@ struct x86_init_oem {
13512 */
13513 struct x86_init_mapping {
13514 void (*pagetable_reserve)(u64 start, u64 end);
13515 -};
13516 +} __no_const;
13517
13518 /**
13519 * struct x86_init_paging - platform specific paging functions
13520 @@ -87,7 +87,7 @@ struct x86_init_mapping {
13521 struct x86_init_paging {
13522 void (*pagetable_setup_start)(pgd_t *base);
13523 void (*pagetable_setup_done)(pgd_t *base);
13524 -};
13525 +} __no_const;
13526
13527 /**
13528 * struct x86_init_timers - platform specific timer setup
13529 @@ -102,7 +102,7 @@ struct x86_init_timers {
13530 void (*tsc_pre_init)(void);
13531 void (*timer_init)(void);
13532 void (*wallclock_init)(void);
13533 -};
13534 +} __no_const;
13535
13536 /**
13537 * struct x86_init_iommu - platform specific iommu setup
13538 @@ -110,7 +110,7 @@ struct x86_init_timers {
13539 */
13540 struct x86_init_iommu {
13541 int (*iommu_init)(void);
13542 -};
13543 +} __no_const;
13544
13545 /**
13546 * struct x86_init_pci - platform specific pci init functions
13547 @@ -124,7 +124,7 @@ struct x86_init_pci {
13548 int (*init)(void);
13549 void (*init_irq)(void);
13550 void (*fixup_irqs)(void);
13551 -};
13552 +} __no_const;
13553
13554 /**
13555 * struct x86_init_ops - functions for platform specific setup
13556 @@ -140,7 +140,7 @@ struct x86_init_ops {
13557 struct x86_init_timers timers;
13558 struct x86_init_iommu iommu;
13559 struct x86_init_pci pci;
13560 -};
13561 +} __no_const;
13562
13563 /**
13564 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13565 @@ -151,7 +151,7 @@ struct x86_cpuinit_ops {
13566 void (*setup_percpu_clockev)(void);
13567 void (*early_percpu_clock_init)(void);
13568 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
13569 -};
13570 +} __no_const;
13571
13572 /**
13573 * struct x86_platform_ops - platform specific runtime functions
13574 @@ -177,7 +177,7 @@ struct x86_platform_ops {
13575 int (*i8042_detect)(void);
13576 void (*save_sched_clock_state)(void);
13577 void (*restore_sched_clock_state)(void);
13578 -};
13579 +} __no_const;
13580
13581 struct pci_dev;
13582
13583 @@ -186,7 +186,7 @@ struct x86_msi_ops {
13584 void (*teardown_msi_irq)(unsigned int irq);
13585 void (*teardown_msi_irqs)(struct pci_dev *dev);
13586 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
13587 -};
13588 +} __no_const;
13589
13590 extern struct x86_init_ops x86_init;
13591 extern struct x86_cpuinit_ops x86_cpuinit;
13592 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13593 index c6ce245..ffbdab7 100644
13594 --- a/arch/x86/include/asm/xsave.h
13595 +++ b/arch/x86/include/asm/xsave.h
13596 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13597 {
13598 int err;
13599
13600 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13601 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13602 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13603 +#endif
13604 +
13605 /*
13606 * Clear the xsave header first, so that reserved fields are
13607 * initialized to zero.
13608 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13609 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13610 {
13611 int err;
13612 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13613 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13614 u32 lmask = mask;
13615 u32 hmask = mask >> 32;
13616
13617 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13618 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13619 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13620 +#endif
13621 +
13622 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13623 "2:\n"
13624 ".section .fixup,\"ax\"\n"
13625 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13626 index 6a564ac..9b1340c 100644
13627 --- a/arch/x86/kernel/acpi/realmode/Makefile
13628 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13629 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13630 $(call cc-option, -fno-stack-protector) \
13631 $(call cc-option, -mpreferred-stack-boundary=2)
13632 KBUILD_CFLAGS += $(call cc-option, -m32)
13633 +ifdef CONSTIFY_PLUGIN
13634 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13635 +endif
13636 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13637 GCOV_PROFILE := n
13638
13639 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13640 index b4fd836..4358fe3 100644
13641 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13642 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13643 @@ -108,6 +108,9 @@ wakeup_code:
13644 /* Do any other stuff... */
13645
13646 #ifndef CONFIG_64BIT
13647 + /* Recheck NX bit overrides (64bit path does this in trampoline */
13648 + call verify_cpu
13649 +
13650 /* This could also be done in C code... */
13651 movl pmode_cr3, %eax
13652 movl %eax, %cr3
13653 @@ -131,6 +134,7 @@ wakeup_code:
13654 movl pmode_cr0, %eax
13655 movl %eax, %cr0
13656 jmp pmode_return
13657 +# include "../../verify_cpu.S"
13658 #else
13659 pushw $0
13660 pushw trampoline_segment
13661 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13662 index 146a49c..1b5338b 100644
13663 --- a/arch/x86/kernel/acpi/sleep.c
13664 +++ b/arch/x86/kernel/acpi/sleep.c
13665 @@ -98,8 +98,12 @@ int acpi_suspend_lowlevel(void)
13666 header->trampoline_segment = trampoline_address() >> 4;
13667 #ifdef CONFIG_SMP
13668 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13669 +
13670 + pax_open_kernel();
13671 early_gdt_descr.address =
13672 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13673 + pax_close_kernel();
13674 +
13675 initial_gs = per_cpu_offset(smp_processor_id());
13676 #endif
13677 initial_code = (unsigned long)wakeup_long64;
13678 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13679 index 7261083..5c12053 100644
13680 --- a/arch/x86/kernel/acpi/wakeup_32.S
13681 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13682 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13683 # and restore the stack ... but you need gdt for this to work
13684 movl saved_context_esp, %esp
13685
13686 - movl %cs:saved_magic, %eax
13687 - cmpl $0x12345678, %eax
13688 + cmpl $0x12345678, saved_magic
13689 jne bogus_magic
13690
13691 # jump to place where we left off
13692 - movl saved_eip, %eax
13693 - jmp *%eax
13694 + jmp *(saved_eip)
13695
13696 bogus_magic:
13697 jmp bogus_magic
13698 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13699 index 1f84794..e23f862 100644
13700 --- a/arch/x86/kernel/alternative.c
13701 +++ b/arch/x86/kernel/alternative.c
13702 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13703 */
13704 for (a = start; a < end; a++) {
13705 instr = (u8 *)&a->instr_offset + a->instr_offset;
13706 +
13707 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13708 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13709 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13710 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13711 +#endif
13712 +
13713 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13714 BUG_ON(a->replacementlen > a->instrlen);
13715 BUG_ON(a->instrlen > sizeof(insnbuf));
13716 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13717 for (poff = start; poff < end; poff++) {
13718 u8 *ptr = (u8 *)poff + *poff;
13719
13720 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13721 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13722 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13723 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13724 +#endif
13725 +
13726 if (!*poff || ptr < text || ptr >= text_end)
13727 continue;
13728 /* turn DS segment override prefix into lock prefix */
13729 - if (*ptr == 0x3e)
13730 + if (*ktla_ktva(ptr) == 0x3e)
13731 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13732 };
13733 mutex_unlock(&text_mutex);
13734 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13735 for (poff = start; poff < end; poff++) {
13736 u8 *ptr = (u8 *)poff + *poff;
13737
13738 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13739 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13740 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13741 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13742 +#endif
13743 +
13744 if (!*poff || ptr < text || ptr >= text_end)
13745 continue;
13746 /* turn lock prefix into DS segment override prefix */
13747 - if (*ptr == 0xf0)
13748 + if (*ktla_ktva(ptr) == 0xf0)
13749 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13750 };
13751 mutex_unlock(&text_mutex);
13752 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13753
13754 BUG_ON(p->len > MAX_PATCH_LEN);
13755 /* prep the buffer with the original instructions */
13756 - memcpy(insnbuf, p->instr, p->len);
13757 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13758 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13759 (unsigned long)p->instr, p->len);
13760
13761 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13762 if (smp_alt_once)
13763 free_init_pages("SMP alternatives",
13764 (unsigned long)__smp_locks,
13765 - (unsigned long)__smp_locks_end);
13766 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13767
13768 restart_nmi();
13769 }
13770 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13771 * instructions. And on the local CPU you need to be protected again NMI or MCE
13772 * handlers seeing an inconsistent instruction while you patch.
13773 */
13774 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
13775 +void *__kprobes text_poke_early(void *addr, const void *opcode,
13776 size_t len)
13777 {
13778 unsigned long flags;
13779 local_irq_save(flags);
13780 - memcpy(addr, opcode, len);
13781 +
13782 + pax_open_kernel();
13783 + memcpy(ktla_ktva(addr), opcode, len);
13784 sync_core();
13785 + pax_close_kernel();
13786 +
13787 local_irq_restore(flags);
13788 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13789 that causes hangs on some VIA CPUs. */
13790 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13791 */
13792 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13793 {
13794 - unsigned long flags;
13795 - char *vaddr;
13796 + unsigned char *vaddr = ktla_ktva(addr);
13797 struct page *pages[2];
13798 - int i;
13799 + size_t i;
13800
13801 if (!core_kernel_text((unsigned long)addr)) {
13802 - pages[0] = vmalloc_to_page(addr);
13803 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13804 + pages[0] = vmalloc_to_page(vaddr);
13805 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13806 } else {
13807 - pages[0] = virt_to_page(addr);
13808 + pages[0] = virt_to_page(vaddr);
13809 WARN_ON(!PageReserved(pages[0]));
13810 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13811 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13812 }
13813 BUG_ON(!pages[0]);
13814 - local_irq_save(flags);
13815 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13816 - if (pages[1])
13817 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13818 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13819 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13820 - clear_fixmap(FIX_TEXT_POKE0);
13821 - if (pages[1])
13822 - clear_fixmap(FIX_TEXT_POKE1);
13823 - local_flush_tlb();
13824 - sync_core();
13825 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13826 - that causes hangs on some VIA CPUs. */
13827 + text_poke_early(addr, opcode, len);
13828 for (i = 0; i < len; i++)
13829 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13830 - local_irq_restore(flags);
13831 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13832 return addr;
13833 }
13834
13835 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13836 index edc2448..553e7c5 100644
13837 --- a/arch/x86/kernel/apic/apic.c
13838 +++ b/arch/x86/kernel/apic/apic.c
13839 @@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
13840 /*
13841 * Debug level, exported for io_apic.c
13842 */
13843 -unsigned int apic_verbosity;
13844 +int apic_verbosity;
13845
13846 int pic_mode;
13847
13848 @@ -1917,7 +1917,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13849 apic_write(APIC_ESR, 0);
13850 v1 = apic_read(APIC_ESR);
13851 ack_APIC_irq();
13852 - atomic_inc(&irq_err_count);
13853 + atomic_inc_unchecked(&irq_err_count);
13854
13855 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13856 smp_processor_id(), v0 , v1);
13857 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13858 index e88300d..cd5a87a 100644
13859 --- a/arch/x86/kernel/apic/io_apic.c
13860 +++ b/arch/x86/kernel/apic/io_apic.c
13861 @@ -83,7 +83,9 @@ static struct io_apic_ops io_apic_ops = {
13862
13863 void __init set_io_apic_ops(const struct io_apic_ops *ops)
13864 {
13865 - io_apic_ops = *ops;
13866 + pax_open_kernel();
13867 + memcpy((void*)&io_apic_ops, ops, sizeof io_apic_ops);
13868 + pax_close_kernel();
13869 }
13870
13871 /*
13872 @@ -1135,7 +1137,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13873 }
13874 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13875
13876 -void lock_vector_lock(void)
13877 +void lock_vector_lock(void) __acquires(vector_lock)
13878 {
13879 /* Used to the online set of cpus does not change
13880 * during assign_irq_vector.
13881 @@ -1143,7 +1145,7 @@ void lock_vector_lock(void)
13882 raw_spin_lock(&vector_lock);
13883 }
13884
13885 -void unlock_vector_lock(void)
13886 +void unlock_vector_lock(void) __releases(vector_lock)
13887 {
13888 raw_spin_unlock(&vector_lock);
13889 }
13890 @@ -2549,7 +2551,7 @@ static void ack_apic_edge(struct irq_data *data)
13891 ack_APIC_irq();
13892 }
13893
13894 -atomic_t irq_mis_count;
13895 +atomic_unchecked_t irq_mis_count;
13896
13897 #ifdef CONFIG_GENERIC_PENDING_IRQ
13898 static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
13899 @@ -2667,7 +2669,7 @@ static void ack_apic_level(struct irq_data *data)
13900 * at the cpu.
13901 */
13902 if (!(v & (1 << (i & 0x1f)))) {
13903 - atomic_inc(&irq_mis_count);
13904 + atomic_inc_unchecked(&irq_mis_count);
13905
13906 eoi_ioapic_irq(irq, cfg);
13907 }
13908 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13909 index 459e78c..f037006 100644
13910 --- a/arch/x86/kernel/apm_32.c
13911 +++ b/arch/x86/kernel/apm_32.c
13912 @@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
13913 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13914 * even though they are called in protected mode.
13915 */
13916 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13917 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13918 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13919
13920 static const char driver_version[] = "1.16ac"; /* no spaces */
13921 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13922 BUG_ON(cpu != 0);
13923 gdt = get_cpu_gdt_table(cpu);
13924 save_desc_40 = gdt[0x40 / 8];
13925 +
13926 + pax_open_kernel();
13927 gdt[0x40 / 8] = bad_bios_desc;
13928 + pax_close_kernel();
13929
13930 apm_irq_save(flags);
13931 APM_DO_SAVE_SEGS;
13932 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13933 &call->esi);
13934 APM_DO_RESTORE_SEGS;
13935 apm_irq_restore(flags);
13936 +
13937 + pax_open_kernel();
13938 gdt[0x40 / 8] = save_desc_40;
13939 + pax_close_kernel();
13940 +
13941 put_cpu();
13942
13943 return call->eax & 0xff;
13944 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13945 BUG_ON(cpu != 0);
13946 gdt = get_cpu_gdt_table(cpu);
13947 save_desc_40 = gdt[0x40 / 8];
13948 +
13949 + pax_open_kernel();
13950 gdt[0x40 / 8] = bad_bios_desc;
13951 + pax_close_kernel();
13952
13953 apm_irq_save(flags);
13954 APM_DO_SAVE_SEGS;
13955 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13956 &call->eax);
13957 APM_DO_RESTORE_SEGS;
13958 apm_irq_restore(flags);
13959 +
13960 + pax_open_kernel();
13961 gdt[0x40 / 8] = save_desc_40;
13962 + pax_close_kernel();
13963 +
13964 put_cpu();
13965 return error;
13966 }
13967 @@ -2345,12 +2359,15 @@ static int __init apm_init(void)
13968 * code to that CPU.
13969 */
13970 gdt = get_cpu_gdt_table(0);
13971 +
13972 + pax_open_kernel();
13973 set_desc_base(&gdt[APM_CS >> 3],
13974 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13975 set_desc_base(&gdt[APM_CS_16 >> 3],
13976 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13977 set_desc_base(&gdt[APM_DS >> 3],
13978 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13979 + pax_close_kernel();
13980
13981 proc_create("apm", 0, NULL, &apm_file_ops);
13982
13983 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
13984 index 68de2dc..1f3c720 100644
13985 --- a/arch/x86/kernel/asm-offsets.c
13986 +++ b/arch/x86/kernel/asm-offsets.c
13987 @@ -33,6 +33,8 @@ void common(void) {
13988 OFFSET(TI_status, thread_info, status);
13989 OFFSET(TI_addr_limit, thread_info, addr_limit);
13990 OFFSET(TI_preempt_count, thread_info, preempt_count);
13991 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13992 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13993
13994 BLANK();
13995 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
13996 @@ -53,8 +55,26 @@ void common(void) {
13997 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13998 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13999 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14000 +
14001 +#ifdef CONFIG_PAX_KERNEXEC
14002 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14003 #endif
14004
14005 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14006 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14007 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14008 +#ifdef CONFIG_X86_64
14009 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14010 +#endif
14011 +#endif
14012 +
14013 +#endif
14014 +
14015 + BLANK();
14016 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14017 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14018 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14019 +
14020 #ifdef CONFIG_XEN
14021 BLANK();
14022 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14023 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14024 index 1b4754f..fbb4227 100644
14025 --- a/arch/x86/kernel/asm-offsets_64.c
14026 +++ b/arch/x86/kernel/asm-offsets_64.c
14027 @@ -76,6 +76,7 @@ int main(void)
14028 BLANK();
14029 #undef ENTRY
14030
14031 + DEFINE(TSS_size, sizeof(struct tss_struct));
14032 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
14033 BLANK();
14034
14035 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14036 index 6ab6aa2..8f71507 100644
14037 --- a/arch/x86/kernel/cpu/Makefile
14038 +++ b/arch/x86/kernel/cpu/Makefile
14039 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
14040 CFLAGS_REMOVE_perf_event.o = -pg
14041 endif
14042
14043 -# Make sure load_percpu_segment has no stackprotector
14044 -nostackp := $(call cc-option, -fno-stack-protector)
14045 -CFLAGS_common.o := $(nostackp)
14046 -
14047 obj-y := intel_cacheinfo.o scattered.o topology.o
14048 obj-y += proc.o capflags.o powerflags.o common.o
14049 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
14050 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14051 index 146bb62..ac9c74a 100644
14052 --- a/arch/x86/kernel/cpu/amd.c
14053 +++ b/arch/x86/kernel/cpu/amd.c
14054 @@ -691,7 +691,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14055 unsigned int size)
14056 {
14057 /* AMD errata T13 (order #21922) */
14058 - if ((c->x86 == 6)) {
14059 + if (c->x86 == 6) {
14060 /* Duron Rev A0 */
14061 if (c->x86_model == 3 && c->x86_mask == 0)
14062 size = 64;
14063 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14064 index cf79302..b1b28ae 100644
14065 --- a/arch/x86/kernel/cpu/common.c
14066 +++ b/arch/x86/kernel/cpu/common.c
14067 @@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14068
14069 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14070
14071 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14072 -#ifdef CONFIG_X86_64
14073 - /*
14074 - * We need valid kernel segments for data and code in long mode too
14075 - * IRET will check the segment types kkeil 2000/10/28
14076 - * Also sysret mandates a special GDT layout
14077 - *
14078 - * TLS descriptors are currently at a different place compared to i386.
14079 - * Hopefully nobody expects them at a fixed place (Wine?)
14080 - */
14081 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14082 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14083 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14084 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14085 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14086 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14087 -#else
14088 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14089 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14090 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14091 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14092 - /*
14093 - * Segments used for calling PnP BIOS have byte granularity.
14094 - * They code segments and data segments have fixed 64k limits,
14095 - * the transfer segment sizes are set at run time.
14096 - */
14097 - /* 32-bit code */
14098 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14099 - /* 16-bit code */
14100 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14101 - /* 16-bit data */
14102 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14103 - /* 16-bit data */
14104 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14105 - /* 16-bit data */
14106 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14107 - /*
14108 - * The APM segments have byte granularity and their bases
14109 - * are set at run time. All have 64k limits.
14110 - */
14111 - /* 32-bit code */
14112 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14113 - /* 16-bit code */
14114 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14115 - /* data */
14116 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14117 -
14118 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14119 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14120 - GDT_STACK_CANARY_INIT
14121 -#endif
14122 -} };
14123 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14124 -
14125 static int __init x86_xsave_setup(char *s)
14126 {
14127 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14128 @@ -374,7 +320,7 @@ void switch_to_new_gdt(int cpu)
14129 {
14130 struct desc_ptr gdt_descr;
14131
14132 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14133 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14134 gdt_descr.size = GDT_SIZE - 1;
14135 load_gdt(&gdt_descr);
14136 /* Reload the per-cpu base */
14137 @@ -841,6 +787,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14138 /* Filter out anything that depends on CPUID levels we don't have */
14139 filter_cpuid_features(c, true);
14140
14141 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14142 + setup_clear_cpu_cap(X86_FEATURE_SEP);
14143 +#endif
14144 +
14145 /* If the model name is still unset, do table lookup. */
14146 if (!c->x86_model_id[0]) {
14147 const char *p;
14148 @@ -1021,10 +971,12 @@ static __init int setup_disablecpuid(char *arg)
14149 }
14150 __setup("clearcpuid=", setup_disablecpuid);
14151
14152 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14153 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
14154 +
14155 #ifdef CONFIG_X86_64
14156 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14157 -struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
14158 - (unsigned long) nmi_idt_table };
14159 +struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
14160
14161 DEFINE_PER_CPU_FIRST(union irq_stack_union,
14162 irq_stack_union) __aligned(PAGE_SIZE);
14163 @@ -1038,7 +990,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14164 EXPORT_PER_CPU_SYMBOL(current_task);
14165
14166 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14167 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14168 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14169 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14170
14171 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14172 @@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14173 {
14174 memset(regs, 0, sizeof(struct pt_regs));
14175 regs->fs = __KERNEL_PERCPU;
14176 - regs->gs = __KERNEL_STACK_CANARY;
14177 + savesegment(gs, regs->gs);
14178
14179 return regs;
14180 }
14181 @@ -1181,7 +1133,7 @@ void __cpuinit cpu_init(void)
14182 int i;
14183
14184 cpu = stack_smp_processor_id();
14185 - t = &per_cpu(init_tss, cpu);
14186 + t = init_tss + cpu;
14187 oist = &per_cpu(orig_ist, cpu);
14188
14189 #ifdef CONFIG_NUMA
14190 @@ -1207,7 +1159,7 @@ void __cpuinit cpu_init(void)
14191 switch_to_new_gdt(cpu);
14192 loadsegment(fs, 0);
14193
14194 - load_idt((const struct desc_ptr *)&idt_descr);
14195 + load_idt(&idt_descr);
14196
14197 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14198 syscall_init();
14199 @@ -1216,7 +1168,6 @@ void __cpuinit cpu_init(void)
14200 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14201 barrier();
14202
14203 - x86_configure_nx();
14204 if (cpu != 0)
14205 enable_x2apic();
14206
14207 @@ -1272,7 +1223,7 @@ void __cpuinit cpu_init(void)
14208 {
14209 int cpu = smp_processor_id();
14210 struct task_struct *curr = current;
14211 - struct tss_struct *t = &per_cpu(init_tss, cpu);
14212 + struct tss_struct *t = init_tss + cpu;
14213 struct thread_struct *thread = &curr->thread;
14214
14215 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
14216 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
14217 index 3e6ff6c..54b4992 100644
14218 --- a/arch/x86/kernel/cpu/intel.c
14219 +++ b/arch/x86/kernel/cpu/intel.c
14220 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14221 * Update the IDT descriptor and reload the IDT so that
14222 * it uses the read-only mapped virtual address.
14223 */
14224 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14225 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14226 load_idt(&idt_descr);
14227 }
14228 #endif
14229 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14230 index 61604ae..98250a5 100644
14231 --- a/arch/x86/kernel/cpu/mcheck/mce.c
14232 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
14233 @@ -42,6 +42,7 @@
14234 #include <asm/processor.h>
14235 #include <asm/mce.h>
14236 #include <asm/msr.h>
14237 +#include <asm/local.h>
14238
14239 #include "mce-internal.h"
14240
14241 @@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
14242 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14243 m->cs, m->ip);
14244
14245 - if (m->cs == __KERNEL_CS)
14246 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14247 print_symbol("{%s}", m->ip);
14248 pr_cont("\n");
14249 }
14250 @@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
14251
14252 #define PANIC_TIMEOUT 5 /* 5 seconds */
14253
14254 -static atomic_t mce_paniced;
14255 +static atomic_unchecked_t mce_paniced;
14256
14257 static int fake_panic;
14258 -static atomic_t mce_fake_paniced;
14259 +static atomic_unchecked_t mce_fake_paniced;
14260
14261 /* Panic in progress. Enable interrupts and wait for final IPI */
14262 static void wait_for_panic(void)
14263 @@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14264 /*
14265 * Make sure only one CPU runs in machine check panic
14266 */
14267 - if (atomic_inc_return(&mce_paniced) > 1)
14268 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14269 wait_for_panic();
14270 barrier();
14271
14272 @@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14273 console_verbose();
14274 } else {
14275 /* Don't log too much for fake panic */
14276 - if (atomic_inc_return(&mce_fake_paniced) > 1)
14277 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14278 return;
14279 }
14280 /* First print corrected ones that are still unlogged */
14281 @@ -684,7 +685,7 @@ static int mce_timed_out(u64 *t)
14282 * might have been modified by someone else.
14283 */
14284 rmb();
14285 - if (atomic_read(&mce_paniced))
14286 + if (atomic_read_unchecked(&mce_paniced))
14287 wait_for_panic();
14288 if (!monarch_timeout)
14289 goto out;
14290 @@ -1535,7 +1536,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14291 }
14292
14293 /* Call the installed machine check handler for this CPU setup. */
14294 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
14295 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14296 unexpected_machine_check;
14297
14298 /*
14299 @@ -1558,7 +1559,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14300 return;
14301 }
14302
14303 + pax_open_kernel();
14304 machine_check_vector = do_machine_check;
14305 + pax_close_kernel();
14306
14307 __mcheck_cpu_init_generic();
14308 __mcheck_cpu_init_vendor(c);
14309 @@ -1572,7 +1575,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14310 */
14311
14312 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
14313 -static int mce_chrdev_open_count; /* #times opened */
14314 +static local_t mce_chrdev_open_count; /* #times opened */
14315 static int mce_chrdev_open_exclu; /* already open exclusive? */
14316
14317 static int mce_chrdev_open(struct inode *inode, struct file *file)
14318 @@ -1580,7 +1583,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14319 spin_lock(&mce_chrdev_state_lock);
14320
14321 if (mce_chrdev_open_exclu ||
14322 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
14323 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
14324 spin_unlock(&mce_chrdev_state_lock);
14325
14326 return -EBUSY;
14327 @@ -1588,7 +1591,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14328
14329 if (file->f_flags & O_EXCL)
14330 mce_chrdev_open_exclu = 1;
14331 - mce_chrdev_open_count++;
14332 + local_inc(&mce_chrdev_open_count);
14333
14334 spin_unlock(&mce_chrdev_state_lock);
14335
14336 @@ -1599,7 +1602,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
14337 {
14338 spin_lock(&mce_chrdev_state_lock);
14339
14340 - mce_chrdev_open_count--;
14341 + local_dec(&mce_chrdev_open_count);
14342 mce_chrdev_open_exclu = 0;
14343
14344 spin_unlock(&mce_chrdev_state_lock);
14345 @@ -2324,7 +2327,7 @@ struct dentry *mce_get_debugfs_dir(void)
14346 static void mce_reset(void)
14347 {
14348 cpu_missing = 0;
14349 - atomic_set(&mce_fake_paniced, 0);
14350 + atomic_set_unchecked(&mce_fake_paniced, 0);
14351 atomic_set(&mce_executing, 0);
14352 atomic_set(&mce_callin, 0);
14353 atomic_set(&global_nwo, 0);
14354 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14355 index 2d5454c..51987eb 100644
14356 --- a/arch/x86/kernel/cpu/mcheck/p5.c
14357 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
14358 @@ -11,6 +11,7 @@
14359 #include <asm/processor.h>
14360 #include <asm/mce.h>
14361 #include <asm/msr.h>
14362 +#include <asm/pgtable.h>
14363
14364 /* By default disabled */
14365 int mce_p5_enabled __read_mostly;
14366 @@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14367 if (!cpu_has(c, X86_FEATURE_MCE))
14368 return;
14369
14370 + pax_open_kernel();
14371 machine_check_vector = pentium_machine_check;
14372 + pax_close_kernel();
14373 /* Make sure the vector pointer is visible before we enable MCEs: */
14374 wmb();
14375
14376 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14377 index 2d7998f..17c9de1 100644
14378 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
14379 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14380 @@ -10,6 +10,7 @@
14381 #include <asm/processor.h>
14382 #include <asm/mce.h>
14383 #include <asm/msr.h>
14384 +#include <asm/pgtable.h>
14385
14386 /* Machine check handler for WinChip C6: */
14387 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14388 @@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14389 {
14390 u32 lo, hi;
14391
14392 + pax_open_kernel();
14393 machine_check_vector = winchip_machine_check;
14394 + pax_close_kernel();
14395 /* Make sure the vector pointer is visible before we enable MCEs: */
14396 wmb();
14397
14398 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14399 index 6b96110..0da73eb 100644
14400 --- a/arch/x86/kernel/cpu/mtrr/main.c
14401 +++ b/arch/x86/kernel/cpu/mtrr/main.c
14402 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
14403 u64 size_or_mask, size_and_mask;
14404 static bool mtrr_aps_delayed_init;
14405
14406 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14407 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14408
14409 const struct mtrr_ops *mtrr_if;
14410
14411 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14412 index df5e41f..816c719 100644
14413 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14414 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14415 @@ -25,7 +25,7 @@ struct mtrr_ops {
14416 int (*validate_add_page)(unsigned long base, unsigned long size,
14417 unsigned int type);
14418 int (*have_wrcomb)(void);
14419 -};
14420 +} __do_const;
14421
14422 extern int generic_get_free_region(unsigned long base, unsigned long size,
14423 int replace_reg);
14424 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14425 index bb8e034..fb9020b 100644
14426 --- a/arch/x86/kernel/cpu/perf_event.c
14427 +++ b/arch/x86/kernel/cpu/perf_event.c
14428 @@ -1835,7 +1835,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
14429 break;
14430
14431 perf_callchain_store(entry, frame.return_address);
14432 - fp = frame.next_frame;
14433 + fp = (const void __force_user *)frame.next_frame;
14434 }
14435 }
14436
14437 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14438 index 13ad899..f642b9a 100644
14439 --- a/arch/x86/kernel/crash.c
14440 +++ b/arch/x86/kernel/crash.c
14441 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
14442 {
14443 #ifdef CONFIG_X86_32
14444 struct pt_regs fixed_regs;
14445 -#endif
14446
14447 -#ifdef CONFIG_X86_32
14448 - if (!user_mode_vm(regs)) {
14449 + if (!user_mode(regs)) {
14450 crash_fixup_ss_esp(&fixed_regs, regs);
14451 regs = &fixed_regs;
14452 }
14453 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14454 index 37250fe..bf2ec74 100644
14455 --- a/arch/x86/kernel/doublefault_32.c
14456 +++ b/arch/x86/kernel/doublefault_32.c
14457 @@ -11,7 +11,7 @@
14458
14459 #define DOUBLEFAULT_STACKSIZE (1024)
14460 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14461 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14462 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14463
14464 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14465
14466 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14467 unsigned long gdt, tss;
14468
14469 store_gdt(&gdt_desc);
14470 - gdt = gdt_desc.address;
14471 + gdt = (unsigned long)gdt_desc.address;
14472
14473 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14474
14475 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14476 /* 0x2 bit is always set */
14477 .flags = X86_EFLAGS_SF | 0x2,
14478 .sp = STACK_START,
14479 - .es = __USER_DS,
14480 + .es = __KERNEL_DS,
14481 .cs = __KERNEL_CS,
14482 .ss = __KERNEL_DS,
14483 - .ds = __USER_DS,
14484 + .ds = __KERNEL_DS,
14485 .fs = __KERNEL_PERCPU,
14486
14487 .__cr3 = __pa_nodebug(swapper_pg_dir),
14488 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14489 index 1b81839..0b4e7b0 100644
14490 --- a/arch/x86/kernel/dumpstack.c
14491 +++ b/arch/x86/kernel/dumpstack.c
14492 @@ -2,6 +2,9 @@
14493 * Copyright (C) 1991, 1992 Linus Torvalds
14494 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14495 */
14496 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14497 +#define __INCLUDED_BY_HIDESYM 1
14498 +#endif
14499 #include <linux/kallsyms.h>
14500 #include <linux/kprobes.h>
14501 #include <linux/uaccess.h>
14502 @@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
14503 static void
14504 print_ftrace_graph_addr(unsigned long addr, void *data,
14505 const struct stacktrace_ops *ops,
14506 - struct thread_info *tinfo, int *graph)
14507 + struct task_struct *task, int *graph)
14508 {
14509 - struct task_struct *task;
14510 unsigned long ret_addr;
14511 int index;
14512
14513 if (addr != (unsigned long)return_to_handler)
14514 return;
14515
14516 - task = tinfo->task;
14517 index = task->curr_ret_stack;
14518
14519 if (!task->ret_stack || index < *graph)
14520 @@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14521 static inline void
14522 print_ftrace_graph_addr(unsigned long addr, void *data,
14523 const struct stacktrace_ops *ops,
14524 - struct thread_info *tinfo, int *graph)
14525 + struct task_struct *task, int *graph)
14526 { }
14527 #endif
14528
14529 @@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14530 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14531 */
14532
14533 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14534 - void *p, unsigned int size, void *end)
14535 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14536 {
14537 - void *t = tinfo;
14538 if (end) {
14539 if (p < end && p >= (end-THREAD_SIZE))
14540 return 1;
14541 @@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14542 }
14543
14544 unsigned long
14545 -print_context_stack(struct thread_info *tinfo,
14546 +print_context_stack(struct task_struct *task, void *stack_start,
14547 unsigned long *stack, unsigned long bp,
14548 const struct stacktrace_ops *ops, void *data,
14549 unsigned long *end, int *graph)
14550 {
14551 struct stack_frame *frame = (struct stack_frame *)bp;
14552
14553 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14554 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14555 unsigned long addr;
14556
14557 addr = *stack;
14558 @@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
14559 } else {
14560 ops->address(data, addr, 0);
14561 }
14562 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14563 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14564 }
14565 stack++;
14566 }
14567 @@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
14568 EXPORT_SYMBOL_GPL(print_context_stack);
14569
14570 unsigned long
14571 -print_context_stack_bp(struct thread_info *tinfo,
14572 +print_context_stack_bp(struct task_struct *task, void *stack_start,
14573 unsigned long *stack, unsigned long bp,
14574 const struct stacktrace_ops *ops, void *data,
14575 unsigned long *end, int *graph)
14576 @@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14577 struct stack_frame *frame = (struct stack_frame *)bp;
14578 unsigned long *ret_addr = &frame->return_address;
14579
14580 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14581 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14582 unsigned long addr = *ret_addr;
14583
14584 if (!__kernel_text_address(addr))
14585 @@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14586 ops->address(data, addr, 1);
14587 frame = frame->next_frame;
14588 ret_addr = &frame->return_address;
14589 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14590 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14591 }
14592
14593 return (unsigned long)frame;
14594 @@ -189,7 +188,7 @@ void dump_stack(void)
14595
14596 bp = stack_frame(current, NULL);
14597 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14598 - current->pid, current->comm, print_tainted(),
14599 + task_pid_nr(current), current->comm, print_tainted(),
14600 init_utsname()->release,
14601 (int)strcspn(init_utsname()->version, " "),
14602 init_utsname()->version);
14603 @@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
14604 }
14605 EXPORT_SYMBOL_GPL(oops_begin);
14606
14607 +extern void gr_handle_kernel_exploit(void);
14608 +
14609 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14610 {
14611 if (regs && kexec_should_crash(current))
14612 @@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14613 panic("Fatal exception in interrupt");
14614 if (panic_on_oops)
14615 panic("Fatal exception");
14616 - do_exit(signr);
14617 +
14618 + gr_handle_kernel_exploit();
14619 +
14620 + do_group_exit(signr);
14621 }
14622
14623 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14624 @@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14625
14626 show_registers(regs);
14627 #ifdef CONFIG_X86_32
14628 - if (user_mode_vm(regs)) {
14629 + if (user_mode(regs)) {
14630 sp = regs->sp;
14631 ss = regs->ss & 0xffff;
14632 } else {
14633 @@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14634 unsigned long flags = oops_begin();
14635 int sig = SIGSEGV;
14636
14637 - if (!user_mode_vm(regs))
14638 + if (!user_mode(regs))
14639 report_bug(regs->ip, regs);
14640
14641 if (__die(str, regs, err))
14642 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14643 index 88ec912..e95e935 100644
14644 --- a/arch/x86/kernel/dumpstack_32.c
14645 +++ b/arch/x86/kernel/dumpstack_32.c
14646 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14647 bp = stack_frame(task, regs);
14648
14649 for (;;) {
14650 - struct thread_info *context;
14651 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14652
14653 - context = (struct thread_info *)
14654 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14655 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14656 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14657
14658 - stack = (unsigned long *)context->previous_esp;
14659 - if (!stack)
14660 + if (stack_start == task_stack_page(task))
14661 break;
14662 + stack = *(unsigned long **)stack_start;
14663 if (ops->stack(data, "IRQ") < 0)
14664 break;
14665 touch_nmi_watchdog();
14666 @@ -87,7 +85,7 @@ void show_registers(struct pt_regs *regs)
14667 int i;
14668
14669 print_modules();
14670 - __show_regs(regs, !user_mode_vm(regs));
14671 + __show_regs(regs, !user_mode(regs));
14672
14673 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
14674 TASK_COMM_LEN, current->comm, task_pid_nr(current),
14675 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14676 * When in-kernel, we also print out the stack and code at the
14677 * time of the fault..
14678 */
14679 - if (!user_mode_vm(regs)) {
14680 + if (!user_mode(regs)) {
14681 unsigned int code_prologue = code_bytes * 43 / 64;
14682 unsigned int code_len = code_bytes;
14683 unsigned char c;
14684 u8 *ip;
14685 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14686
14687 printk(KERN_EMERG "Stack:\n");
14688 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14689
14690 printk(KERN_EMERG "Code: ");
14691
14692 - ip = (u8 *)regs->ip - code_prologue;
14693 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14694 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14695 /* try starting at IP */
14696 - ip = (u8 *)regs->ip;
14697 + ip = (u8 *)regs->ip + cs_base;
14698 code_len = code_len - code_prologue + 1;
14699 }
14700 for (i = 0; i < code_len; i++, ip++) {
14701 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14702 printk(KERN_CONT " Bad EIP value.");
14703 break;
14704 }
14705 - if (ip == (u8 *)regs->ip)
14706 + if (ip == (u8 *)regs->ip + cs_base)
14707 printk(KERN_CONT "<%02x> ", c);
14708 else
14709 printk(KERN_CONT "%02x ", c);
14710 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14711 {
14712 unsigned short ud2;
14713
14714 + ip = ktla_ktva(ip);
14715 if (ip < PAGE_OFFSET)
14716 return 0;
14717 if (probe_kernel_address((unsigned short *)ip, ud2))
14718 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14719
14720 return ud2 == 0x0b0f;
14721 }
14722 +
14723 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14724 +void pax_check_alloca(unsigned long size)
14725 +{
14726 + unsigned long sp = (unsigned long)&sp, stack_left;
14727 +
14728 + /* all kernel stacks are of the same size */
14729 + stack_left = sp & (THREAD_SIZE - 1);
14730 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14731 +}
14732 +EXPORT_SYMBOL(pax_check_alloca);
14733 +#endif
14734 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14735 index 17107bd..9623722 100644
14736 --- a/arch/x86/kernel/dumpstack_64.c
14737 +++ b/arch/x86/kernel/dumpstack_64.c
14738 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14739 unsigned long *irq_stack_end =
14740 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14741 unsigned used = 0;
14742 - struct thread_info *tinfo;
14743 int graph = 0;
14744 unsigned long dummy;
14745 + void *stack_start;
14746
14747 if (!task)
14748 task = current;
14749 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14750 * current stack address. If the stacks consist of nested
14751 * exceptions
14752 */
14753 - tinfo = task_thread_info(task);
14754 for (;;) {
14755 char *id;
14756 unsigned long *estack_end;
14757 +
14758 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14759 &used, &id);
14760
14761 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14762 if (ops->stack(data, id) < 0)
14763 break;
14764
14765 - bp = ops->walk_stack(tinfo, stack, bp, ops,
14766 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14767 data, estack_end, &graph);
14768 ops->stack(data, "<EOE>");
14769 /*
14770 @@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14771 * second-to-last pointer (index -2 to end) in the
14772 * exception stack:
14773 */
14774 + if ((u16)estack_end[-1] != __KERNEL_DS)
14775 + goto out;
14776 stack = (unsigned long *) estack_end[-2];
14777 continue;
14778 }
14779 @@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14780 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14781 if (ops->stack(data, "IRQ") < 0)
14782 break;
14783 - bp = ops->walk_stack(tinfo, stack, bp,
14784 + bp = ops->walk_stack(task, irq_stack, stack, bp,
14785 ops, data, irq_stack_end, &graph);
14786 /*
14787 * We link to the next stack (which would be
14788 @@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14789 /*
14790 * This handles the process stack:
14791 */
14792 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14793 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14794 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14795 +out:
14796 put_cpu();
14797 }
14798 EXPORT_SYMBOL(dump_trace);
14799 @@ -305,3 +309,50 @@ int is_valid_bugaddr(unsigned long ip)
14800
14801 return ud2 == 0x0b0f;
14802 }
14803 +
14804 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14805 +void pax_check_alloca(unsigned long size)
14806 +{
14807 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14808 + unsigned cpu, used;
14809 + char *id;
14810 +
14811 + /* check the process stack first */
14812 + stack_start = (unsigned long)task_stack_page(current);
14813 + stack_end = stack_start + THREAD_SIZE;
14814 + if (likely(stack_start <= sp && sp < stack_end)) {
14815 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14816 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14817 + return;
14818 + }
14819 +
14820 + cpu = get_cpu();
14821 +
14822 + /* check the irq stacks */
14823 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14824 + stack_start = stack_end - IRQ_STACK_SIZE;
14825 + if (stack_start <= sp && sp < stack_end) {
14826 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14827 + put_cpu();
14828 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14829 + return;
14830 + }
14831 +
14832 + /* check the exception stacks */
14833 + used = 0;
14834 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14835 + stack_start = stack_end - EXCEPTION_STKSZ;
14836 + if (stack_end && stack_start <= sp && sp < stack_end) {
14837 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14838 + put_cpu();
14839 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14840 + return;
14841 + }
14842 +
14843 + put_cpu();
14844 +
14845 + /* unknown stack */
14846 + BUG();
14847 +}
14848 +EXPORT_SYMBOL(pax_check_alloca);
14849 +#endif
14850 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14851 index 9b9f18b..9fcaa04 100644
14852 --- a/arch/x86/kernel/early_printk.c
14853 +++ b/arch/x86/kernel/early_printk.c
14854 @@ -7,6 +7,7 @@
14855 #include <linux/pci_regs.h>
14856 #include <linux/pci_ids.h>
14857 #include <linux/errno.h>
14858 +#include <linux/sched.h>
14859 #include <asm/io.h>
14860 #include <asm/processor.h>
14861 #include <asm/fcntl.h>
14862 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14863 index 7b784f4..db6b628 100644
14864 --- a/arch/x86/kernel/entry_32.S
14865 +++ b/arch/x86/kernel/entry_32.S
14866 @@ -179,13 +179,146 @@
14867 /*CFI_REL_OFFSET gs, PT_GS*/
14868 .endm
14869 .macro SET_KERNEL_GS reg
14870 +
14871 +#ifdef CONFIG_CC_STACKPROTECTOR
14872 movl $(__KERNEL_STACK_CANARY), \reg
14873 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14874 + movl $(__USER_DS), \reg
14875 +#else
14876 + xorl \reg, \reg
14877 +#endif
14878 +
14879 movl \reg, %gs
14880 .endm
14881
14882 #endif /* CONFIG_X86_32_LAZY_GS */
14883
14884 -.macro SAVE_ALL
14885 +.macro pax_enter_kernel
14886 +#ifdef CONFIG_PAX_KERNEXEC
14887 + call pax_enter_kernel
14888 +#endif
14889 +.endm
14890 +
14891 +.macro pax_exit_kernel
14892 +#ifdef CONFIG_PAX_KERNEXEC
14893 + call pax_exit_kernel
14894 +#endif
14895 +.endm
14896 +
14897 +#ifdef CONFIG_PAX_KERNEXEC
14898 +ENTRY(pax_enter_kernel)
14899 +#ifdef CONFIG_PARAVIRT
14900 + pushl %eax
14901 + pushl %ecx
14902 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14903 + mov %eax, %esi
14904 +#else
14905 + mov %cr0, %esi
14906 +#endif
14907 + bts $16, %esi
14908 + jnc 1f
14909 + mov %cs, %esi
14910 + cmp $__KERNEL_CS, %esi
14911 + jz 3f
14912 + ljmp $__KERNEL_CS, $3f
14913 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14914 +2:
14915 +#ifdef CONFIG_PARAVIRT
14916 + mov %esi, %eax
14917 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14918 +#else
14919 + mov %esi, %cr0
14920 +#endif
14921 +3:
14922 +#ifdef CONFIG_PARAVIRT
14923 + popl %ecx
14924 + popl %eax
14925 +#endif
14926 + ret
14927 +ENDPROC(pax_enter_kernel)
14928 +
14929 +ENTRY(pax_exit_kernel)
14930 +#ifdef CONFIG_PARAVIRT
14931 + pushl %eax
14932 + pushl %ecx
14933 +#endif
14934 + mov %cs, %esi
14935 + cmp $__KERNEXEC_KERNEL_CS, %esi
14936 + jnz 2f
14937 +#ifdef CONFIG_PARAVIRT
14938 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14939 + mov %eax, %esi
14940 +#else
14941 + mov %cr0, %esi
14942 +#endif
14943 + btr $16, %esi
14944 + ljmp $__KERNEL_CS, $1f
14945 +1:
14946 +#ifdef CONFIG_PARAVIRT
14947 + mov %esi, %eax
14948 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14949 +#else
14950 + mov %esi, %cr0
14951 +#endif
14952 +2:
14953 +#ifdef CONFIG_PARAVIRT
14954 + popl %ecx
14955 + popl %eax
14956 +#endif
14957 + ret
14958 +ENDPROC(pax_exit_kernel)
14959 +#endif
14960 +
14961 +.macro pax_erase_kstack
14962 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14963 + call pax_erase_kstack
14964 +#endif
14965 +.endm
14966 +
14967 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14968 +/*
14969 + * ebp: thread_info
14970 + * ecx, edx: can be clobbered
14971 + */
14972 +ENTRY(pax_erase_kstack)
14973 + pushl %edi
14974 + pushl %eax
14975 +
14976 + mov TI_lowest_stack(%ebp), %edi
14977 + mov $-0xBEEF, %eax
14978 + std
14979 +
14980 +1: mov %edi, %ecx
14981 + and $THREAD_SIZE_asm - 1, %ecx
14982 + shr $2, %ecx
14983 + repne scasl
14984 + jecxz 2f
14985 +
14986 + cmp $2*16, %ecx
14987 + jc 2f
14988 +
14989 + mov $2*16, %ecx
14990 + repe scasl
14991 + jecxz 2f
14992 + jne 1b
14993 +
14994 +2: cld
14995 + mov %esp, %ecx
14996 + sub %edi, %ecx
14997 + shr $2, %ecx
14998 + rep stosl
14999 +
15000 + mov TI_task_thread_sp0(%ebp), %edi
15001 + sub $128, %edi
15002 + mov %edi, TI_lowest_stack(%ebp)
15003 +
15004 + popl %eax
15005 + popl %edi
15006 + ret
15007 +ENDPROC(pax_erase_kstack)
15008 +#endif
15009 +
15010 +.macro __SAVE_ALL _DS
15011 cld
15012 PUSH_GS
15013 pushl_cfi %fs
15014 @@ -208,7 +341,7 @@
15015 CFI_REL_OFFSET ecx, 0
15016 pushl_cfi %ebx
15017 CFI_REL_OFFSET ebx, 0
15018 - movl $(__USER_DS), %edx
15019 + movl $\_DS, %edx
15020 movl %edx, %ds
15021 movl %edx, %es
15022 movl $(__KERNEL_PERCPU), %edx
15023 @@ -216,6 +349,15 @@
15024 SET_KERNEL_GS %edx
15025 .endm
15026
15027 +.macro SAVE_ALL
15028 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15029 + __SAVE_ALL __KERNEL_DS
15030 + pax_enter_kernel
15031 +#else
15032 + __SAVE_ALL __USER_DS
15033 +#endif
15034 +.endm
15035 +
15036 .macro RESTORE_INT_REGS
15037 popl_cfi %ebx
15038 CFI_RESTORE ebx
15039 @@ -301,7 +443,7 @@ ENTRY(ret_from_fork)
15040 popfl_cfi
15041 jmp syscall_exit
15042 CFI_ENDPROC
15043 -END(ret_from_fork)
15044 +ENDPROC(ret_from_fork)
15045
15046 /*
15047 * Interrupt exit functions should be protected against kprobes
15048 @@ -335,7 +477,15 @@ resume_userspace_sig:
15049 andl $SEGMENT_RPL_MASK, %eax
15050 #endif
15051 cmpl $USER_RPL, %eax
15052 +
15053 +#ifdef CONFIG_PAX_KERNEXEC
15054 + jae resume_userspace
15055 +
15056 + pax_exit_kernel
15057 + jmp resume_kernel
15058 +#else
15059 jb resume_kernel # not returning to v8086 or userspace
15060 +#endif
15061
15062 ENTRY(resume_userspace)
15063 LOCKDEP_SYS_EXIT
15064 @@ -347,8 +497,8 @@ ENTRY(resume_userspace)
15065 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15066 # int/exception return?
15067 jne work_pending
15068 - jmp restore_all
15069 -END(ret_from_exception)
15070 + jmp restore_all_pax
15071 +ENDPROC(ret_from_exception)
15072
15073 #ifdef CONFIG_PREEMPT
15074 ENTRY(resume_kernel)
15075 @@ -363,7 +513,7 @@ need_resched:
15076 jz restore_all
15077 call preempt_schedule_irq
15078 jmp need_resched
15079 -END(resume_kernel)
15080 +ENDPROC(resume_kernel)
15081 #endif
15082 CFI_ENDPROC
15083 /*
15084 @@ -397,23 +547,34 @@ sysenter_past_esp:
15085 /*CFI_REL_OFFSET cs, 0*/
15086 /*
15087 * Push current_thread_info()->sysenter_return to the stack.
15088 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15089 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
15090 */
15091 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
15092 + pushl_cfi $0
15093 CFI_REL_OFFSET eip, 0
15094
15095 pushl_cfi %eax
15096 SAVE_ALL
15097 + GET_THREAD_INFO(%ebp)
15098 + movl TI_sysenter_return(%ebp),%ebp
15099 + movl %ebp,PT_EIP(%esp)
15100 ENABLE_INTERRUPTS(CLBR_NONE)
15101
15102 /*
15103 * Load the potential sixth argument from user stack.
15104 * Careful about security.
15105 */
15106 + movl PT_OLDESP(%esp),%ebp
15107 +
15108 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15109 + mov PT_OLDSS(%esp),%ds
15110 +1: movl %ds:(%ebp),%ebp
15111 + push %ss
15112 + pop %ds
15113 +#else
15114 cmpl $__PAGE_OFFSET-3,%ebp
15115 jae syscall_fault
15116 1: movl (%ebp),%ebp
15117 +#endif
15118 +
15119 movl %ebp,PT_EBP(%esp)
15120 .section __ex_table,"a"
15121 .align 4
15122 @@ -436,12 +597,24 @@ sysenter_do_call:
15123 testl $_TIF_ALLWORK_MASK, %ecx
15124 jne sysexit_audit
15125 sysenter_exit:
15126 +
15127 +#ifdef CONFIG_PAX_RANDKSTACK
15128 + pushl_cfi %eax
15129 + movl %esp, %eax
15130 + call pax_randomize_kstack
15131 + popl_cfi %eax
15132 +#endif
15133 +
15134 + pax_erase_kstack
15135 +
15136 /* if something modifies registers it must also disable sysexit */
15137 movl PT_EIP(%esp), %edx
15138 movl PT_OLDESP(%esp), %ecx
15139 xorl %ebp,%ebp
15140 TRACE_IRQS_ON
15141 1: mov PT_FS(%esp), %fs
15142 +2: mov PT_DS(%esp), %ds
15143 +3: mov PT_ES(%esp), %es
15144 PTGS_TO_GS
15145 ENABLE_INTERRUPTS_SYSEXIT
15146
15147 @@ -458,6 +631,9 @@ sysenter_audit:
15148 movl %eax,%edx /* 2nd arg: syscall number */
15149 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15150 call __audit_syscall_entry
15151 +
15152 + pax_erase_kstack
15153 +
15154 pushl_cfi %ebx
15155 movl PT_EAX(%esp),%eax /* reload syscall number */
15156 jmp sysenter_do_call
15157 @@ -483,11 +659,17 @@ sysexit_audit:
15158
15159 CFI_ENDPROC
15160 .pushsection .fixup,"ax"
15161 -2: movl $0,PT_FS(%esp)
15162 +4: movl $0,PT_FS(%esp)
15163 + jmp 1b
15164 +5: movl $0,PT_DS(%esp)
15165 + jmp 1b
15166 +6: movl $0,PT_ES(%esp)
15167 jmp 1b
15168 .section __ex_table,"a"
15169 .align 4
15170 - .long 1b,2b
15171 + .long 1b,4b
15172 + .long 2b,5b
15173 + .long 3b,6b
15174 .popsection
15175 PTGS_TO_GS_EX
15176 ENDPROC(ia32_sysenter_target)
15177 @@ -520,6 +702,15 @@ syscall_exit:
15178 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15179 jne syscall_exit_work
15180
15181 +restore_all_pax:
15182 +
15183 +#ifdef CONFIG_PAX_RANDKSTACK
15184 + movl %esp, %eax
15185 + call pax_randomize_kstack
15186 +#endif
15187 +
15188 + pax_erase_kstack
15189 +
15190 restore_all:
15191 TRACE_IRQS_IRET
15192 restore_all_notrace:
15193 @@ -579,14 +770,34 @@ ldt_ss:
15194 * compensating for the offset by changing to the ESPFIX segment with
15195 * a base address that matches for the difference.
15196 */
15197 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
15198 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
15199 mov %esp, %edx /* load kernel esp */
15200 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15201 mov %dx, %ax /* eax: new kernel esp */
15202 sub %eax, %edx /* offset (low word is 0) */
15203 +#ifdef CONFIG_SMP
15204 + movl PER_CPU_VAR(cpu_number), %ebx
15205 + shll $PAGE_SHIFT_asm, %ebx
15206 + addl $cpu_gdt_table, %ebx
15207 +#else
15208 + movl $cpu_gdt_table, %ebx
15209 +#endif
15210 shr $16, %edx
15211 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
15212 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
15213 +
15214 +#ifdef CONFIG_PAX_KERNEXEC
15215 + mov %cr0, %esi
15216 + btr $16, %esi
15217 + mov %esi, %cr0
15218 +#endif
15219 +
15220 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
15221 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
15222 +
15223 +#ifdef CONFIG_PAX_KERNEXEC
15224 + bts $16, %esi
15225 + mov %esi, %cr0
15226 +#endif
15227 +
15228 pushl_cfi $__ESPFIX_SS
15229 pushl_cfi %eax /* new kernel esp */
15230 /* Disable interrupts, but do not irqtrace this section: we
15231 @@ -615,38 +826,30 @@ work_resched:
15232 movl TI_flags(%ebp), %ecx
15233 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15234 # than syscall tracing?
15235 - jz restore_all
15236 + jz restore_all_pax
15237 testb $_TIF_NEED_RESCHED, %cl
15238 jnz work_resched
15239
15240 work_notifysig: # deal with pending signals and
15241 # notify-resume requests
15242 + movl %esp, %eax
15243 #ifdef CONFIG_VM86
15244 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15245 - movl %esp, %eax
15246 - jne work_notifysig_v86 # returning to kernel-space or
15247 + jz 1f # returning to kernel-space or
15248 # vm86-space
15249 - TRACE_IRQS_ON
15250 - ENABLE_INTERRUPTS(CLBR_NONE)
15251 - xorl %edx, %edx
15252 - call do_notify_resume
15253 - jmp resume_userspace_sig
15254
15255 - ALIGN
15256 -work_notifysig_v86:
15257 pushl_cfi %ecx # save ti_flags for do_notify_resume
15258 call save_v86_state # %eax contains pt_regs pointer
15259 popl_cfi %ecx
15260 movl %eax, %esp
15261 -#else
15262 - movl %esp, %eax
15263 +1:
15264 #endif
15265 TRACE_IRQS_ON
15266 ENABLE_INTERRUPTS(CLBR_NONE)
15267 xorl %edx, %edx
15268 call do_notify_resume
15269 jmp resume_userspace_sig
15270 -END(work_pending)
15271 +ENDPROC(work_pending)
15272
15273 # perform syscall exit tracing
15274 ALIGN
15275 @@ -654,11 +857,14 @@ syscall_trace_entry:
15276 movl $-ENOSYS,PT_EAX(%esp)
15277 movl %esp, %eax
15278 call syscall_trace_enter
15279 +
15280 + pax_erase_kstack
15281 +
15282 /* What it returned is what we'll actually use. */
15283 cmpl $(NR_syscalls), %eax
15284 jnae syscall_call
15285 jmp syscall_exit
15286 -END(syscall_trace_entry)
15287 +ENDPROC(syscall_trace_entry)
15288
15289 # perform syscall exit tracing
15290 ALIGN
15291 @@ -671,20 +877,24 @@ syscall_exit_work:
15292 movl %esp, %eax
15293 call syscall_trace_leave
15294 jmp resume_userspace
15295 -END(syscall_exit_work)
15296 +ENDPROC(syscall_exit_work)
15297 CFI_ENDPROC
15298
15299 RING0_INT_FRAME # can't unwind into user space anyway
15300 syscall_fault:
15301 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15302 + push %ss
15303 + pop %ds
15304 +#endif
15305 GET_THREAD_INFO(%ebp)
15306 movl $-EFAULT,PT_EAX(%esp)
15307 jmp resume_userspace
15308 -END(syscall_fault)
15309 +ENDPROC(syscall_fault)
15310
15311 syscall_badsys:
15312 movl $-ENOSYS,PT_EAX(%esp)
15313 jmp resume_userspace
15314 -END(syscall_badsys)
15315 +ENDPROC(syscall_badsys)
15316 CFI_ENDPROC
15317 /*
15318 * End of kprobes section
15319 @@ -756,6 +966,36 @@ ENTRY(ptregs_clone)
15320 CFI_ENDPROC
15321 ENDPROC(ptregs_clone)
15322
15323 + ALIGN;
15324 +ENTRY(kernel_execve)
15325 + CFI_STARTPROC
15326 + pushl_cfi %ebp
15327 + sub $PT_OLDSS+4,%esp
15328 + pushl_cfi %edi
15329 + pushl_cfi %ecx
15330 + pushl_cfi %eax
15331 + lea 3*4(%esp),%edi
15332 + mov $PT_OLDSS/4+1,%ecx
15333 + xorl %eax,%eax
15334 + rep stosl
15335 + popl_cfi %eax
15336 + popl_cfi %ecx
15337 + popl_cfi %edi
15338 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15339 + pushl_cfi %esp
15340 + call sys_execve
15341 + add $4,%esp
15342 + CFI_ADJUST_CFA_OFFSET -4
15343 + GET_THREAD_INFO(%ebp)
15344 + test %eax,%eax
15345 + jz syscall_exit
15346 + add $PT_OLDSS+4,%esp
15347 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
15348 + popl_cfi %ebp
15349 + ret
15350 + CFI_ENDPROC
15351 +ENDPROC(kernel_execve)
15352 +
15353 .macro FIXUP_ESPFIX_STACK
15354 /*
15355 * Switch back for ESPFIX stack to the normal zerobased stack
15356 @@ -765,8 +1005,15 @@ ENDPROC(ptregs_clone)
15357 * normal stack and adjusts ESP with the matching offset.
15358 */
15359 /* fixup the stack */
15360 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
15361 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
15362 +#ifdef CONFIG_SMP
15363 + movl PER_CPU_VAR(cpu_number), %ebx
15364 + shll $PAGE_SHIFT_asm, %ebx
15365 + addl $cpu_gdt_table, %ebx
15366 +#else
15367 + movl $cpu_gdt_table, %ebx
15368 +#endif
15369 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
15370 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
15371 shl $16, %eax
15372 addl %esp, %eax /* the adjusted stack pointer */
15373 pushl_cfi $__KERNEL_DS
15374 @@ -819,7 +1066,7 @@ vector=vector+1
15375 .endr
15376 2: jmp common_interrupt
15377 .endr
15378 -END(irq_entries_start)
15379 +ENDPROC(irq_entries_start)
15380
15381 .previous
15382 END(interrupt)
15383 @@ -867,7 +1114,7 @@ ENTRY(coprocessor_error)
15384 pushl_cfi $do_coprocessor_error
15385 jmp error_code
15386 CFI_ENDPROC
15387 -END(coprocessor_error)
15388 +ENDPROC(coprocessor_error)
15389
15390 ENTRY(simd_coprocessor_error)
15391 RING0_INT_FRAME
15392 @@ -888,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
15393 #endif
15394 jmp error_code
15395 CFI_ENDPROC
15396 -END(simd_coprocessor_error)
15397 +ENDPROC(simd_coprocessor_error)
15398
15399 ENTRY(device_not_available)
15400 RING0_INT_FRAME
15401 @@ -896,7 +1143,7 @@ ENTRY(device_not_available)
15402 pushl_cfi $do_device_not_available
15403 jmp error_code
15404 CFI_ENDPROC
15405 -END(device_not_available)
15406 +ENDPROC(device_not_available)
15407
15408 #ifdef CONFIG_PARAVIRT
15409 ENTRY(native_iret)
15410 @@ -905,12 +1152,12 @@ ENTRY(native_iret)
15411 .align 4
15412 .long native_iret, iret_exc
15413 .previous
15414 -END(native_iret)
15415 +ENDPROC(native_iret)
15416
15417 ENTRY(native_irq_enable_sysexit)
15418 sti
15419 sysexit
15420 -END(native_irq_enable_sysexit)
15421 +ENDPROC(native_irq_enable_sysexit)
15422 #endif
15423
15424 ENTRY(overflow)
15425 @@ -919,7 +1166,7 @@ ENTRY(overflow)
15426 pushl_cfi $do_overflow
15427 jmp error_code
15428 CFI_ENDPROC
15429 -END(overflow)
15430 +ENDPROC(overflow)
15431
15432 ENTRY(bounds)
15433 RING0_INT_FRAME
15434 @@ -927,7 +1174,7 @@ ENTRY(bounds)
15435 pushl_cfi $do_bounds
15436 jmp error_code
15437 CFI_ENDPROC
15438 -END(bounds)
15439 +ENDPROC(bounds)
15440
15441 ENTRY(invalid_op)
15442 RING0_INT_FRAME
15443 @@ -935,7 +1182,7 @@ ENTRY(invalid_op)
15444 pushl_cfi $do_invalid_op
15445 jmp error_code
15446 CFI_ENDPROC
15447 -END(invalid_op)
15448 +ENDPROC(invalid_op)
15449
15450 ENTRY(coprocessor_segment_overrun)
15451 RING0_INT_FRAME
15452 @@ -943,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
15453 pushl_cfi $do_coprocessor_segment_overrun
15454 jmp error_code
15455 CFI_ENDPROC
15456 -END(coprocessor_segment_overrun)
15457 +ENDPROC(coprocessor_segment_overrun)
15458
15459 ENTRY(invalid_TSS)
15460 RING0_EC_FRAME
15461 pushl_cfi $do_invalid_TSS
15462 jmp error_code
15463 CFI_ENDPROC
15464 -END(invalid_TSS)
15465 +ENDPROC(invalid_TSS)
15466
15467 ENTRY(segment_not_present)
15468 RING0_EC_FRAME
15469 pushl_cfi $do_segment_not_present
15470 jmp error_code
15471 CFI_ENDPROC
15472 -END(segment_not_present)
15473 +ENDPROC(segment_not_present)
15474
15475 ENTRY(stack_segment)
15476 RING0_EC_FRAME
15477 pushl_cfi $do_stack_segment
15478 jmp error_code
15479 CFI_ENDPROC
15480 -END(stack_segment)
15481 +ENDPROC(stack_segment)
15482
15483 ENTRY(alignment_check)
15484 RING0_EC_FRAME
15485 pushl_cfi $do_alignment_check
15486 jmp error_code
15487 CFI_ENDPROC
15488 -END(alignment_check)
15489 +ENDPROC(alignment_check)
15490
15491 ENTRY(divide_error)
15492 RING0_INT_FRAME
15493 @@ -979,7 +1226,7 @@ ENTRY(divide_error)
15494 pushl_cfi $do_divide_error
15495 jmp error_code
15496 CFI_ENDPROC
15497 -END(divide_error)
15498 +ENDPROC(divide_error)
15499
15500 #ifdef CONFIG_X86_MCE
15501 ENTRY(machine_check)
15502 @@ -988,7 +1235,7 @@ ENTRY(machine_check)
15503 pushl_cfi machine_check_vector
15504 jmp error_code
15505 CFI_ENDPROC
15506 -END(machine_check)
15507 +ENDPROC(machine_check)
15508 #endif
15509
15510 ENTRY(spurious_interrupt_bug)
15511 @@ -997,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
15512 pushl_cfi $do_spurious_interrupt_bug
15513 jmp error_code
15514 CFI_ENDPROC
15515 -END(spurious_interrupt_bug)
15516 +ENDPROC(spurious_interrupt_bug)
15517 /*
15518 * End of kprobes section
15519 */
15520 @@ -1112,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
15521
15522 ENTRY(mcount)
15523 ret
15524 -END(mcount)
15525 +ENDPROC(mcount)
15526
15527 ENTRY(ftrace_caller)
15528 cmpl $0, function_trace_stop
15529 @@ -1141,7 +1388,7 @@ ftrace_graph_call:
15530 .globl ftrace_stub
15531 ftrace_stub:
15532 ret
15533 -END(ftrace_caller)
15534 +ENDPROC(ftrace_caller)
15535
15536 #else /* ! CONFIG_DYNAMIC_FTRACE */
15537
15538 @@ -1177,7 +1424,7 @@ trace:
15539 popl %ecx
15540 popl %eax
15541 jmp ftrace_stub
15542 -END(mcount)
15543 +ENDPROC(mcount)
15544 #endif /* CONFIG_DYNAMIC_FTRACE */
15545 #endif /* CONFIG_FUNCTION_TRACER */
15546
15547 @@ -1198,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
15548 popl %ecx
15549 popl %eax
15550 ret
15551 -END(ftrace_graph_caller)
15552 +ENDPROC(ftrace_graph_caller)
15553
15554 .globl return_to_handler
15555 return_to_handler:
15556 @@ -1253,15 +1500,18 @@ error_code:
15557 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15558 REG_TO_PTGS %ecx
15559 SET_KERNEL_GS %ecx
15560 - movl $(__USER_DS), %ecx
15561 + movl $(__KERNEL_DS), %ecx
15562 movl %ecx, %ds
15563 movl %ecx, %es
15564 +
15565 + pax_enter_kernel
15566 +
15567 TRACE_IRQS_OFF
15568 movl %esp,%eax # pt_regs pointer
15569 call *%edi
15570 jmp ret_from_exception
15571 CFI_ENDPROC
15572 -END(page_fault)
15573 +ENDPROC(page_fault)
15574
15575 /*
15576 * Debug traps and NMI can happen at the one SYSENTER instruction
15577 @@ -1303,7 +1553,7 @@ debug_stack_correct:
15578 call do_debug
15579 jmp ret_from_exception
15580 CFI_ENDPROC
15581 -END(debug)
15582 +ENDPROC(debug)
15583
15584 /*
15585 * NMI is doubly nasty. It can happen _while_ we're handling
15586 @@ -1340,6 +1590,9 @@ nmi_stack_correct:
15587 xorl %edx,%edx # zero error code
15588 movl %esp,%eax # pt_regs pointer
15589 call do_nmi
15590 +
15591 + pax_exit_kernel
15592 +
15593 jmp restore_all_notrace
15594 CFI_ENDPROC
15595
15596 @@ -1376,12 +1629,15 @@ nmi_espfix_stack:
15597 FIXUP_ESPFIX_STACK # %eax == %esp
15598 xorl %edx,%edx # zero error code
15599 call do_nmi
15600 +
15601 + pax_exit_kernel
15602 +
15603 RESTORE_REGS
15604 lss 12+4(%esp), %esp # back to espfix stack
15605 CFI_ADJUST_CFA_OFFSET -24
15606 jmp irq_return
15607 CFI_ENDPROC
15608 -END(nmi)
15609 +ENDPROC(nmi)
15610
15611 ENTRY(int3)
15612 RING0_INT_FRAME
15613 @@ -1393,14 +1649,14 @@ ENTRY(int3)
15614 call do_int3
15615 jmp ret_from_exception
15616 CFI_ENDPROC
15617 -END(int3)
15618 +ENDPROC(int3)
15619
15620 ENTRY(general_protection)
15621 RING0_EC_FRAME
15622 pushl_cfi $do_general_protection
15623 jmp error_code
15624 CFI_ENDPROC
15625 -END(general_protection)
15626 +ENDPROC(general_protection)
15627
15628 #ifdef CONFIG_KVM_GUEST
15629 ENTRY(async_page_fault)
15630 @@ -1408,7 +1664,7 @@ ENTRY(async_page_fault)
15631 pushl_cfi $do_async_page_fault
15632 jmp error_code
15633 CFI_ENDPROC
15634 -END(async_page_fault)
15635 +ENDPROC(async_page_fault)
15636 #endif
15637
15638 /*
15639 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15640 index cdc79b5..4710a75 100644
15641 --- a/arch/x86/kernel/entry_64.S
15642 +++ b/arch/x86/kernel/entry_64.S
15643 @@ -56,6 +56,8 @@
15644 #include <asm/ftrace.h>
15645 #include <asm/percpu.h>
15646 #include <linux/err.h>
15647 +#include <asm/pgtable.h>
15648 +#include <asm/alternative-asm.h>
15649
15650 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15651 #include <linux/elf-em.h>
15652 @@ -69,8 +71,9 @@
15653 #ifdef CONFIG_FUNCTION_TRACER
15654 #ifdef CONFIG_DYNAMIC_FTRACE
15655 ENTRY(mcount)
15656 + pax_force_retaddr
15657 retq
15658 -END(mcount)
15659 +ENDPROC(mcount)
15660
15661 ENTRY(ftrace_caller)
15662 cmpl $0, function_trace_stop
15663 @@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
15664 #endif
15665
15666 GLOBAL(ftrace_stub)
15667 + pax_force_retaddr
15668 retq
15669 -END(ftrace_caller)
15670 +ENDPROC(ftrace_caller)
15671
15672 #else /* ! CONFIG_DYNAMIC_FTRACE */
15673 ENTRY(mcount)
15674 @@ -113,6 +117,7 @@ ENTRY(mcount)
15675 #endif
15676
15677 GLOBAL(ftrace_stub)
15678 + pax_force_retaddr
15679 retq
15680
15681 trace:
15682 @@ -122,12 +127,13 @@ trace:
15683 movq 8(%rbp), %rsi
15684 subq $MCOUNT_INSN_SIZE, %rdi
15685
15686 + pax_force_fptr ftrace_trace_function
15687 call *ftrace_trace_function
15688
15689 MCOUNT_RESTORE_FRAME
15690
15691 jmp ftrace_stub
15692 -END(mcount)
15693 +ENDPROC(mcount)
15694 #endif /* CONFIG_DYNAMIC_FTRACE */
15695 #endif /* CONFIG_FUNCTION_TRACER */
15696
15697 @@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
15698
15699 MCOUNT_RESTORE_FRAME
15700
15701 + pax_force_retaddr
15702 retq
15703 -END(ftrace_graph_caller)
15704 +ENDPROC(ftrace_graph_caller)
15705
15706 GLOBAL(return_to_handler)
15707 subq $24, %rsp
15708 @@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
15709 movq 8(%rsp), %rdx
15710 movq (%rsp), %rax
15711 addq $24, %rsp
15712 + pax_force_fptr %rdi
15713 jmp *%rdi
15714 #endif
15715
15716 @@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
15717 ENDPROC(native_usergs_sysret64)
15718 #endif /* CONFIG_PARAVIRT */
15719
15720 + .macro ljmpq sel, off
15721 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15722 + .byte 0x48; ljmp *1234f(%rip)
15723 + .pushsection .rodata
15724 + .align 16
15725 + 1234: .quad \off; .word \sel
15726 + .popsection
15727 +#else
15728 + pushq $\sel
15729 + pushq $\off
15730 + lretq
15731 +#endif
15732 + .endm
15733 +
15734 + .macro pax_enter_kernel
15735 + pax_set_fptr_mask
15736 +#ifdef CONFIG_PAX_KERNEXEC
15737 + call pax_enter_kernel
15738 +#endif
15739 + .endm
15740 +
15741 + .macro pax_exit_kernel
15742 +#ifdef CONFIG_PAX_KERNEXEC
15743 + call pax_exit_kernel
15744 +#endif
15745 + .endm
15746 +
15747 +#ifdef CONFIG_PAX_KERNEXEC
15748 +ENTRY(pax_enter_kernel)
15749 + pushq %rdi
15750 +
15751 +#ifdef CONFIG_PARAVIRT
15752 + PV_SAVE_REGS(CLBR_RDI)
15753 +#endif
15754 +
15755 + GET_CR0_INTO_RDI
15756 + bts $16,%rdi
15757 + jnc 3f
15758 + mov %cs,%edi
15759 + cmp $__KERNEL_CS,%edi
15760 + jnz 2f
15761 +1:
15762 +
15763 +#ifdef CONFIG_PARAVIRT
15764 + PV_RESTORE_REGS(CLBR_RDI)
15765 +#endif
15766 +
15767 + popq %rdi
15768 + pax_force_retaddr
15769 + retq
15770 +
15771 +2: ljmpq __KERNEL_CS,1f
15772 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15773 +4: SET_RDI_INTO_CR0
15774 + jmp 1b
15775 +ENDPROC(pax_enter_kernel)
15776 +
15777 +ENTRY(pax_exit_kernel)
15778 + pushq %rdi
15779 +
15780 +#ifdef CONFIG_PARAVIRT
15781 + PV_SAVE_REGS(CLBR_RDI)
15782 +#endif
15783 +
15784 + mov %cs,%rdi
15785 + cmp $__KERNEXEC_KERNEL_CS,%edi
15786 + jz 2f
15787 +1:
15788 +
15789 +#ifdef CONFIG_PARAVIRT
15790 + PV_RESTORE_REGS(CLBR_RDI);
15791 +#endif
15792 +
15793 + popq %rdi
15794 + pax_force_retaddr
15795 + retq
15796 +
15797 +2: GET_CR0_INTO_RDI
15798 + btr $16,%rdi
15799 + ljmpq __KERNEL_CS,3f
15800 +3: SET_RDI_INTO_CR0
15801 + jmp 1b
15802 +#ifdef CONFIG_PARAVIRT
15803 + PV_RESTORE_REGS(CLBR_RDI);
15804 +#endif
15805 +
15806 + popq %rdi
15807 + pax_force_retaddr
15808 + retq
15809 +ENDPROC(pax_exit_kernel)
15810 +#endif
15811 +
15812 + .macro pax_enter_kernel_user
15813 + pax_set_fptr_mask
15814 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15815 + call pax_enter_kernel_user
15816 +#endif
15817 + .endm
15818 +
15819 + .macro pax_exit_kernel_user
15820 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15821 + call pax_exit_kernel_user
15822 +#endif
15823 +#ifdef CONFIG_PAX_RANDKSTACK
15824 + pushq %rax
15825 + call pax_randomize_kstack
15826 + popq %rax
15827 +#endif
15828 + .endm
15829 +
15830 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15831 +ENTRY(pax_enter_kernel_user)
15832 + pushq %rdi
15833 + pushq %rbx
15834 +
15835 +#ifdef CONFIG_PARAVIRT
15836 + PV_SAVE_REGS(CLBR_RDI)
15837 +#endif
15838 +
15839 + GET_CR3_INTO_RDI
15840 + mov %rdi,%rbx
15841 + add $__START_KERNEL_map,%rbx
15842 + sub phys_base(%rip),%rbx
15843 +
15844 +#ifdef CONFIG_PARAVIRT
15845 + pushq %rdi
15846 + cmpl $0, pv_info+PARAVIRT_enabled
15847 + jz 1f
15848 + i = 0
15849 + .rept USER_PGD_PTRS
15850 + mov i*8(%rbx),%rsi
15851 + mov $0,%sil
15852 + lea i*8(%rbx),%rdi
15853 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15854 + i = i + 1
15855 + .endr
15856 + jmp 2f
15857 +1:
15858 +#endif
15859 +
15860 + i = 0
15861 + .rept USER_PGD_PTRS
15862 + movb $0,i*8(%rbx)
15863 + i = i + 1
15864 + .endr
15865 +
15866 +#ifdef CONFIG_PARAVIRT
15867 +2: popq %rdi
15868 +#endif
15869 + SET_RDI_INTO_CR3
15870 +
15871 +#ifdef CONFIG_PAX_KERNEXEC
15872 + GET_CR0_INTO_RDI
15873 + bts $16,%rdi
15874 + SET_RDI_INTO_CR0
15875 +#endif
15876 +
15877 +#ifdef CONFIG_PARAVIRT
15878 + PV_RESTORE_REGS(CLBR_RDI)
15879 +#endif
15880 +
15881 + popq %rbx
15882 + popq %rdi
15883 + pax_force_retaddr
15884 + retq
15885 +ENDPROC(pax_enter_kernel_user)
15886 +
15887 +ENTRY(pax_exit_kernel_user)
15888 + push %rdi
15889 +
15890 +#ifdef CONFIG_PARAVIRT
15891 + pushq %rbx
15892 + PV_SAVE_REGS(CLBR_RDI)
15893 +#endif
15894 +
15895 +#ifdef CONFIG_PAX_KERNEXEC
15896 + GET_CR0_INTO_RDI
15897 + btr $16,%rdi
15898 + SET_RDI_INTO_CR0
15899 +#endif
15900 +
15901 + GET_CR3_INTO_RDI
15902 + add $__START_KERNEL_map,%rdi
15903 + sub phys_base(%rip),%rdi
15904 +
15905 +#ifdef CONFIG_PARAVIRT
15906 + cmpl $0, pv_info+PARAVIRT_enabled
15907 + jz 1f
15908 + mov %rdi,%rbx
15909 + i = 0
15910 + .rept USER_PGD_PTRS
15911 + mov i*8(%rbx),%rsi
15912 + mov $0x67,%sil
15913 + lea i*8(%rbx),%rdi
15914 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15915 + i = i + 1
15916 + .endr
15917 + jmp 2f
15918 +1:
15919 +#endif
15920 +
15921 + i = 0
15922 + .rept USER_PGD_PTRS
15923 + movb $0x67,i*8(%rdi)
15924 + i = i + 1
15925 + .endr
15926 +
15927 +#ifdef CONFIG_PARAVIRT
15928 +2: PV_RESTORE_REGS(CLBR_RDI)
15929 + popq %rbx
15930 +#endif
15931 +
15932 + popq %rdi
15933 + pax_force_retaddr
15934 + retq
15935 +ENDPROC(pax_exit_kernel_user)
15936 +#endif
15937 +
15938 +.macro pax_erase_kstack
15939 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15940 + call pax_erase_kstack
15941 +#endif
15942 +.endm
15943 +
15944 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15945 +/*
15946 + * r11: thread_info
15947 + * rcx, rdx: can be clobbered
15948 + */
15949 +ENTRY(pax_erase_kstack)
15950 + pushq %rdi
15951 + pushq %rax
15952 + pushq %r11
15953 +
15954 + GET_THREAD_INFO(%r11)
15955 + mov TI_lowest_stack(%r11), %rdi
15956 + mov $-0xBEEF, %rax
15957 + std
15958 +
15959 +1: mov %edi, %ecx
15960 + and $THREAD_SIZE_asm - 1, %ecx
15961 + shr $3, %ecx
15962 + repne scasq
15963 + jecxz 2f
15964 +
15965 + cmp $2*8, %ecx
15966 + jc 2f
15967 +
15968 + mov $2*8, %ecx
15969 + repe scasq
15970 + jecxz 2f
15971 + jne 1b
15972 +
15973 +2: cld
15974 + mov %esp, %ecx
15975 + sub %edi, %ecx
15976 +
15977 + cmp $THREAD_SIZE_asm, %rcx
15978 + jb 3f
15979 + ud2
15980 +3:
15981 +
15982 + shr $3, %ecx
15983 + rep stosq
15984 +
15985 + mov TI_task_thread_sp0(%r11), %rdi
15986 + sub $256, %rdi
15987 + mov %rdi, TI_lowest_stack(%r11)
15988 +
15989 + popq %r11
15990 + popq %rax
15991 + popq %rdi
15992 + pax_force_retaddr
15993 + ret
15994 +ENDPROC(pax_erase_kstack)
15995 +#endif
15996
15997 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15998 #ifdef CONFIG_TRACE_IRQFLAGS
15999 @@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
16000 .endm
16001
16002 .macro UNFAKE_STACK_FRAME
16003 - addq $8*6, %rsp
16004 - CFI_ADJUST_CFA_OFFSET -(6*8)
16005 + addq $8*6 + ARG_SKIP, %rsp
16006 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16007 .endm
16008
16009 /*
16010 @@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
16011 movq %rsp, %rsi
16012
16013 leaq -RBP(%rsp),%rdi /* arg1 for handler */
16014 - testl $3, CS-RBP(%rsi)
16015 + testb $3, CS-RBP(%rsi)
16016 je 1f
16017 SWAPGS
16018 /*
16019 @@ -355,9 +639,10 @@ ENTRY(save_rest)
16020 movq_cfi r15, R15+16
16021 movq %r11, 8(%rsp) /* return address */
16022 FIXUP_TOP_OF_STACK %r11, 16
16023 + pax_force_retaddr
16024 ret
16025 CFI_ENDPROC
16026 -END(save_rest)
16027 +ENDPROC(save_rest)
16028
16029 /* save complete stack frame */
16030 .pushsection .kprobes.text, "ax"
16031 @@ -386,9 +671,10 @@ ENTRY(save_paranoid)
16032 js 1f /* negative -> in kernel */
16033 SWAPGS
16034 xorl %ebx,%ebx
16035 -1: ret
16036 +1: pax_force_retaddr_bts
16037 + ret
16038 CFI_ENDPROC
16039 -END(save_paranoid)
16040 +ENDPROC(save_paranoid)
16041 .popsection
16042
16043 /*
16044 @@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
16045
16046 RESTORE_REST
16047
16048 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16049 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16050 jz retint_restore_args
16051
16052 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16053 @@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
16054 jmp ret_from_sys_call # go to the SYSRET fastpath
16055
16056 CFI_ENDPROC
16057 -END(ret_from_fork)
16058 +ENDPROC(ret_from_fork)
16059
16060 /*
16061 * System call entry. Up to 6 arguments in registers are supported.
16062 @@ -456,7 +742,7 @@ END(ret_from_fork)
16063 ENTRY(system_call)
16064 CFI_STARTPROC simple
16065 CFI_SIGNAL_FRAME
16066 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16067 + CFI_DEF_CFA rsp,0
16068 CFI_REGISTER rip,rcx
16069 /*CFI_REGISTER rflags,r11*/
16070 SWAPGS_UNSAFE_STACK
16071 @@ -469,16 +755,18 @@ GLOBAL(system_call_after_swapgs)
16072
16073 movq %rsp,PER_CPU_VAR(old_rsp)
16074 movq PER_CPU_VAR(kernel_stack),%rsp
16075 + SAVE_ARGS 8*6,0
16076 + pax_enter_kernel_user
16077 /*
16078 * No need to follow this irqs off/on section - it's straight
16079 * and short:
16080 */
16081 ENABLE_INTERRUPTS(CLBR_NONE)
16082 - SAVE_ARGS 8,0
16083 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16084 movq %rcx,RIP-ARGOFFSET(%rsp)
16085 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16086 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16087 + GET_THREAD_INFO(%rcx)
16088 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
16089 jnz tracesys
16090 system_call_fastpath:
16091 #if __SYSCALL_MASK == ~0
16092 @@ -488,7 +776,7 @@ system_call_fastpath:
16093 cmpl $__NR_syscall_max,%eax
16094 #endif
16095 ja badsys
16096 - movq %r10,%rcx
16097 + movq R10-ARGOFFSET(%rsp),%rcx
16098 call *sys_call_table(,%rax,8) # XXX: rip relative
16099 movq %rax,RAX-ARGOFFSET(%rsp)
16100 /*
16101 @@ -502,10 +790,13 @@ sysret_check:
16102 LOCKDEP_SYS_EXIT
16103 DISABLE_INTERRUPTS(CLBR_NONE)
16104 TRACE_IRQS_OFF
16105 - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
16106 + GET_THREAD_INFO(%rcx)
16107 + movl TI_flags(%rcx),%edx
16108 andl %edi,%edx
16109 jnz sysret_careful
16110 CFI_REMEMBER_STATE
16111 + pax_exit_kernel_user
16112 + pax_erase_kstack
16113 /*
16114 * sysretq will re-enable interrupts:
16115 */
16116 @@ -557,14 +848,18 @@ badsys:
16117 * jump back to the normal fast path.
16118 */
16119 auditsys:
16120 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
16121 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16122 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16123 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16124 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16125 movq %rax,%rsi /* 2nd arg: syscall number */
16126 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16127 call __audit_syscall_entry
16128 +
16129 + pax_erase_kstack
16130 +
16131 LOAD_ARGS 0 /* reload call-clobbered registers */
16132 + pax_set_fptr_mask
16133 jmp system_call_fastpath
16134
16135 /*
16136 @@ -585,7 +880,7 @@ sysret_audit:
16137 /* Do syscall tracing */
16138 tracesys:
16139 #ifdef CONFIG_AUDITSYSCALL
16140 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16141 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
16142 jz auditsys
16143 #endif
16144 SAVE_REST
16145 @@ -593,12 +888,16 @@ tracesys:
16146 FIXUP_TOP_OF_STACK %rdi
16147 movq %rsp,%rdi
16148 call syscall_trace_enter
16149 +
16150 + pax_erase_kstack
16151 +
16152 /*
16153 * Reload arg registers from stack in case ptrace changed them.
16154 * We don't reload %rax because syscall_trace_enter() returned
16155 * the value it wants us to use in the table lookup.
16156 */
16157 LOAD_ARGS ARGOFFSET, 1
16158 + pax_set_fptr_mask
16159 RESTORE_REST
16160 #if __SYSCALL_MASK == ~0
16161 cmpq $__NR_syscall_max,%rax
16162 @@ -607,7 +906,7 @@ tracesys:
16163 cmpl $__NR_syscall_max,%eax
16164 #endif
16165 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16166 - movq %r10,%rcx /* fixup for C */
16167 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16168 call *sys_call_table(,%rax,8)
16169 movq %rax,RAX-ARGOFFSET(%rsp)
16170 /* Use IRET because user could have changed frame */
16171 @@ -628,6 +927,7 @@ GLOBAL(int_with_check)
16172 andl %edi,%edx
16173 jnz int_careful
16174 andl $~TS_COMPAT,TI_status(%rcx)
16175 + pax_erase_kstack
16176 jmp retint_swapgs
16177
16178 /* Either reschedule or signal or syscall exit tracking needed. */
16179 @@ -674,7 +974,7 @@ int_restore_rest:
16180 TRACE_IRQS_OFF
16181 jmp int_with_check
16182 CFI_ENDPROC
16183 -END(system_call)
16184 +ENDPROC(system_call)
16185
16186 /*
16187 * Certain special system calls that need to save a complete full stack frame.
16188 @@ -690,7 +990,7 @@ ENTRY(\label)
16189 call \func
16190 jmp ptregscall_common
16191 CFI_ENDPROC
16192 -END(\label)
16193 +ENDPROC(\label)
16194 .endm
16195
16196 PTREGSCALL stub_clone, sys_clone, %r8
16197 @@ -708,9 +1008,10 @@ ENTRY(ptregscall_common)
16198 movq_cfi_restore R12+8, r12
16199 movq_cfi_restore RBP+8, rbp
16200 movq_cfi_restore RBX+8, rbx
16201 + pax_force_retaddr
16202 ret $REST_SKIP /* pop extended registers */
16203 CFI_ENDPROC
16204 -END(ptregscall_common)
16205 +ENDPROC(ptregscall_common)
16206
16207 ENTRY(stub_execve)
16208 CFI_STARTPROC
16209 @@ -725,7 +1026,7 @@ ENTRY(stub_execve)
16210 RESTORE_REST
16211 jmp int_ret_from_sys_call
16212 CFI_ENDPROC
16213 -END(stub_execve)
16214 +ENDPROC(stub_execve)
16215
16216 /*
16217 * sigreturn is special because it needs to restore all registers on return.
16218 @@ -743,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
16219 RESTORE_REST
16220 jmp int_ret_from_sys_call
16221 CFI_ENDPROC
16222 -END(stub_rt_sigreturn)
16223 +ENDPROC(stub_rt_sigreturn)
16224
16225 #ifdef CONFIG_X86_X32_ABI
16226 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
16227 @@ -812,7 +1113,7 @@ vector=vector+1
16228 2: jmp common_interrupt
16229 .endr
16230 CFI_ENDPROC
16231 -END(irq_entries_start)
16232 +ENDPROC(irq_entries_start)
16233
16234 .previous
16235 END(interrupt)
16236 @@ -832,6 +1133,16 @@ END(interrupt)
16237 subq $ORIG_RAX-RBP, %rsp
16238 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
16239 SAVE_ARGS_IRQ
16240 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16241 + testb $3, CS(%rdi)
16242 + jnz 1f
16243 + pax_enter_kernel
16244 + jmp 2f
16245 +1: pax_enter_kernel_user
16246 +2:
16247 +#else
16248 + pax_enter_kernel
16249 +#endif
16250 call \func
16251 .endm
16252
16253 @@ -863,7 +1174,7 @@ ret_from_intr:
16254
16255 exit_intr:
16256 GET_THREAD_INFO(%rcx)
16257 - testl $3,CS-ARGOFFSET(%rsp)
16258 + testb $3,CS-ARGOFFSET(%rsp)
16259 je retint_kernel
16260
16261 /* Interrupt came from user space */
16262 @@ -885,12 +1196,15 @@ retint_swapgs: /* return to user-space */
16263 * The iretq could re-enable interrupts:
16264 */
16265 DISABLE_INTERRUPTS(CLBR_ANY)
16266 + pax_exit_kernel_user
16267 TRACE_IRQS_IRETQ
16268 SWAPGS
16269 jmp restore_args
16270
16271 retint_restore_args: /* return to kernel space */
16272 DISABLE_INTERRUPTS(CLBR_ANY)
16273 + pax_exit_kernel
16274 + pax_force_retaddr RIP-ARGOFFSET
16275 /*
16276 * The iretq could re-enable interrupts:
16277 */
16278 @@ -979,7 +1293,7 @@ ENTRY(retint_kernel)
16279 #endif
16280
16281 CFI_ENDPROC
16282 -END(common_interrupt)
16283 +ENDPROC(common_interrupt)
16284 /*
16285 * End of kprobes section
16286 */
16287 @@ -996,7 +1310,7 @@ ENTRY(\sym)
16288 interrupt \do_sym
16289 jmp ret_from_intr
16290 CFI_ENDPROC
16291 -END(\sym)
16292 +ENDPROC(\sym)
16293 .endm
16294
16295 #ifdef CONFIG_SMP
16296 @@ -1069,12 +1383,22 @@ ENTRY(\sym)
16297 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16298 call error_entry
16299 DEFAULT_FRAME 0
16300 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16301 + testb $3, CS(%rsp)
16302 + jnz 1f
16303 + pax_enter_kernel
16304 + jmp 2f
16305 +1: pax_enter_kernel_user
16306 +2:
16307 +#else
16308 + pax_enter_kernel
16309 +#endif
16310 movq %rsp,%rdi /* pt_regs pointer */
16311 xorl %esi,%esi /* no error code */
16312 call \do_sym
16313 jmp error_exit /* %ebx: no swapgs flag */
16314 CFI_ENDPROC
16315 -END(\sym)
16316 +ENDPROC(\sym)
16317 .endm
16318
16319 .macro paranoidzeroentry sym do_sym
16320 @@ -1086,15 +1410,25 @@ ENTRY(\sym)
16321 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16322 call save_paranoid
16323 TRACE_IRQS_OFF
16324 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16325 + testb $3, CS(%rsp)
16326 + jnz 1f
16327 + pax_enter_kernel
16328 + jmp 2f
16329 +1: pax_enter_kernel_user
16330 +2:
16331 +#else
16332 + pax_enter_kernel
16333 +#endif
16334 movq %rsp,%rdi /* pt_regs pointer */
16335 xorl %esi,%esi /* no error code */
16336 call \do_sym
16337 jmp paranoid_exit /* %ebx: no swapgs flag */
16338 CFI_ENDPROC
16339 -END(\sym)
16340 +ENDPROC(\sym)
16341 .endm
16342
16343 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
16344 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
16345 .macro paranoidzeroentry_ist sym do_sym ist
16346 ENTRY(\sym)
16347 INTR_FRAME
16348 @@ -1104,14 +1438,30 @@ ENTRY(\sym)
16349 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16350 call save_paranoid
16351 TRACE_IRQS_OFF
16352 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16353 + testb $3, CS(%rsp)
16354 + jnz 1f
16355 + pax_enter_kernel
16356 + jmp 2f
16357 +1: pax_enter_kernel_user
16358 +2:
16359 +#else
16360 + pax_enter_kernel
16361 +#endif
16362 movq %rsp,%rdi /* pt_regs pointer */
16363 xorl %esi,%esi /* no error code */
16364 +#ifdef CONFIG_SMP
16365 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
16366 + lea init_tss(%r12), %r12
16367 +#else
16368 + lea init_tss(%rip), %r12
16369 +#endif
16370 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16371 call \do_sym
16372 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16373 jmp paranoid_exit /* %ebx: no swapgs flag */
16374 CFI_ENDPROC
16375 -END(\sym)
16376 +ENDPROC(\sym)
16377 .endm
16378
16379 .macro errorentry sym do_sym
16380 @@ -1122,13 +1472,23 @@ ENTRY(\sym)
16381 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16382 call error_entry
16383 DEFAULT_FRAME 0
16384 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16385 + testb $3, CS(%rsp)
16386 + jnz 1f
16387 + pax_enter_kernel
16388 + jmp 2f
16389 +1: pax_enter_kernel_user
16390 +2:
16391 +#else
16392 + pax_enter_kernel
16393 +#endif
16394 movq %rsp,%rdi /* pt_regs pointer */
16395 movq ORIG_RAX(%rsp),%rsi /* get error code */
16396 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16397 call \do_sym
16398 jmp error_exit /* %ebx: no swapgs flag */
16399 CFI_ENDPROC
16400 -END(\sym)
16401 +ENDPROC(\sym)
16402 .endm
16403
16404 /* error code is on the stack already */
16405 @@ -1141,13 +1501,23 @@ ENTRY(\sym)
16406 call save_paranoid
16407 DEFAULT_FRAME 0
16408 TRACE_IRQS_OFF
16409 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16410 + testb $3, CS(%rsp)
16411 + jnz 1f
16412 + pax_enter_kernel
16413 + jmp 2f
16414 +1: pax_enter_kernel_user
16415 +2:
16416 +#else
16417 + pax_enter_kernel
16418 +#endif
16419 movq %rsp,%rdi /* pt_regs pointer */
16420 movq ORIG_RAX(%rsp),%rsi /* get error code */
16421 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16422 call \do_sym
16423 jmp paranoid_exit /* %ebx: no swapgs flag */
16424 CFI_ENDPROC
16425 -END(\sym)
16426 +ENDPROC(\sym)
16427 .endm
16428
16429 zeroentry divide_error do_divide_error
16430 @@ -1177,9 +1547,10 @@ gs_change:
16431 2: mfence /* workaround */
16432 SWAPGS
16433 popfq_cfi
16434 + pax_force_retaddr
16435 ret
16436 CFI_ENDPROC
16437 -END(native_load_gs_index)
16438 +ENDPROC(native_load_gs_index)
16439
16440 .section __ex_table,"a"
16441 .align 8
16442 @@ -1201,13 +1572,14 @@ ENTRY(kernel_thread_helper)
16443 * Here we are in the child and the registers are set as they were
16444 * at kernel_thread() invocation in the parent.
16445 */
16446 + pax_force_fptr %rsi
16447 call *%rsi
16448 # exit
16449 mov %eax, %edi
16450 call do_exit
16451 ud2 # padding for call trace
16452 CFI_ENDPROC
16453 -END(kernel_thread_helper)
16454 +ENDPROC(kernel_thread_helper)
16455
16456 /*
16457 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16458 @@ -1234,11 +1606,11 @@ ENTRY(kernel_execve)
16459 RESTORE_REST
16460 testq %rax,%rax
16461 je int_ret_from_sys_call
16462 - RESTORE_ARGS
16463 UNFAKE_STACK_FRAME
16464 + pax_force_retaddr
16465 ret
16466 CFI_ENDPROC
16467 -END(kernel_execve)
16468 +ENDPROC(kernel_execve)
16469
16470 /* Call softirq on interrupt stack. Interrupts are off. */
16471 ENTRY(call_softirq)
16472 @@ -1256,9 +1628,10 @@ ENTRY(call_softirq)
16473 CFI_DEF_CFA_REGISTER rsp
16474 CFI_ADJUST_CFA_OFFSET -8
16475 decl PER_CPU_VAR(irq_count)
16476 + pax_force_retaddr
16477 ret
16478 CFI_ENDPROC
16479 -END(call_softirq)
16480 +ENDPROC(call_softirq)
16481
16482 #ifdef CONFIG_XEN
16483 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16484 @@ -1296,7 +1669,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16485 decl PER_CPU_VAR(irq_count)
16486 jmp error_exit
16487 CFI_ENDPROC
16488 -END(xen_do_hypervisor_callback)
16489 +ENDPROC(xen_do_hypervisor_callback)
16490
16491 /*
16492 * Hypervisor uses this for application faults while it executes.
16493 @@ -1355,7 +1728,7 @@ ENTRY(xen_failsafe_callback)
16494 SAVE_ALL
16495 jmp error_exit
16496 CFI_ENDPROC
16497 -END(xen_failsafe_callback)
16498 +ENDPROC(xen_failsafe_callback)
16499
16500 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16501 xen_hvm_callback_vector xen_evtchn_do_upcall
16502 @@ -1404,16 +1777,31 @@ ENTRY(paranoid_exit)
16503 TRACE_IRQS_OFF
16504 testl %ebx,%ebx /* swapgs needed? */
16505 jnz paranoid_restore
16506 - testl $3,CS(%rsp)
16507 + testb $3,CS(%rsp)
16508 jnz paranoid_userspace
16509 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16510 + pax_exit_kernel
16511 + TRACE_IRQS_IRETQ 0
16512 + SWAPGS_UNSAFE_STACK
16513 + RESTORE_ALL 8
16514 + pax_force_retaddr_bts
16515 + jmp irq_return
16516 +#endif
16517 paranoid_swapgs:
16518 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16519 + pax_exit_kernel_user
16520 +#else
16521 + pax_exit_kernel
16522 +#endif
16523 TRACE_IRQS_IRETQ 0
16524 SWAPGS_UNSAFE_STACK
16525 RESTORE_ALL 8
16526 jmp irq_return
16527 paranoid_restore:
16528 + pax_exit_kernel
16529 TRACE_IRQS_IRETQ 0
16530 RESTORE_ALL 8
16531 + pax_force_retaddr_bts
16532 jmp irq_return
16533 paranoid_userspace:
16534 GET_THREAD_INFO(%rcx)
16535 @@ -1442,7 +1830,7 @@ paranoid_schedule:
16536 TRACE_IRQS_OFF
16537 jmp paranoid_userspace
16538 CFI_ENDPROC
16539 -END(paranoid_exit)
16540 +ENDPROC(paranoid_exit)
16541
16542 /*
16543 * Exception entry point. This expects an error code/orig_rax on the stack.
16544 @@ -1469,12 +1857,13 @@ ENTRY(error_entry)
16545 movq_cfi r14, R14+8
16546 movq_cfi r15, R15+8
16547 xorl %ebx,%ebx
16548 - testl $3,CS+8(%rsp)
16549 + testb $3,CS+8(%rsp)
16550 je error_kernelspace
16551 error_swapgs:
16552 SWAPGS
16553 error_sti:
16554 TRACE_IRQS_OFF
16555 + pax_force_retaddr_bts
16556 ret
16557
16558 /*
16559 @@ -1501,7 +1890,7 @@ bstep_iret:
16560 movq %rcx,RIP+8(%rsp)
16561 jmp error_swapgs
16562 CFI_ENDPROC
16563 -END(error_entry)
16564 +ENDPROC(error_entry)
16565
16566
16567 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16568 @@ -1521,7 +1910,7 @@ ENTRY(error_exit)
16569 jnz retint_careful
16570 jmp retint_swapgs
16571 CFI_ENDPROC
16572 -END(error_exit)
16573 +ENDPROC(error_exit)
16574
16575 /*
16576 * Test if a given stack is an NMI stack or not.
16577 @@ -1579,9 +1968,11 @@ ENTRY(nmi)
16578 * If %cs was not the kernel segment, then the NMI triggered in user
16579 * space, which means it is definitely not nested.
16580 */
16581 + cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16582 + je 1f
16583 cmpl $__KERNEL_CS, 16(%rsp)
16584 jne first_nmi
16585 -
16586 +1:
16587 /*
16588 * Check the special variable on the stack to see if NMIs are
16589 * executing.
16590 @@ -1728,6 +2119,16 @@ end_repeat_nmi:
16591 */
16592 call save_paranoid
16593 DEFAULT_FRAME 0
16594 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16595 + testb $3, CS(%rsp)
16596 + jnz 1f
16597 + pax_enter_kernel
16598 + jmp 2f
16599 +1: pax_enter_kernel_user
16600 +2:
16601 +#else
16602 + pax_enter_kernel
16603 +#endif
16604 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16605 movq %rsp,%rdi
16606 movq $-1,%rsi
16607 @@ -1735,21 +2136,32 @@ end_repeat_nmi:
16608 testl %ebx,%ebx /* swapgs needed? */
16609 jnz nmi_restore
16610 nmi_swapgs:
16611 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16612 + pax_exit_kernel_user
16613 +#else
16614 + pax_exit_kernel
16615 +#endif
16616 SWAPGS_UNSAFE_STACK
16617 + RESTORE_ALL 8
16618 + /* Clear the NMI executing stack variable */
16619 + movq $0, 10*8(%rsp)
16620 + jmp irq_return
16621 nmi_restore:
16622 + pax_exit_kernel
16623 RESTORE_ALL 8
16624 + pax_force_retaddr_bts
16625 /* Clear the NMI executing stack variable */
16626 movq $0, 10*8(%rsp)
16627 jmp irq_return
16628 CFI_ENDPROC
16629 -END(nmi)
16630 +ENDPROC(nmi)
16631
16632 ENTRY(ignore_sysret)
16633 CFI_STARTPROC
16634 mov $-ENOSYS,%eax
16635 sysret
16636 CFI_ENDPROC
16637 -END(ignore_sysret)
16638 +ENDPROC(ignore_sysret)
16639
16640 /*
16641 * End of kprobes section
16642 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16643 index c9a281f..ce2f317 100644
16644 --- a/arch/x86/kernel/ftrace.c
16645 +++ b/arch/x86/kernel/ftrace.c
16646 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16647 static const void *mod_code_newcode; /* holds the text to write to the IP */
16648
16649 static unsigned nmi_wait_count;
16650 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
16651 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16652
16653 int ftrace_arch_read_dyn_info(char *buf, int size)
16654 {
16655 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16656
16657 r = snprintf(buf, size, "%u %u",
16658 nmi_wait_count,
16659 - atomic_read(&nmi_update_count));
16660 + atomic_read_unchecked(&nmi_update_count));
16661 return r;
16662 }
16663
16664 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
16665
16666 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16667 smp_rmb();
16668 + pax_open_kernel();
16669 ftrace_mod_code();
16670 - atomic_inc(&nmi_update_count);
16671 + pax_close_kernel();
16672 + atomic_inc_unchecked(&nmi_update_count);
16673 }
16674 /* Must have previous changes seen before executions */
16675 smp_mb();
16676 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
16677 {
16678 unsigned char replaced[MCOUNT_INSN_SIZE];
16679
16680 + ip = ktla_ktva(ip);
16681 +
16682 /*
16683 * Note: Due to modules and __init, code can
16684 * disappear and change, we need to protect against faulting
16685 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16686 unsigned char old[MCOUNT_INSN_SIZE], *new;
16687 int ret;
16688
16689 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16690 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16691 new = ftrace_call_replace(ip, (unsigned long)func);
16692 ret = ftrace_modify_code(ip, old, new);
16693
16694 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16695 {
16696 unsigned char code[MCOUNT_INSN_SIZE];
16697
16698 + ip = ktla_ktva(ip);
16699 +
16700 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16701 return -EFAULT;
16702
16703 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16704 index 51ff186..9e77418 100644
16705 --- a/arch/x86/kernel/head32.c
16706 +++ b/arch/x86/kernel/head32.c
16707 @@ -19,6 +19,7 @@
16708 #include <asm/io_apic.h>
16709 #include <asm/bios_ebda.h>
16710 #include <asm/tlbflush.h>
16711 +#include <asm/boot.h>
16712
16713 static void __init i386_default_early_setup(void)
16714 {
16715 @@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
16716
16717 void __init i386_start_kernel(void)
16718 {
16719 - memblock_reserve(__pa_symbol(&_text),
16720 - __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16721 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16722
16723 #ifdef CONFIG_BLK_DEV_INITRD
16724 /* Reserve INITRD */
16725 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16726 index ce0be7c..c41476e 100644
16727 --- a/arch/x86/kernel/head_32.S
16728 +++ b/arch/x86/kernel/head_32.S
16729 @@ -25,6 +25,12 @@
16730 /* Physical address */
16731 #define pa(X) ((X) - __PAGE_OFFSET)
16732
16733 +#ifdef CONFIG_PAX_KERNEXEC
16734 +#define ta(X) (X)
16735 +#else
16736 +#define ta(X) ((X) - __PAGE_OFFSET)
16737 +#endif
16738 +
16739 /*
16740 * References to members of the new_cpu_data structure.
16741 */
16742 @@ -54,11 +60,7 @@
16743 * and small than max_low_pfn, otherwise will waste some page table entries
16744 */
16745
16746 -#if PTRS_PER_PMD > 1
16747 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16748 -#else
16749 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16750 -#endif
16751 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16752
16753 /* Number of possible pages in the lowmem region */
16754 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16755 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16756 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16757
16758 /*
16759 + * Real beginning of normal "text" segment
16760 + */
16761 +ENTRY(stext)
16762 +ENTRY(_stext)
16763 +
16764 +/*
16765 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16766 * %esi points to the real-mode code as a 32-bit pointer.
16767 * CS and DS must be 4 GB flat segments, but we don't depend on
16768 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16769 * can.
16770 */
16771 __HEAD
16772 +
16773 +#ifdef CONFIG_PAX_KERNEXEC
16774 + jmp startup_32
16775 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16776 +.fill PAGE_SIZE-5,1,0xcc
16777 +#endif
16778 +
16779 ENTRY(startup_32)
16780 movl pa(stack_start),%ecx
16781
16782 @@ -105,6 +120,57 @@ ENTRY(startup_32)
16783 2:
16784 leal -__PAGE_OFFSET(%ecx),%esp
16785
16786 +#ifdef CONFIG_SMP
16787 + movl $pa(cpu_gdt_table),%edi
16788 + movl $__per_cpu_load,%eax
16789 + movw %ax,__KERNEL_PERCPU + 2(%edi)
16790 + rorl $16,%eax
16791 + movb %al,__KERNEL_PERCPU + 4(%edi)
16792 + movb %ah,__KERNEL_PERCPU + 7(%edi)
16793 + movl $__per_cpu_end - 1,%eax
16794 + subl $__per_cpu_start,%eax
16795 + movw %ax,__KERNEL_PERCPU + 0(%edi)
16796 +#endif
16797 +
16798 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16799 + movl $NR_CPUS,%ecx
16800 + movl $pa(cpu_gdt_table),%edi
16801 +1:
16802 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16803 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16804 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16805 + addl $PAGE_SIZE_asm,%edi
16806 + loop 1b
16807 +#endif
16808 +
16809 +#ifdef CONFIG_PAX_KERNEXEC
16810 + movl $pa(boot_gdt),%edi
16811 + movl $__LOAD_PHYSICAL_ADDR,%eax
16812 + movw %ax,__BOOT_CS + 2(%edi)
16813 + rorl $16,%eax
16814 + movb %al,__BOOT_CS + 4(%edi)
16815 + movb %ah,__BOOT_CS + 7(%edi)
16816 + rorl $16,%eax
16817 +
16818 + ljmp $(__BOOT_CS),$1f
16819 +1:
16820 +
16821 + movl $NR_CPUS,%ecx
16822 + movl $pa(cpu_gdt_table),%edi
16823 + addl $__PAGE_OFFSET,%eax
16824 +1:
16825 + movw %ax,__KERNEL_CS + 2(%edi)
16826 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16827 + rorl $16,%eax
16828 + movb %al,__KERNEL_CS + 4(%edi)
16829 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16830 + movb %ah,__KERNEL_CS + 7(%edi)
16831 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16832 + rorl $16,%eax
16833 + addl $PAGE_SIZE_asm,%edi
16834 + loop 1b
16835 +#endif
16836 +
16837 /*
16838 * Clear BSS first so that there are no surprises...
16839 */
16840 @@ -195,8 +261,11 @@ ENTRY(startup_32)
16841 movl %eax, pa(max_pfn_mapped)
16842
16843 /* Do early initialization of the fixmap area */
16844 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16845 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16846 +#ifdef CONFIG_COMPAT_VDSO
16847 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16848 +#else
16849 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16850 +#endif
16851 #else /* Not PAE */
16852
16853 page_pde_offset = (__PAGE_OFFSET >> 20);
16854 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16855 movl %eax, pa(max_pfn_mapped)
16856
16857 /* Do early initialization of the fixmap area */
16858 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16859 - movl %eax,pa(initial_page_table+0xffc)
16860 +#ifdef CONFIG_COMPAT_VDSO
16861 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16862 +#else
16863 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16864 +#endif
16865 #endif
16866
16867 #ifdef CONFIG_PARAVIRT
16868 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16869 cmpl $num_subarch_entries, %eax
16870 jae bad_subarch
16871
16872 - movl pa(subarch_entries)(,%eax,4), %eax
16873 - subl $__PAGE_OFFSET, %eax
16874 - jmp *%eax
16875 + jmp *pa(subarch_entries)(,%eax,4)
16876
16877 bad_subarch:
16878 WEAK(lguest_entry)
16879 @@ -255,10 +325,10 @@ WEAK(xen_entry)
16880 __INITDATA
16881
16882 subarch_entries:
16883 - .long default_entry /* normal x86/PC */
16884 - .long lguest_entry /* lguest hypervisor */
16885 - .long xen_entry /* Xen hypervisor */
16886 - .long default_entry /* Moorestown MID */
16887 + .long ta(default_entry) /* normal x86/PC */
16888 + .long ta(lguest_entry) /* lguest hypervisor */
16889 + .long ta(xen_entry) /* Xen hypervisor */
16890 + .long ta(default_entry) /* Moorestown MID */
16891 num_subarch_entries = (. - subarch_entries) / 4
16892 .previous
16893 #else
16894 @@ -312,6 +382,7 @@ default_entry:
16895 orl %edx,%eax
16896 movl %eax,%cr4
16897
16898 +#ifdef CONFIG_X86_PAE
16899 testb $X86_CR4_PAE, %al # check if PAE is enabled
16900 jz 6f
16901
16902 @@ -340,6 +411,9 @@ default_entry:
16903 /* Make changes effective */
16904 wrmsr
16905
16906 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16907 +#endif
16908 +
16909 6:
16910
16911 /*
16912 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
16913 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16914 movl %eax,%ss # after changing gdt.
16915
16916 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
16917 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16918 movl %eax,%ds
16919 movl %eax,%es
16920
16921 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
16922 */
16923 cmpb $0,ready
16924 jne 1f
16925 - movl $gdt_page,%eax
16926 + movl $cpu_gdt_table,%eax
16927 movl $stack_canary,%ecx
16928 +#ifdef CONFIG_SMP
16929 + addl $__per_cpu_load,%ecx
16930 +#endif
16931 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
16932 shrl $16, %ecx
16933 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
16934 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
16935 1:
16936 -#endif
16937 movl $(__KERNEL_STACK_CANARY),%eax
16938 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16939 + movl $(__USER_DS),%eax
16940 +#else
16941 + xorl %eax,%eax
16942 +#endif
16943 movl %eax,%gs
16944
16945 xorl %eax,%eax # Clear LDT
16946 @@ -558,22 +639,22 @@ early_page_fault:
16947 jmp early_fault
16948
16949 early_fault:
16950 - cld
16951 #ifdef CONFIG_PRINTK
16952 + cmpl $1,%ss:early_recursion_flag
16953 + je hlt_loop
16954 + incl %ss:early_recursion_flag
16955 + cld
16956 pusha
16957 movl $(__KERNEL_DS),%eax
16958 movl %eax,%ds
16959 movl %eax,%es
16960 - cmpl $2,early_recursion_flag
16961 - je hlt_loop
16962 - incl early_recursion_flag
16963 movl %cr2,%eax
16964 pushl %eax
16965 pushl %edx /* trapno */
16966 pushl $fault_msg
16967 call printk
16968 +; call dump_stack
16969 #endif
16970 - call dump_stack
16971 hlt_loop:
16972 hlt
16973 jmp hlt_loop
16974 @@ -581,8 +662,11 @@ hlt_loop:
16975 /* This is the default interrupt "handler" :-) */
16976 ALIGN
16977 ignore_int:
16978 - cld
16979 #ifdef CONFIG_PRINTK
16980 + cmpl $2,%ss:early_recursion_flag
16981 + je hlt_loop
16982 + incl %ss:early_recursion_flag
16983 + cld
16984 pushl %eax
16985 pushl %ecx
16986 pushl %edx
16987 @@ -591,9 +675,6 @@ ignore_int:
16988 movl $(__KERNEL_DS),%eax
16989 movl %eax,%ds
16990 movl %eax,%es
16991 - cmpl $2,early_recursion_flag
16992 - je hlt_loop
16993 - incl early_recursion_flag
16994 pushl 16(%esp)
16995 pushl 24(%esp)
16996 pushl 32(%esp)
16997 @@ -622,29 +703,43 @@ ENTRY(initial_code)
16998 /*
16999 * BSS section
17000 */
17001 -__PAGE_ALIGNED_BSS
17002 - .align PAGE_SIZE
17003 #ifdef CONFIG_X86_PAE
17004 +.section .initial_pg_pmd,"a",@progbits
17005 initial_pg_pmd:
17006 .fill 1024*KPMDS,4,0
17007 #else
17008 +.section .initial_page_table,"a",@progbits
17009 ENTRY(initial_page_table)
17010 .fill 1024,4,0
17011 #endif
17012 +.section .initial_pg_fixmap,"a",@progbits
17013 initial_pg_fixmap:
17014 .fill 1024,4,0
17015 +.section .empty_zero_page,"a",@progbits
17016 ENTRY(empty_zero_page)
17017 .fill 4096,1,0
17018 +.section .swapper_pg_dir,"a",@progbits
17019 ENTRY(swapper_pg_dir)
17020 +#ifdef CONFIG_X86_PAE
17021 + .fill 4,8,0
17022 +#else
17023 .fill 1024,4,0
17024 +#endif
17025 +
17026 +/*
17027 + * The IDT has to be page-aligned to simplify the Pentium
17028 + * F0 0F bug workaround.. We have a special link segment
17029 + * for this.
17030 + */
17031 +.section .idt,"a",@progbits
17032 +ENTRY(idt_table)
17033 + .fill 256,8,0
17034
17035 /*
17036 * This starts the data section.
17037 */
17038 #ifdef CONFIG_X86_PAE
17039 -__PAGE_ALIGNED_DATA
17040 - /* Page-aligned for the benefit of paravirt? */
17041 - .align PAGE_SIZE
17042 +.section .initial_page_table,"a",@progbits
17043 ENTRY(initial_page_table)
17044 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17045 # if KPMDS == 3
17046 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
17047 # error "Kernel PMDs should be 1, 2 or 3"
17048 # endif
17049 .align PAGE_SIZE /* needs to be page-sized too */
17050 +
17051 +#ifdef CONFIG_PAX_PER_CPU_PGD
17052 +ENTRY(cpu_pgd)
17053 + .rept NR_CPUS
17054 + .fill 4,8,0
17055 + .endr
17056 +#endif
17057 +
17058 #endif
17059
17060 .data
17061 .balign 4
17062 ENTRY(stack_start)
17063 - .long init_thread_union+THREAD_SIZE
17064 + .long init_thread_union+THREAD_SIZE-8
17065
17066 +ready: .byte 0
17067 +
17068 +.section .rodata,"a",@progbits
17069 early_recursion_flag:
17070 .long 0
17071
17072 -ready: .byte 0
17073 -
17074 int_msg:
17075 .asciz "Unknown interrupt or fault at: %p %p %p\n"
17076
17077 @@ -707,7 +811,7 @@ fault_msg:
17078 .word 0 # 32 bit align gdt_desc.address
17079 boot_gdt_descr:
17080 .word __BOOT_DS+7
17081 - .long boot_gdt - __PAGE_OFFSET
17082 + .long pa(boot_gdt)
17083
17084 .word 0 # 32-bit align idt_desc.address
17085 idt_descr:
17086 @@ -718,7 +822,7 @@ idt_descr:
17087 .word 0 # 32 bit align gdt_desc.address
17088 ENTRY(early_gdt_descr)
17089 .word GDT_ENTRIES*8-1
17090 - .long gdt_page /* Overwritten for secondary CPUs */
17091 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
17092
17093 /*
17094 * The boot_gdt must mirror the equivalent in setup.S and is
17095 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
17096 .align L1_CACHE_BYTES
17097 ENTRY(boot_gdt)
17098 .fill GDT_ENTRY_BOOT_CS,8,0
17099 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17100 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17101 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17102 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17103 +
17104 + .align PAGE_SIZE_asm
17105 +ENTRY(cpu_gdt_table)
17106 + .rept NR_CPUS
17107 + .quad 0x0000000000000000 /* NULL descriptor */
17108 + .quad 0x0000000000000000 /* 0x0b reserved */
17109 + .quad 0x0000000000000000 /* 0x13 reserved */
17110 + .quad 0x0000000000000000 /* 0x1b reserved */
17111 +
17112 +#ifdef CONFIG_PAX_KERNEXEC
17113 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17114 +#else
17115 + .quad 0x0000000000000000 /* 0x20 unused */
17116 +#endif
17117 +
17118 + .quad 0x0000000000000000 /* 0x28 unused */
17119 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17120 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17121 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17122 + .quad 0x0000000000000000 /* 0x4b reserved */
17123 + .quad 0x0000000000000000 /* 0x53 reserved */
17124 + .quad 0x0000000000000000 /* 0x5b reserved */
17125 +
17126 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17127 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17128 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17129 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17130 +
17131 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17132 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17133 +
17134 + /*
17135 + * Segments used for calling PnP BIOS have byte granularity.
17136 + * The code segments and data segments have fixed 64k limits,
17137 + * the transfer segment sizes are set at run time.
17138 + */
17139 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
17140 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
17141 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
17142 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
17143 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
17144 +
17145 + /*
17146 + * The APM segments have byte granularity and their bases
17147 + * are set at run time. All have 64k limits.
17148 + */
17149 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17150 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17151 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
17152 +
17153 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17154 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17155 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17156 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17157 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17158 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17159 +
17160 + /* Be sure this is zeroed to avoid false validations in Xen */
17161 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17162 + .endr
17163 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17164 index 40f4eb3..6d24d9d 100644
17165 --- a/arch/x86/kernel/head_64.S
17166 +++ b/arch/x86/kernel/head_64.S
17167 @@ -19,6 +19,8 @@
17168 #include <asm/cache.h>
17169 #include <asm/processor-flags.h>
17170 #include <asm/percpu.h>
17171 +#include <asm/cpufeature.h>
17172 +#include <asm/alternative-asm.h>
17173
17174 #ifdef CONFIG_PARAVIRT
17175 #include <asm/asm-offsets.h>
17176 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17177 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17178 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17179 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17180 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
17181 +L3_VMALLOC_START = pud_index(VMALLOC_START)
17182 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
17183 +L3_VMALLOC_END = pud_index(VMALLOC_END)
17184 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17185 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17186
17187 .text
17188 __HEAD
17189 @@ -85,35 +93,23 @@ startup_64:
17190 */
17191 addq %rbp, init_level4_pgt + 0(%rip)
17192 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17193 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17194 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17195 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17196 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17197
17198 addq %rbp, level3_ident_pgt + 0(%rip)
17199 +#ifndef CONFIG_XEN
17200 + addq %rbp, level3_ident_pgt + 8(%rip)
17201 +#endif
17202
17203 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17204 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17205 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17206 +
17207 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17208 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17209
17210 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17211 -
17212 - /* Add an Identity mapping if I am above 1G */
17213 - leaq _text(%rip), %rdi
17214 - andq $PMD_PAGE_MASK, %rdi
17215 -
17216 - movq %rdi, %rax
17217 - shrq $PUD_SHIFT, %rax
17218 - andq $(PTRS_PER_PUD - 1), %rax
17219 - jz ident_complete
17220 -
17221 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17222 - leaq level3_ident_pgt(%rip), %rbx
17223 - movq %rdx, 0(%rbx, %rax, 8)
17224 -
17225 - movq %rdi, %rax
17226 - shrq $PMD_SHIFT, %rax
17227 - andq $(PTRS_PER_PMD - 1), %rax
17228 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17229 - leaq level2_spare_pgt(%rip), %rbx
17230 - movq %rdx, 0(%rbx, %rax, 8)
17231 -ident_complete:
17232 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17233
17234 /*
17235 * Fixup the kernel text+data virtual addresses. Note that
17236 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
17237 * after the boot processor executes this code.
17238 */
17239
17240 - /* Enable PAE mode and PGE */
17241 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17242 + /* Enable PAE mode and PSE/PGE */
17243 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17244 movq %rax, %cr4
17245
17246 /* Setup early boot stage 4 level pagetables. */
17247 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
17248 movl $MSR_EFER, %ecx
17249 rdmsr
17250 btsl $_EFER_SCE, %eax /* Enable System Call */
17251 - btl $20,%edi /* No Execute supported? */
17252 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17253 jnc 1f
17254 btsl $_EFER_NX, %eax
17255 + leaq init_level4_pgt(%rip), %rdi
17256 +#ifndef CONFIG_EFI
17257 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17258 +#endif
17259 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17260 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17261 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17262 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
17263 1: wrmsr /* Make changes effective */
17264
17265 /* Setup cr0 */
17266 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
17267 * jump. In addition we need to ensure %cs is set so we make this
17268 * a far return.
17269 */
17270 + pax_set_fptr_mask
17271 movq initial_code(%rip),%rax
17272 pushq $0 # fake return address to stop unwinder
17273 pushq $__KERNEL_CS # set correct cs
17274 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
17275 bad_address:
17276 jmp bad_address
17277
17278 - .section ".init.text","ax"
17279 + __INIT
17280 #ifdef CONFIG_EARLY_PRINTK
17281 .globl early_idt_handlers
17282 early_idt_handlers:
17283 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
17284 #endif /* EARLY_PRINTK */
17285 1: hlt
17286 jmp 1b
17287 + .previous
17288
17289 #ifdef CONFIG_EARLY_PRINTK
17290 + __INITDATA
17291 early_recursion_flag:
17292 .long 0
17293 + .previous
17294
17295 + .section .rodata,"a",@progbits
17296 early_idt_msg:
17297 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17298 early_idt_ripmsg:
17299 .asciz "RIP %s\n"
17300 + .previous
17301 #endif /* CONFIG_EARLY_PRINTK */
17302 - .previous
17303
17304 + .section .rodata,"a",@progbits
17305 #define NEXT_PAGE(name) \
17306 .balign PAGE_SIZE; \
17307 ENTRY(name)
17308 @@ -338,7 +348,6 @@ ENTRY(name)
17309 i = i + 1 ; \
17310 .endr
17311
17312 - .data
17313 /*
17314 * This default setting generates an ident mapping at address 0x100000
17315 * and a mapping for the kernel that precisely maps virtual address
17316 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
17317 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17318 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17319 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17320 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
17321 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17322 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
17323 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17324 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17325 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17326 .org init_level4_pgt + L4_START_KERNEL*8, 0
17327 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17328 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17329
17330 +#ifdef CONFIG_PAX_PER_CPU_PGD
17331 +NEXT_PAGE(cpu_pgd)
17332 + .rept NR_CPUS
17333 + .fill 512,8,0
17334 + .endr
17335 +#endif
17336 +
17337 NEXT_PAGE(level3_ident_pgt)
17338 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17339 +#ifdef CONFIG_XEN
17340 .fill 511,8,0
17341 +#else
17342 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17343 + .fill 510,8,0
17344 +#endif
17345 +
17346 +NEXT_PAGE(level3_vmalloc_start_pgt)
17347 + .fill 512,8,0
17348 +
17349 +NEXT_PAGE(level3_vmalloc_end_pgt)
17350 + .fill 512,8,0
17351 +
17352 +NEXT_PAGE(level3_vmemmap_pgt)
17353 + .fill L3_VMEMMAP_START,8,0
17354 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17355
17356 NEXT_PAGE(level3_kernel_pgt)
17357 .fill L3_START_KERNEL,8,0
17358 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
17359 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17360 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17361
17362 +NEXT_PAGE(level2_vmemmap_pgt)
17363 + .fill 512,8,0
17364 +
17365 NEXT_PAGE(level2_fixmap_pgt)
17366 - .fill 506,8,0
17367 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17368 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17369 - .fill 5,8,0
17370 + .fill 507,8,0
17371 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17372 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17373 + .fill 4,8,0
17374
17375 -NEXT_PAGE(level1_fixmap_pgt)
17376 +NEXT_PAGE(level1_vsyscall_pgt)
17377 .fill 512,8,0
17378
17379 -NEXT_PAGE(level2_ident_pgt)
17380 - /* Since I easily can, map the first 1G.
17381 + /* Since I easily can, map the first 2G.
17382 * Don't set NX because code runs from these pages.
17383 */
17384 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17385 +NEXT_PAGE(level2_ident_pgt)
17386 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17387
17388 NEXT_PAGE(level2_kernel_pgt)
17389 /*
17390 @@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
17391 * If you want to increase this then increase MODULES_VADDR
17392 * too.)
17393 */
17394 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17395 - KERNEL_IMAGE_SIZE/PMD_SIZE)
17396 -
17397 -NEXT_PAGE(level2_spare_pgt)
17398 - .fill 512, 8, 0
17399 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17400
17401 #undef PMDS
17402 #undef NEXT_PAGE
17403
17404 - .data
17405 + .align PAGE_SIZE
17406 +ENTRY(cpu_gdt_table)
17407 + .rept NR_CPUS
17408 + .quad 0x0000000000000000 /* NULL descriptor */
17409 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17410 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
17411 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
17412 + .quad 0x00cffb000000ffff /* __USER32_CS */
17413 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17414 + .quad 0x00affb000000ffff /* __USER_CS */
17415 +
17416 +#ifdef CONFIG_PAX_KERNEXEC
17417 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17418 +#else
17419 + .quad 0x0 /* unused */
17420 +#endif
17421 +
17422 + .quad 0,0 /* TSS */
17423 + .quad 0,0 /* LDT */
17424 + .quad 0,0,0 /* three TLS descriptors */
17425 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
17426 + /* asm/segment.h:GDT_ENTRIES must match this */
17427 +
17428 + /* zero the remaining page */
17429 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17430 + .endr
17431 +
17432 .align 16
17433 .globl early_gdt_descr
17434 early_gdt_descr:
17435 .word GDT_ENTRIES*8-1
17436 early_gdt_descr_base:
17437 - .quad INIT_PER_CPU_VAR(gdt_page)
17438 + .quad cpu_gdt_table
17439
17440 ENTRY(phys_base)
17441 /* This must match the first entry in level2_kernel_pgt */
17442 .quad 0x0000000000000000
17443
17444 #include "../../x86/xen/xen-head.S"
17445 -
17446 - .section .bss, "aw", @nobits
17447 +
17448 + .section .rodata,"a",@progbits
17449 .align L1_CACHE_BYTES
17450 ENTRY(idt_table)
17451 - .skip IDT_ENTRIES * 16
17452 + .fill 512,8,0
17453
17454 .align L1_CACHE_BYTES
17455 ENTRY(nmi_idt_table)
17456 - .skip IDT_ENTRIES * 16
17457 + .fill 512,8,0
17458
17459 __PAGE_ALIGNED_BSS
17460 .align PAGE_SIZE
17461 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17462 index 9c3bd4a..e1d9b35 100644
17463 --- a/arch/x86/kernel/i386_ksyms_32.c
17464 +++ b/arch/x86/kernel/i386_ksyms_32.c
17465 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17466 EXPORT_SYMBOL(cmpxchg8b_emu);
17467 #endif
17468
17469 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
17470 +
17471 /* Networking helper routines. */
17472 EXPORT_SYMBOL(csum_partial_copy_generic);
17473 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17474 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17475
17476 EXPORT_SYMBOL(__get_user_1);
17477 EXPORT_SYMBOL(__get_user_2);
17478 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17479
17480 EXPORT_SYMBOL(csum_partial);
17481 EXPORT_SYMBOL(empty_zero_page);
17482 +
17483 +#ifdef CONFIG_PAX_KERNEXEC
17484 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17485 +#endif
17486 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
17487 index 2d6e649..df6e1af 100644
17488 --- a/arch/x86/kernel/i387.c
17489 +++ b/arch/x86/kernel/i387.c
17490 @@ -59,7 +59,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
17491 static inline bool interrupted_user_mode(void)
17492 {
17493 struct pt_regs *regs = get_irq_regs();
17494 - return regs && user_mode_vm(regs);
17495 + return regs && user_mode(regs);
17496 }
17497
17498 /*
17499 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17500 index 36d1853..bf25736 100644
17501 --- a/arch/x86/kernel/i8259.c
17502 +++ b/arch/x86/kernel/i8259.c
17503 @@ -209,7 +209,7 @@ spurious_8259A_irq:
17504 "spurious 8259A interrupt: IRQ%d.\n", irq);
17505 spurious_irq_mask |= irqmask;
17506 }
17507 - atomic_inc(&irq_err_count);
17508 + atomic_inc_unchecked(&irq_err_count);
17509 /*
17510 * Theoretically we do not have to handle this IRQ,
17511 * but in Linux this does not cause problems and is
17512 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17513 index 43e9ccf..44ccf6f 100644
17514 --- a/arch/x86/kernel/init_task.c
17515 +++ b/arch/x86/kernel/init_task.c
17516 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17517 * way process stacks are handled. This is done by having a special
17518 * "init_task" linker map entry..
17519 */
17520 -union thread_union init_thread_union __init_task_data =
17521 - { INIT_THREAD_INFO(init_task) };
17522 +union thread_union init_thread_union __init_task_data;
17523
17524 /*
17525 * Initial task structure.
17526 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17527 * section. Since TSS's are completely CPU-local, we want them
17528 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17529 */
17530 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17531 -
17532 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17533 +EXPORT_SYMBOL(init_tss);
17534 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17535 index 8c96897..be66bfa 100644
17536 --- a/arch/x86/kernel/ioport.c
17537 +++ b/arch/x86/kernel/ioport.c
17538 @@ -6,6 +6,7 @@
17539 #include <linux/sched.h>
17540 #include <linux/kernel.h>
17541 #include <linux/capability.h>
17542 +#include <linux/security.h>
17543 #include <linux/errno.h>
17544 #include <linux/types.h>
17545 #include <linux/ioport.h>
17546 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17547
17548 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17549 return -EINVAL;
17550 +#ifdef CONFIG_GRKERNSEC_IO
17551 + if (turn_on && grsec_disable_privio) {
17552 + gr_handle_ioperm();
17553 + return -EPERM;
17554 + }
17555 +#endif
17556 if (turn_on && !capable(CAP_SYS_RAWIO))
17557 return -EPERM;
17558
17559 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17560 * because the ->io_bitmap_max value must match the bitmap
17561 * contents:
17562 */
17563 - tss = &per_cpu(init_tss, get_cpu());
17564 + tss = init_tss + get_cpu();
17565
17566 if (turn_on)
17567 bitmap_clear(t->io_bitmap_ptr, from, num);
17568 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17569 return -EINVAL;
17570 /* Trying to gain more privileges? */
17571 if (level > old) {
17572 +#ifdef CONFIG_GRKERNSEC_IO
17573 + if (grsec_disable_privio) {
17574 + gr_handle_iopl();
17575 + return -EPERM;
17576 + }
17577 +#endif
17578 if (!capable(CAP_SYS_RAWIO))
17579 return -EPERM;
17580 }
17581 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17582 index 3dafc60..aa8e9c4 100644
17583 --- a/arch/x86/kernel/irq.c
17584 +++ b/arch/x86/kernel/irq.c
17585 @@ -18,7 +18,7 @@
17586 #include <asm/mce.h>
17587 #include <asm/hw_irq.h>
17588
17589 -atomic_t irq_err_count;
17590 +atomic_unchecked_t irq_err_count;
17591
17592 /* Function pointer for generic interrupt vector handling */
17593 void (*x86_platform_ipi_callback)(void) = NULL;
17594 @@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17595 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17596 seq_printf(p, " Machine check polls\n");
17597 #endif
17598 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17599 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17600 #if defined(CONFIG_X86_IO_APIC)
17601 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17602 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17603 #endif
17604 return 0;
17605 }
17606 @@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17607
17608 u64 arch_irq_stat(void)
17609 {
17610 - u64 sum = atomic_read(&irq_err_count);
17611 + u64 sum = atomic_read_unchecked(&irq_err_count);
17612
17613 #ifdef CONFIG_X86_IO_APIC
17614 - sum += atomic_read(&irq_mis_count);
17615 + sum += atomic_read_unchecked(&irq_mis_count);
17616 #endif
17617 return sum;
17618 }
17619 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17620 index 58b7f27..e112d08 100644
17621 --- a/arch/x86/kernel/irq_32.c
17622 +++ b/arch/x86/kernel/irq_32.c
17623 @@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17624 __asm__ __volatile__("andl %%esp,%0" :
17625 "=r" (sp) : "0" (THREAD_SIZE - 1));
17626
17627 - return sp < (sizeof(struct thread_info) + STACK_WARN);
17628 + return sp < STACK_WARN;
17629 }
17630
17631 static void print_stack_overflow(void)
17632 @@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17633 * per-CPU IRQ handling contexts (thread information and stack)
17634 */
17635 union irq_ctx {
17636 - struct thread_info tinfo;
17637 - u32 stack[THREAD_SIZE/sizeof(u32)];
17638 + unsigned long previous_esp;
17639 + u32 stack[THREAD_SIZE/sizeof(u32)];
17640 } __attribute__((aligned(THREAD_SIZE)));
17641
17642 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17643 @@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17644 static inline int
17645 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17646 {
17647 - union irq_ctx *curctx, *irqctx;
17648 + union irq_ctx *irqctx;
17649 u32 *isp, arg1, arg2;
17650
17651 - curctx = (union irq_ctx *) current_thread_info();
17652 irqctx = __this_cpu_read(hardirq_ctx);
17653
17654 /*
17655 @@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17656 * handler) we can't do that and just have to keep using the
17657 * current stack (which is the irq stack already after all)
17658 */
17659 - if (unlikely(curctx == irqctx))
17660 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17661 return 0;
17662
17663 /* build the stack frame on the IRQ stack */
17664 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17665 - irqctx->tinfo.task = curctx->tinfo.task;
17666 - irqctx->tinfo.previous_esp = current_stack_pointer;
17667 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17668 + irqctx->previous_esp = current_stack_pointer;
17669
17670 - /* Copy the preempt_count so that the [soft]irq checks work. */
17671 - irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
17672 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17673 + __set_fs(MAKE_MM_SEG(0));
17674 +#endif
17675
17676 if (unlikely(overflow))
17677 call_on_stack(print_stack_overflow, isp);
17678 @@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17679 : "0" (irq), "1" (desc), "2" (isp),
17680 "D" (desc->handle_irq)
17681 : "memory", "cc", "ecx");
17682 +
17683 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17684 + __set_fs(current_thread_info()->addr_limit);
17685 +#endif
17686 +
17687 return 1;
17688 }
17689
17690 @@ -121,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17691 */
17692 void __cpuinit irq_ctx_init(int cpu)
17693 {
17694 - union irq_ctx *irqctx;
17695 -
17696 if (per_cpu(hardirq_ctx, cpu))
17697 return;
17698
17699 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17700 - THREAD_FLAGS,
17701 - THREAD_ORDER));
17702 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17703 - irqctx->tinfo.cpu = cpu;
17704 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17705 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17706 -
17707 - per_cpu(hardirq_ctx, cpu) = irqctx;
17708 -
17709 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17710 - THREAD_FLAGS,
17711 - THREAD_ORDER));
17712 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17713 - irqctx->tinfo.cpu = cpu;
17714 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17715 -
17716 - per_cpu(softirq_ctx, cpu) = irqctx;
17717 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17718 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17719
17720 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17721 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17722 @@ -152,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17723 asmlinkage void do_softirq(void)
17724 {
17725 unsigned long flags;
17726 - struct thread_info *curctx;
17727 union irq_ctx *irqctx;
17728 u32 *isp;
17729
17730 @@ -162,15 +147,22 @@ asmlinkage void do_softirq(void)
17731 local_irq_save(flags);
17732
17733 if (local_softirq_pending()) {
17734 - curctx = current_thread_info();
17735 irqctx = __this_cpu_read(softirq_ctx);
17736 - irqctx->tinfo.task = curctx->task;
17737 - irqctx->tinfo.previous_esp = current_stack_pointer;
17738 + irqctx->previous_esp = current_stack_pointer;
17739
17740 /* build the stack frame on the softirq stack */
17741 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17742 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17743 +
17744 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17745 + __set_fs(MAKE_MM_SEG(0));
17746 +#endif
17747
17748 call_on_stack(__do_softirq, isp);
17749 +
17750 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17751 + __set_fs(current_thread_info()->addr_limit);
17752 +#endif
17753 +
17754 /*
17755 * Shouldn't happen, we returned above if in_interrupt():
17756 */
17757 @@ -191,7 +183,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
17758 if (unlikely(!desc))
17759 return false;
17760
17761 - if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17762 + if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17763 if (unlikely(overflow))
17764 print_stack_overflow();
17765 desc->handle_irq(irq, desc);
17766 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17767 index d04d3ec..ea4b374 100644
17768 --- a/arch/x86/kernel/irq_64.c
17769 +++ b/arch/x86/kernel/irq_64.c
17770 @@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17771 u64 estack_top, estack_bottom;
17772 u64 curbase = (u64)task_stack_page(current);
17773
17774 - if (user_mode_vm(regs))
17775 + if (user_mode(regs))
17776 return;
17777
17778 if (regs->sp >= curbase + sizeof(struct thread_info) +
17779 diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
17780 index 1d5d31e..ab846ed 100644
17781 --- a/arch/x86/kernel/kdebugfs.c
17782 +++ b/arch/x86/kernel/kdebugfs.c
17783 @@ -28,6 +28,8 @@ struct setup_data_node {
17784 };
17785
17786 static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17787 + size_t count, loff_t *ppos) __size_overflow(3);
17788 +static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17789 size_t count, loff_t *ppos)
17790 {
17791 struct setup_data_node *node = file->private_data;
17792 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17793 index 8bfb614..2b3b35f 100644
17794 --- a/arch/x86/kernel/kgdb.c
17795 +++ b/arch/x86/kernel/kgdb.c
17796 @@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17797 #ifdef CONFIG_X86_32
17798 switch (regno) {
17799 case GDB_SS:
17800 - if (!user_mode_vm(regs))
17801 + if (!user_mode(regs))
17802 *(unsigned long *)mem = __KERNEL_DS;
17803 break;
17804 case GDB_SP:
17805 - if (!user_mode_vm(regs))
17806 + if (!user_mode(regs))
17807 *(unsigned long *)mem = kernel_stack_pointer(regs);
17808 break;
17809 case GDB_GS:
17810 @@ -476,12 +476,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17811 case 'k':
17812 /* clear the trace bit */
17813 linux_regs->flags &= ~X86_EFLAGS_TF;
17814 - atomic_set(&kgdb_cpu_doing_single_step, -1);
17815 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17816
17817 /* set the trace bit if we're stepping */
17818 if (remcomInBuffer[0] == 's') {
17819 linux_regs->flags |= X86_EFLAGS_TF;
17820 - atomic_set(&kgdb_cpu_doing_single_step,
17821 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17822 raw_smp_processor_id());
17823 }
17824
17825 @@ -546,7 +546,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17826
17827 switch (cmd) {
17828 case DIE_DEBUG:
17829 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17830 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17831 if (user_mode(regs))
17832 return single_step_cont(regs, args);
17833 break;
17834 diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
17835 index c5e410e..da6aaf9 100644
17836 --- a/arch/x86/kernel/kprobes-opt.c
17837 +++ b/arch/x86/kernel/kprobes-opt.c
17838 @@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17839 * Verify if the address gap is in 2GB range, because this uses
17840 * a relative jump.
17841 */
17842 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17843 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17844 if (abs(rel) > 0x7fffffff)
17845 return -ERANGE;
17846
17847 @@ -359,11 +359,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17848 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17849
17850 /* Set probe function call */
17851 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17852 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17853
17854 /* Set returning jmp instruction at the tail of out-of-line buffer */
17855 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17856 - (u8 *)op->kp.addr + op->optinsn.size);
17857 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17858
17859 flush_icache_range((unsigned long) buf,
17860 (unsigned long) buf + TMPL_END_IDX +
17861 @@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17862 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17863
17864 /* Backup instructions which will be replaced by jump address */
17865 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17866 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17867 RELATIVE_ADDR_SIZE);
17868
17869 insn_buf[0] = RELATIVEJUMP_OPCODE;
17870 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17871 index e213fc8..d783ba4 100644
17872 --- a/arch/x86/kernel/kprobes.c
17873 +++ b/arch/x86/kernel/kprobes.c
17874 @@ -120,8 +120,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17875 } __attribute__((packed)) *insn;
17876
17877 insn = (struct __arch_relative_insn *)from;
17878 +
17879 + pax_open_kernel();
17880 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17881 insn->op = op;
17882 + pax_close_kernel();
17883 }
17884
17885 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17886 @@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
17887 kprobe_opcode_t opcode;
17888 kprobe_opcode_t *orig_opcodes = opcodes;
17889
17890 - if (search_exception_tables((unsigned long)opcodes))
17891 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17892 return 0; /* Page fault may occur on this address. */
17893
17894 retry:
17895 @@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17896 /* Another subsystem puts a breakpoint, failed to recover */
17897 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
17898 return 0;
17899 + pax_open_kernel();
17900 memcpy(dest, insn.kaddr, insn.length);
17901 + pax_close_kernel();
17902
17903 #ifdef CONFIG_X86_64
17904 if (insn_rip_relative(&insn)) {
17905 @@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17906 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
17907 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17908 disp = (u8 *) dest + insn_offset_displacement(&insn);
17909 + pax_open_kernel();
17910 *(s32 *) disp = (s32) newdisp;
17911 + pax_close_kernel();
17912 }
17913 #endif
17914 return insn.length;
17915 @@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17916 * nor set current_kprobe, because it doesn't use single
17917 * stepping.
17918 */
17919 - regs->ip = (unsigned long)p->ainsn.insn;
17920 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17921 preempt_enable_no_resched();
17922 return;
17923 }
17924 @@ -504,7 +511,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17925 if (p->opcode == BREAKPOINT_INSTRUCTION)
17926 regs->ip = (unsigned long)p->addr;
17927 else
17928 - regs->ip = (unsigned long)p->ainsn.insn;
17929 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17930 }
17931
17932 /*
17933 @@ -583,7 +590,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17934 setup_singlestep(p, regs, kcb, 0);
17935 return 1;
17936 }
17937 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
17938 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17939 /*
17940 * The breakpoint instruction was removed right
17941 * after we hit it. Another cpu has removed
17942 @@ -628,6 +635,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17943 " movq %rax, 152(%rsp)\n"
17944 RESTORE_REGS_STRING
17945 " popfq\n"
17946 +#ifdef KERNEXEC_PLUGIN
17947 + " btsq $63,(%rsp)\n"
17948 +#endif
17949 #else
17950 " pushf\n"
17951 SAVE_REGS_STRING
17952 @@ -765,7 +775,7 @@ static void __kprobes
17953 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17954 {
17955 unsigned long *tos = stack_addr(regs);
17956 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17957 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17958 unsigned long orig_ip = (unsigned long)p->addr;
17959 kprobe_opcode_t *insn = p->ainsn.insn;
17960
17961 @@ -947,7 +957,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
17962 struct die_args *args = data;
17963 int ret = NOTIFY_DONE;
17964
17965 - if (args->regs && user_mode_vm(args->regs))
17966 + if (args->regs && user_mode(args->regs))
17967 return ret;
17968
17969 switch (val) {
17970 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
17971 index ebc9873..1b9724b 100644
17972 --- a/arch/x86/kernel/ldt.c
17973 +++ b/arch/x86/kernel/ldt.c
17974 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
17975 if (reload) {
17976 #ifdef CONFIG_SMP
17977 preempt_disable();
17978 - load_LDT(pc);
17979 + load_LDT_nolock(pc);
17980 if (!cpumask_equal(mm_cpumask(current->mm),
17981 cpumask_of(smp_processor_id())))
17982 smp_call_function(flush_ldt, current->mm, 1);
17983 preempt_enable();
17984 #else
17985 - load_LDT(pc);
17986 + load_LDT_nolock(pc);
17987 #endif
17988 }
17989 if (oldsize) {
17990 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
17991 return err;
17992
17993 for (i = 0; i < old->size; i++)
17994 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
17995 + write_ldt_entry(new->ldt, i, old->ldt + i);
17996 return 0;
17997 }
17998
17999 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18000 retval = copy_ldt(&mm->context, &old_mm->context);
18001 mutex_unlock(&old_mm->context.lock);
18002 }
18003 +
18004 + if (tsk == current) {
18005 + mm->context.vdso = 0;
18006 +
18007 +#ifdef CONFIG_X86_32
18008 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18009 + mm->context.user_cs_base = 0UL;
18010 + mm->context.user_cs_limit = ~0UL;
18011 +
18012 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18013 + cpus_clear(mm->context.cpu_user_cs_mask);
18014 +#endif
18015 +
18016 +#endif
18017 +#endif
18018 +
18019 + }
18020 +
18021 return retval;
18022 }
18023
18024 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18025 }
18026 }
18027
18028 +#ifdef CONFIG_PAX_SEGMEXEC
18029 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18030 + error = -EINVAL;
18031 + goto out_unlock;
18032 + }
18033 +#endif
18034 +
18035 fill_ldt(&ldt, &ldt_info);
18036 if (oldmode)
18037 ldt.avl = 0;
18038 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18039 index 5b19e4d..6476a76 100644
18040 --- a/arch/x86/kernel/machine_kexec_32.c
18041 +++ b/arch/x86/kernel/machine_kexec_32.c
18042 @@ -26,7 +26,7 @@
18043 #include <asm/cacheflush.h>
18044 #include <asm/debugreg.h>
18045
18046 -static void set_idt(void *newidt, __u16 limit)
18047 +static void set_idt(struct desc_struct *newidt, __u16 limit)
18048 {
18049 struct desc_ptr curidt;
18050
18051 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18052 }
18053
18054
18055 -static void set_gdt(void *newgdt, __u16 limit)
18056 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18057 {
18058 struct desc_ptr curgdt;
18059
18060 @@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
18061 }
18062
18063 control_page = page_address(image->control_code_page);
18064 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18065 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18066
18067 relocate_kernel_ptr = control_page;
18068 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18069 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18070 index 0327e2b..e43737b 100644
18071 --- a/arch/x86/kernel/microcode_intel.c
18072 +++ b/arch/x86/kernel/microcode_intel.c
18073 @@ -430,13 +430,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18074
18075 static int get_ucode_user(void *to, const void *from, size_t n)
18076 {
18077 - return copy_from_user(to, from, n);
18078 + return copy_from_user(to, (const void __force_user *)from, n);
18079 }
18080
18081 static enum ucode_state
18082 request_microcode_user(int cpu, const void __user *buf, size_t size)
18083 {
18084 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18085 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18086 }
18087
18088 static void microcode_fini_cpu(int cpu)
18089 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18090 index f21fd94..61565cd 100644
18091 --- a/arch/x86/kernel/module.c
18092 +++ b/arch/x86/kernel/module.c
18093 @@ -35,15 +35,60 @@
18094 #define DEBUGP(fmt...)
18095 #endif
18096
18097 -void *module_alloc(unsigned long size)
18098 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
18099 {
18100 - if (PAGE_ALIGN(size) > MODULES_LEN)
18101 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
18102 return NULL;
18103 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
18104 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
18105 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
18106 -1, __builtin_return_address(0));
18107 }
18108
18109 +void *module_alloc(unsigned long size)
18110 +{
18111 +
18112 +#ifdef CONFIG_PAX_KERNEXEC
18113 + return __module_alloc(size, PAGE_KERNEL);
18114 +#else
18115 + return __module_alloc(size, PAGE_KERNEL_EXEC);
18116 +#endif
18117 +
18118 +}
18119 +
18120 +#ifdef CONFIG_PAX_KERNEXEC
18121 +#ifdef CONFIG_X86_32
18122 +void *module_alloc_exec(unsigned long size)
18123 +{
18124 + struct vm_struct *area;
18125 +
18126 + if (size == 0)
18127 + return NULL;
18128 +
18129 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18130 + return area ? area->addr : NULL;
18131 +}
18132 +EXPORT_SYMBOL(module_alloc_exec);
18133 +
18134 +void module_free_exec(struct module *mod, void *module_region)
18135 +{
18136 + vunmap(module_region);
18137 +}
18138 +EXPORT_SYMBOL(module_free_exec);
18139 +#else
18140 +void module_free_exec(struct module *mod, void *module_region)
18141 +{
18142 + module_free(mod, module_region);
18143 +}
18144 +EXPORT_SYMBOL(module_free_exec);
18145 +
18146 +void *module_alloc_exec(unsigned long size)
18147 +{
18148 + return __module_alloc(size, PAGE_KERNEL_RX);
18149 +}
18150 +EXPORT_SYMBOL(module_alloc_exec);
18151 +#endif
18152 +#endif
18153 +
18154 #ifdef CONFIG_X86_32
18155 int apply_relocate(Elf32_Shdr *sechdrs,
18156 const char *strtab,
18157 @@ -54,14 +99,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18158 unsigned int i;
18159 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18160 Elf32_Sym *sym;
18161 - uint32_t *location;
18162 + uint32_t *plocation, location;
18163
18164 DEBUGP("Applying relocate section %u to %u\n", relsec,
18165 sechdrs[relsec].sh_info);
18166 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18167 /* This is where to make the change */
18168 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18169 - + rel[i].r_offset;
18170 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18171 + location = (uint32_t)plocation;
18172 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18173 + plocation = ktla_ktva((void *)plocation);
18174 /* This is the symbol it is referring to. Note that all
18175 undefined symbols have been resolved. */
18176 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18177 @@ -70,11 +117,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18178 switch (ELF32_R_TYPE(rel[i].r_info)) {
18179 case R_386_32:
18180 /* We add the value into the location given */
18181 - *location += sym->st_value;
18182 + pax_open_kernel();
18183 + *plocation += sym->st_value;
18184 + pax_close_kernel();
18185 break;
18186 case R_386_PC32:
18187 /* Add the value, subtract its postition */
18188 - *location += sym->st_value - (uint32_t)location;
18189 + pax_open_kernel();
18190 + *plocation += sym->st_value - location;
18191 + pax_close_kernel();
18192 break;
18193 default:
18194 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18195 @@ -119,21 +170,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18196 case R_X86_64_NONE:
18197 break;
18198 case R_X86_64_64:
18199 + pax_open_kernel();
18200 *(u64 *)loc = val;
18201 + pax_close_kernel();
18202 break;
18203 case R_X86_64_32:
18204 + pax_open_kernel();
18205 *(u32 *)loc = val;
18206 + pax_close_kernel();
18207 if (val != *(u32 *)loc)
18208 goto overflow;
18209 break;
18210 case R_X86_64_32S:
18211 + pax_open_kernel();
18212 *(s32 *)loc = val;
18213 + pax_close_kernel();
18214 if ((s64)val != *(s32 *)loc)
18215 goto overflow;
18216 break;
18217 case R_X86_64_PC32:
18218 val -= (u64)loc;
18219 + pax_open_kernel();
18220 *(u32 *)loc = val;
18221 + pax_close_kernel();
18222 +
18223 #if 0
18224 if ((s64)val != *(s32 *)loc)
18225 goto overflow;
18226 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
18227 index 32856fa..ce95eaa 100644
18228 --- a/arch/x86/kernel/nmi.c
18229 +++ b/arch/x86/kernel/nmi.c
18230 @@ -507,6 +507,17 @@ static inline void nmi_nesting_postprocess(void)
18231 dotraplinkage notrace __kprobes void
18232 do_nmi(struct pt_regs *regs, long error_code)
18233 {
18234 +
18235 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18236 + if (!user_mode(regs)) {
18237 + unsigned long cs = regs->cs & 0xFFFF;
18238 + unsigned long ip = ktva_ktla(regs->ip);
18239 +
18240 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
18241 + regs->ip = ip;
18242 + }
18243 +#endif
18244 +
18245 nmi_nesting_preprocess(regs);
18246
18247 nmi_enter();
18248 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18249 index 676b8c7..870ba04 100644
18250 --- a/arch/x86/kernel/paravirt-spinlocks.c
18251 +++ b/arch/x86/kernel/paravirt-spinlocks.c
18252 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
18253 arch_spin_lock(lock);
18254 }
18255
18256 -struct pv_lock_ops pv_lock_ops = {
18257 +struct pv_lock_ops pv_lock_ops __read_only = {
18258 #ifdef CONFIG_SMP
18259 .spin_is_locked = __ticket_spin_is_locked,
18260 .spin_is_contended = __ticket_spin_is_contended,
18261 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18262 index ab13760..01218e0 100644
18263 --- a/arch/x86/kernel/paravirt.c
18264 +++ b/arch/x86/kernel/paravirt.c
18265 @@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
18266 {
18267 return x;
18268 }
18269 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18270 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18271 +#endif
18272
18273 void __init default_banner(void)
18274 {
18275 @@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18276 if (opfunc == NULL)
18277 /* If there's no function, patch it with a ud2a (BUG) */
18278 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18279 - else if (opfunc == _paravirt_nop)
18280 + else if (opfunc == (void *)_paravirt_nop)
18281 /* If the operation is a nop, then nop the callsite */
18282 ret = paravirt_patch_nop();
18283
18284 /* identity functions just return their single argument */
18285 - else if (opfunc == _paravirt_ident_32)
18286 + else if (opfunc == (void *)_paravirt_ident_32)
18287 ret = paravirt_patch_ident_32(insnbuf, len);
18288 - else if (opfunc == _paravirt_ident_64)
18289 + else if (opfunc == (void *)_paravirt_ident_64)
18290 ret = paravirt_patch_ident_64(insnbuf, len);
18291 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18292 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18293 + ret = paravirt_patch_ident_64(insnbuf, len);
18294 +#endif
18295
18296 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18297 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18298 @@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18299 if (insn_len > len || start == NULL)
18300 insn_len = len;
18301 else
18302 - memcpy(insnbuf, start, insn_len);
18303 + memcpy(insnbuf, ktla_ktva(start), insn_len);
18304
18305 return insn_len;
18306 }
18307 @@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
18308 preempt_enable();
18309 }
18310
18311 -struct pv_info pv_info = {
18312 +struct pv_info pv_info __read_only = {
18313 .name = "bare hardware",
18314 .paravirt_enabled = 0,
18315 .kernel_rpl = 0,
18316 @@ -315,16 +322,16 @@ struct pv_info pv_info = {
18317 #endif
18318 };
18319
18320 -struct pv_init_ops pv_init_ops = {
18321 +struct pv_init_ops pv_init_ops __read_only = {
18322 .patch = native_patch,
18323 };
18324
18325 -struct pv_time_ops pv_time_ops = {
18326 +struct pv_time_ops pv_time_ops __read_only = {
18327 .sched_clock = native_sched_clock,
18328 .steal_clock = native_steal_clock,
18329 };
18330
18331 -struct pv_irq_ops pv_irq_ops = {
18332 +struct pv_irq_ops pv_irq_ops __read_only = {
18333 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18334 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18335 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18336 @@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
18337 #endif
18338 };
18339
18340 -struct pv_cpu_ops pv_cpu_ops = {
18341 +struct pv_cpu_ops pv_cpu_ops __read_only = {
18342 .cpuid = native_cpuid,
18343 .get_debugreg = native_get_debugreg,
18344 .set_debugreg = native_set_debugreg,
18345 @@ -397,21 +404,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18346 .end_context_switch = paravirt_nop,
18347 };
18348
18349 -struct pv_apic_ops pv_apic_ops = {
18350 +struct pv_apic_ops pv_apic_ops __read_only = {
18351 #ifdef CONFIG_X86_LOCAL_APIC
18352 .startup_ipi_hook = paravirt_nop,
18353 #endif
18354 };
18355
18356 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18357 +#ifdef CONFIG_X86_32
18358 +#ifdef CONFIG_X86_PAE
18359 +/* 64-bit pagetable entries */
18360 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18361 +#else
18362 /* 32-bit pagetable entries */
18363 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18364 +#endif
18365 #else
18366 /* 64-bit pagetable entries */
18367 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18368 #endif
18369
18370 -struct pv_mmu_ops pv_mmu_ops = {
18371 +struct pv_mmu_ops pv_mmu_ops __read_only = {
18372
18373 .read_cr2 = native_read_cr2,
18374 .write_cr2 = native_write_cr2,
18375 @@ -461,6 +473,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18376 .make_pud = PTE_IDENT,
18377
18378 .set_pgd = native_set_pgd,
18379 + .set_pgd_batched = native_set_pgd_batched,
18380 #endif
18381 #endif /* PAGETABLE_LEVELS >= 3 */
18382
18383 @@ -480,6 +493,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18384 },
18385
18386 .set_fixmap = native_set_fixmap,
18387 +
18388 +#ifdef CONFIG_PAX_KERNEXEC
18389 + .pax_open_kernel = native_pax_open_kernel,
18390 + .pax_close_kernel = native_pax_close_kernel,
18391 +#endif
18392 +
18393 };
18394
18395 EXPORT_SYMBOL_GPL(pv_time_ops);
18396 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
18397 index 35ccf75..7a15747 100644
18398 --- a/arch/x86/kernel/pci-iommu_table.c
18399 +++ b/arch/x86/kernel/pci-iommu_table.c
18400 @@ -2,7 +2,7 @@
18401 #include <asm/iommu_table.h>
18402 #include <linux/string.h>
18403 #include <linux/kallsyms.h>
18404 -
18405 +#include <linux/sched.h>
18406
18407 #define DEBUG 1
18408
18409 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18410 index 1d92a5a..7bc8c29 100644
18411 --- a/arch/x86/kernel/process.c
18412 +++ b/arch/x86/kernel/process.c
18413 @@ -69,16 +69,33 @@ void free_thread_xstate(struct task_struct *tsk)
18414
18415 void free_thread_info(struct thread_info *ti)
18416 {
18417 - free_thread_xstate(ti->task);
18418 free_pages((unsigned long)ti, THREAD_ORDER);
18419 }
18420
18421 +static struct kmem_cache *task_struct_cachep;
18422 +
18423 void arch_task_cache_init(void)
18424 {
18425 - task_xstate_cachep =
18426 - kmem_cache_create("task_xstate", xstate_size,
18427 + /* create a slab on which task_structs can be allocated */
18428 + task_struct_cachep =
18429 + kmem_cache_create("task_struct", sizeof(struct task_struct),
18430 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18431 +
18432 + task_xstate_cachep =
18433 + kmem_cache_create("task_xstate", xstate_size,
18434 __alignof__(union thread_xstate),
18435 - SLAB_PANIC | SLAB_NOTRACK, NULL);
18436 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18437 +}
18438 +
18439 +struct task_struct *alloc_task_struct_node(int node)
18440 +{
18441 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
18442 +}
18443 +
18444 +void free_task_struct(struct task_struct *task)
18445 +{
18446 + free_thread_xstate(task);
18447 + kmem_cache_free(task_struct_cachep, task);
18448 }
18449
18450 /*
18451 @@ -91,7 +108,7 @@ void exit_thread(void)
18452 unsigned long *bp = t->io_bitmap_ptr;
18453
18454 if (bp) {
18455 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18456 + struct tss_struct *tss = init_tss + get_cpu();
18457
18458 t->io_bitmap_ptr = NULL;
18459 clear_thread_flag(TIF_IO_BITMAP);
18460 @@ -127,7 +144,7 @@ void show_regs_common(void)
18461
18462 printk(KERN_CONT "\n");
18463 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18464 - current->pid, current->comm, print_tainted(),
18465 + task_pid_nr(current), current->comm, print_tainted(),
18466 init_utsname()->release,
18467 (int)strcspn(init_utsname()->version, " "),
18468 init_utsname()->version);
18469 @@ -141,6 +158,9 @@ void flush_thread(void)
18470 {
18471 struct task_struct *tsk = current;
18472
18473 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18474 + loadsegment(gs, 0);
18475 +#endif
18476 flush_ptrace_hw_breakpoint(tsk);
18477 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18478 /*
18479 @@ -303,10 +323,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18480 regs.di = (unsigned long) arg;
18481
18482 #ifdef CONFIG_X86_32
18483 - regs.ds = __USER_DS;
18484 - regs.es = __USER_DS;
18485 + regs.ds = __KERNEL_DS;
18486 + regs.es = __KERNEL_DS;
18487 regs.fs = __KERNEL_PERCPU;
18488 - regs.gs = __KERNEL_STACK_CANARY;
18489 + savesegment(gs, regs.gs);
18490 #else
18491 regs.ss = __KERNEL_DS;
18492 #endif
18493 @@ -392,7 +412,7 @@ static void __exit_idle(void)
18494 void exit_idle(void)
18495 {
18496 /* idle loop has pid 0 */
18497 - if (current->pid)
18498 + if (task_pid_nr(current))
18499 return;
18500 __exit_idle();
18501 }
18502 @@ -501,7 +521,7 @@ bool set_pm_idle_to_default(void)
18503
18504 return ret;
18505 }
18506 -void stop_this_cpu(void *dummy)
18507 +__noreturn void stop_this_cpu(void *dummy)
18508 {
18509 local_irq_disable();
18510 /*
18511 @@ -743,16 +763,37 @@ static int __init idle_setup(char *str)
18512 }
18513 early_param("idle", idle_setup);
18514
18515 -unsigned long arch_align_stack(unsigned long sp)
18516 +#ifdef CONFIG_PAX_RANDKSTACK
18517 +void pax_randomize_kstack(struct pt_regs *regs)
18518 {
18519 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18520 - sp -= get_random_int() % 8192;
18521 - return sp & ~0xf;
18522 -}
18523 + struct thread_struct *thread = &current->thread;
18524 + unsigned long time;
18525
18526 -unsigned long arch_randomize_brk(struct mm_struct *mm)
18527 -{
18528 - unsigned long range_end = mm->brk + 0x02000000;
18529 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18530 -}
18531 + if (!randomize_va_space)
18532 + return;
18533 +
18534 + if (v8086_mode(regs))
18535 + return;
18536
18537 + rdtscl(time);
18538 +
18539 + /* P4 seems to return a 0 LSB, ignore it */
18540 +#ifdef CONFIG_MPENTIUM4
18541 + time &= 0x3EUL;
18542 + time <<= 2;
18543 +#elif defined(CONFIG_X86_64)
18544 + time &= 0xFUL;
18545 + time <<= 4;
18546 +#else
18547 + time &= 0x1FUL;
18548 + time <<= 3;
18549 +#endif
18550 +
18551 + thread->sp0 ^= time;
18552 + load_sp0(init_tss + smp_processor_id(), thread);
18553 +
18554 +#ifdef CONFIG_X86_64
18555 + percpu_write(kernel_stack, thread->sp0);
18556 +#endif
18557 +}
18558 +#endif
18559 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18560 index ae68473..7b0bb71 100644
18561 --- a/arch/x86/kernel/process_32.c
18562 +++ b/arch/x86/kernel/process_32.c
18563 @@ -64,6 +64,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18564 unsigned long thread_saved_pc(struct task_struct *tsk)
18565 {
18566 return ((unsigned long *)tsk->thread.sp)[3];
18567 +//XXX return tsk->thread.eip;
18568 }
18569
18570 void __show_regs(struct pt_regs *regs, int all)
18571 @@ -73,15 +74,14 @@ void __show_regs(struct pt_regs *regs, int all)
18572 unsigned long sp;
18573 unsigned short ss, gs;
18574
18575 - if (user_mode_vm(regs)) {
18576 + if (user_mode(regs)) {
18577 sp = regs->sp;
18578 ss = regs->ss & 0xffff;
18579 - gs = get_user_gs(regs);
18580 } else {
18581 sp = kernel_stack_pointer(regs);
18582 savesegment(ss, ss);
18583 - savesegment(gs, gs);
18584 }
18585 + gs = get_user_gs(regs);
18586
18587 show_regs_common();
18588
18589 @@ -143,13 +143,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18590 struct task_struct *tsk;
18591 int err;
18592
18593 - childregs = task_pt_regs(p);
18594 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18595 *childregs = *regs;
18596 childregs->ax = 0;
18597 childregs->sp = sp;
18598
18599 p->thread.sp = (unsigned long) childregs;
18600 p->thread.sp0 = (unsigned long) (childregs+1);
18601 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18602
18603 p->thread.ip = (unsigned long) ret_from_fork;
18604
18605 @@ -240,7 +241,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18606 struct thread_struct *prev = &prev_p->thread,
18607 *next = &next_p->thread;
18608 int cpu = smp_processor_id();
18609 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18610 + struct tss_struct *tss = init_tss + cpu;
18611 fpu_switch_t fpu;
18612
18613 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18614 @@ -264,6 +265,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18615 */
18616 lazy_save_gs(prev->gs);
18617
18618 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18619 + __set_fs(task_thread_info(next_p)->addr_limit);
18620 +#endif
18621 +
18622 /*
18623 * Load the per-thread Thread-Local Storage descriptor.
18624 */
18625 @@ -294,6 +299,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18626 */
18627 arch_end_context_switch(next_p);
18628
18629 + percpu_write(current_task, next_p);
18630 + percpu_write(current_tinfo, &next_p->tinfo);
18631 +
18632 /*
18633 * Restore %gs if needed (which is common)
18634 */
18635 @@ -302,8 +310,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18636
18637 switch_fpu_finish(next_p, fpu);
18638
18639 - percpu_write(current_task, next_p);
18640 -
18641 return prev_p;
18642 }
18643
18644 @@ -333,4 +339,3 @@ unsigned long get_wchan(struct task_struct *p)
18645 } while (count++ < 16);
18646 return 0;
18647 }
18648 -
18649 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18650 index 43d8b48..c45d566 100644
18651 --- a/arch/x86/kernel/process_64.c
18652 +++ b/arch/x86/kernel/process_64.c
18653 @@ -162,8 +162,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18654 struct pt_regs *childregs;
18655 struct task_struct *me = current;
18656
18657 - childregs = ((struct pt_regs *)
18658 - (THREAD_SIZE + task_stack_page(p))) - 1;
18659 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18660 *childregs = *regs;
18661
18662 childregs->ax = 0;
18663 @@ -175,6 +174,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18664 p->thread.sp = (unsigned long) childregs;
18665 p->thread.sp0 = (unsigned long) (childregs+1);
18666 p->thread.usersp = me->thread.usersp;
18667 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18668
18669 set_tsk_thread_flag(p, TIF_FORK);
18670
18671 @@ -280,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18672 struct thread_struct *prev = &prev_p->thread;
18673 struct thread_struct *next = &next_p->thread;
18674 int cpu = smp_processor_id();
18675 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18676 + struct tss_struct *tss = init_tss + cpu;
18677 unsigned fsindex, gsindex;
18678 fpu_switch_t fpu;
18679
18680 @@ -362,10 +362,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18681 prev->usersp = percpu_read(old_rsp);
18682 percpu_write(old_rsp, next->usersp);
18683 percpu_write(current_task, next_p);
18684 + percpu_write(current_tinfo, &next_p->tinfo);
18685
18686 - percpu_write(kernel_stack,
18687 - (unsigned long)task_stack_page(next_p) +
18688 - THREAD_SIZE - KERNEL_STACK_OFFSET);
18689 + percpu_write(kernel_stack, next->sp0);
18690
18691 /*
18692 * Now maybe reload the debug registers and handle I/O bitmaps
18693 @@ -434,12 +433,11 @@ unsigned long get_wchan(struct task_struct *p)
18694 if (!p || p == current || p->state == TASK_RUNNING)
18695 return 0;
18696 stack = (unsigned long)task_stack_page(p);
18697 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18698 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18699 return 0;
18700 fp = *(u64 *)(p->thread.sp);
18701 do {
18702 - if (fp < (unsigned long)stack ||
18703 - fp >= (unsigned long)stack+THREAD_SIZE)
18704 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18705 return 0;
18706 ip = *(u64 *)(fp+8);
18707 if (!in_sched_functions(ip))
18708 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18709 index cf11783..e7ce551 100644
18710 --- a/arch/x86/kernel/ptrace.c
18711 +++ b/arch/x86/kernel/ptrace.c
18712 @@ -824,7 +824,7 @@ long arch_ptrace(struct task_struct *child, long request,
18713 unsigned long addr, unsigned long data)
18714 {
18715 int ret;
18716 - unsigned long __user *datap = (unsigned long __user *)data;
18717 + unsigned long __user *datap = (__force unsigned long __user *)data;
18718
18719 switch (request) {
18720 /* read the word at location addr in the USER area. */
18721 @@ -909,14 +909,14 @@ long arch_ptrace(struct task_struct *child, long request,
18722 if ((int) addr < 0)
18723 return -EIO;
18724 ret = do_get_thread_area(child, addr,
18725 - (struct user_desc __user *)data);
18726 + (__force struct user_desc __user *) data);
18727 break;
18728
18729 case PTRACE_SET_THREAD_AREA:
18730 if ((int) addr < 0)
18731 return -EIO;
18732 ret = do_set_thread_area(child, addr,
18733 - (struct user_desc __user *)data, 0);
18734 + (__force struct user_desc __user *) data, 0);
18735 break;
18736 #endif
18737
18738 @@ -1426,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18739 memset(info, 0, sizeof(*info));
18740 info->si_signo = SIGTRAP;
18741 info->si_code = si_code;
18742 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18743 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18744 }
18745
18746 void user_single_step_siginfo(struct task_struct *tsk,
18747 @@ -1455,6 +1455,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18748 # define IS_IA32 0
18749 #endif
18750
18751 +#ifdef CONFIG_GRKERNSEC_SETXID
18752 +extern void gr_delayed_cred_worker(void);
18753 +#endif
18754 +
18755 /*
18756 * We must return the syscall number to actually look up in the table.
18757 * This can be -1L to skip running any syscall at all.
18758 @@ -1463,6 +1467,11 @@ long syscall_trace_enter(struct pt_regs *regs)
18759 {
18760 long ret = 0;
18761
18762 +#ifdef CONFIG_GRKERNSEC_SETXID
18763 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18764 + gr_delayed_cred_worker();
18765 +#endif
18766 +
18767 /*
18768 * If we stepped into a sysenter/syscall insn, it trapped in
18769 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
18770 @@ -1506,6 +1515,11 @@ void syscall_trace_leave(struct pt_regs *regs)
18771 {
18772 bool step;
18773
18774 +#ifdef CONFIG_GRKERNSEC_SETXID
18775 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18776 + gr_delayed_cred_worker();
18777 +#endif
18778 +
18779 audit_syscall_exit(regs);
18780
18781 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
18782 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18783 index 42eb330..139955c 100644
18784 --- a/arch/x86/kernel/pvclock.c
18785 +++ b/arch/x86/kernel/pvclock.c
18786 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18787 return pv_tsc_khz;
18788 }
18789
18790 -static atomic64_t last_value = ATOMIC64_INIT(0);
18791 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18792
18793 void pvclock_resume(void)
18794 {
18795 - atomic64_set(&last_value, 0);
18796 + atomic64_set_unchecked(&last_value, 0);
18797 }
18798
18799 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18800 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18801 * updating at the same time, and one of them could be slightly behind,
18802 * making the assumption that last_value always go forward fail to hold.
18803 */
18804 - last = atomic64_read(&last_value);
18805 + last = atomic64_read_unchecked(&last_value);
18806 do {
18807 if (ret < last)
18808 return last;
18809 - last = atomic64_cmpxchg(&last_value, last, ret);
18810 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18811 } while (unlikely(last != ret));
18812
18813 return ret;
18814 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18815 index d840e69..98e9581 100644
18816 --- a/arch/x86/kernel/reboot.c
18817 +++ b/arch/x86/kernel/reboot.c
18818 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
18819 EXPORT_SYMBOL(pm_power_off);
18820
18821 static const struct desc_ptr no_idt = {};
18822 -static int reboot_mode;
18823 +static unsigned short reboot_mode;
18824 enum reboot_type reboot_type = BOOT_ACPI;
18825 int reboot_force;
18826
18827 @@ -335,13 +335,17 @@ core_initcall(reboot_init);
18828 extern const unsigned char machine_real_restart_asm[];
18829 extern const u64 machine_real_restart_gdt[3];
18830
18831 -void machine_real_restart(unsigned int type)
18832 +__noreturn void machine_real_restart(unsigned int type)
18833 {
18834 void *restart_va;
18835 unsigned long restart_pa;
18836 - void (*restart_lowmem)(unsigned int);
18837 + void (* __noreturn restart_lowmem)(unsigned int);
18838 u64 *lowmem_gdt;
18839
18840 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18841 + struct desc_struct *gdt;
18842 +#endif
18843 +
18844 local_irq_disable();
18845
18846 /* Write zero to CMOS register number 0x0f, which the BIOS POST
18847 @@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
18848 boot)". This seems like a fairly standard thing that gets set by
18849 REBOOT.COM programs, and the previous reset routine did this
18850 too. */
18851 - *((unsigned short *)0x472) = reboot_mode;
18852 + *(unsigned short *)(__va(0x472)) = reboot_mode;
18853
18854 /* Patch the GDT in the low memory trampoline */
18855 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18856
18857 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18858 restart_pa = virt_to_phys(restart_va);
18859 - restart_lowmem = (void (*)(unsigned int))restart_pa;
18860 + restart_lowmem = (void *)restart_pa;
18861
18862 /* GDT[0]: GDT self-pointer */
18863 lowmem_gdt[0] =
18864 @@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
18865 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18866
18867 /* Jump to the identity-mapped low memory code */
18868 +
18869 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18870 + gdt = get_cpu_gdt_table(smp_processor_id());
18871 + pax_open_kernel();
18872 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18873 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18874 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18875 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18876 +#endif
18877 +#ifdef CONFIG_PAX_KERNEXEC
18878 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18879 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18880 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18881 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18882 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18883 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18884 +#endif
18885 + pax_close_kernel();
18886 +#endif
18887 +
18888 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18889 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18890 + unreachable();
18891 +#else
18892 restart_lowmem(type);
18893 +#endif
18894 +
18895 }
18896 #ifdef CONFIG_APM_MODULE
18897 EXPORT_SYMBOL(machine_real_restart);
18898 @@ -556,7 +586,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18899 * try to force a triple fault and then cycle between hitting the keyboard
18900 * controller and doing that
18901 */
18902 -static void native_machine_emergency_restart(void)
18903 +__noreturn static void native_machine_emergency_restart(void)
18904 {
18905 int i;
18906 int attempt = 0;
18907 @@ -680,13 +710,13 @@ void native_machine_shutdown(void)
18908 #endif
18909 }
18910
18911 -static void __machine_emergency_restart(int emergency)
18912 +static __noreturn void __machine_emergency_restart(int emergency)
18913 {
18914 reboot_emergency = emergency;
18915 machine_ops.emergency_restart();
18916 }
18917
18918 -static void native_machine_restart(char *__unused)
18919 +static __noreturn void native_machine_restart(char *__unused)
18920 {
18921 printk("machine restart\n");
18922
18923 @@ -695,7 +725,7 @@ static void native_machine_restart(char *__unused)
18924 __machine_emergency_restart(0);
18925 }
18926
18927 -static void native_machine_halt(void)
18928 +static __noreturn void native_machine_halt(void)
18929 {
18930 /* stop other cpus and apics */
18931 machine_shutdown();
18932 @@ -706,7 +736,7 @@ static void native_machine_halt(void)
18933 stop_this_cpu(NULL);
18934 }
18935
18936 -static void native_machine_power_off(void)
18937 +__noreturn static void native_machine_power_off(void)
18938 {
18939 if (pm_power_off) {
18940 if (!reboot_force)
18941 @@ -715,6 +745,7 @@ static void native_machine_power_off(void)
18942 }
18943 /* a fallback in case there is no PM info available */
18944 tboot_shutdown(TB_SHUTDOWN_HALT);
18945 + unreachable();
18946 }
18947
18948 struct machine_ops machine_ops = {
18949 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
18950 index 7a6f3b3..bed145d7 100644
18951 --- a/arch/x86/kernel/relocate_kernel_64.S
18952 +++ b/arch/x86/kernel/relocate_kernel_64.S
18953 @@ -11,6 +11,7 @@
18954 #include <asm/kexec.h>
18955 #include <asm/processor-flags.h>
18956 #include <asm/pgtable_types.h>
18957 +#include <asm/alternative-asm.h>
18958
18959 /*
18960 * Must be relocatable PIC code callable as a C function
18961 @@ -160,13 +161,14 @@ identity_mapped:
18962 xorq %rbp, %rbp
18963 xorq %r8, %r8
18964 xorq %r9, %r9
18965 - xorq %r10, %r9
18966 + xorq %r10, %r10
18967 xorq %r11, %r11
18968 xorq %r12, %r12
18969 xorq %r13, %r13
18970 xorq %r14, %r14
18971 xorq %r15, %r15
18972
18973 + pax_force_retaddr 0, 1
18974 ret
18975
18976 1:
18977 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
18978 index 1a29015..712f324 100644
18979 --- a/arch/x86/kernel/setup.c
18980 +++ b/arch/x86/kernel/setup.c
18981 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
18982
18983 switch (data->type) {
18984 case SETUP_E820_EXT:
18985 - parse_e820_ext(data);
18986 + parse_e820_ext((struct setup_data __force_kernel *)data);
18987 break;
18988 case SETUP_DTB:
18989 add_dtb(pa_data);
18990 @@ -639,7 +639,7 @@ static void __init trim_bios_range(void)
18991 * area (640->1Mb) as ram even though it is not.
18992 * take them out.
18993 */
18994 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
18995 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
18996 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
18997 }
18998
18999 @@ -763,14 +763,14 @@ void __init setup_arch(char **cmdline_p)
19000
19001 if (!boot_params.hdr.root_flags)
19002 root_mountflags &= ~MS_RDONLY;
19003 - init_mm.start_code = (unsigned long) _text;
19004 - init_mm.end_code = (unsigned long) _etext;
19005 + init_mm.start_code = ktla_ktva((unsigned long) _text);
19006 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
19007 init_mm.end_data = (unsigned long) _edata;
19008 init_mm.brk = _brk_end;
19009
19010 - code_resource.start = virt_to_phys(_text);
19011 - code_resource.end = virt_to_phys(_etext)-1;
19012 - data_resource.start = virt_to_phys(_etext);
19013 + code_resource.start = virt_to_phys(ktla_ktva(_text));
19014 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19015 + data_resource.start = virt_to_phys(_sdata);
19016 data_resource.end = virt_to_phys(_edata)-1;
19017 bss_resource.start = virt_to_phys(&__bss_start);
19018 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19019 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19020 index 5a98aa2..2f9288d 100644
19021 --- a/arch/x86/kernel/setup_percpu.c
19022 +++ b/arch/x86/kernel/setup_percpu.c
19023 @@ -21,19 +21,17 @@
19024 #include <asm/cpu.h>
19025 #include <asm/stackprotector.h>
19026
19027 -DEFINE_PER_CPU(int, cpu_number);
19028 +#ifdef CONFIG_SMP
19029 +DEFINE_PER_CPU(unsigned int, cpu_number);
19030 EXPORT_PER_CPU_SYMBOL(cpu_number);
19031 +#endif
19032
19033 -#ifdef CONFIG_X86_64
19034 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19035 -#else
19036 -#define BOOT_PERCPU_OFFSET 0
19037 -#endif
19038
19039 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19040 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19041
19042 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19043 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19044 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19045 };
19046 EXPORT_SYMBOL(__per_cpu_offset);
19047 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
19048 {
19049 #ifdef CONFIG_X86_32
19050 struct desc_struct gdt;
19051 + unsigned long base = per_cpu_offset(cpu);
19052
19053 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19054 - 0x2 | DESCTYPE_S, 0x8);
19055 - gdt.s = 1;
19056 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19057 + 0x83 | DESCTYPE_S, 0xC);
19058 write_gdt_entry(get_cpu_gdt_table(cpu),
19059 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19060 #endif
19061 @@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
19062 /* alrighty, percpu areas up and running */
19063 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19064 for_each_possible_cpu(cpu) {
19065 +#ifdef CONFIG_CC_STACKPROTECTOR
19066 +#ifdef CONFIG_X86_32
19067 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
19068 +#endif
19069 +#endif
19070 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19071 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19072 per_cpu(cpu_number, cpu) = cpu;
19073 @@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
19074 */
19075 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
19076 #endif
19077 +#ifdef CONFIG_CC_STACKPROTECTOR
19078 +#ifdef CONFIG_X86_32
19079 + if (!cpu)
19080 + per_cpu(stack_canary.canary, cpu) = canary;
19081 +#endif
19082 +#endif
19083 /*
19084 * Up to this point, the boot CPU has been using .init.data
19085 * area. Reload any changed state for the boot CPU.
19086 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19087 index 115eac4..c0591d5 100644
19088 --- a/arch/x86/kernel/signal.c
19089 +++ b/arch/x86/kernel/signal.c
19090 @@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
19091 * Align the stack pointer according to the i386 ABI,
19092 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19093 */
19094 - sp = ((sp + 4) & -16ul) - 4;
19095 + sp = ((sp - 12) & -16ul) - 4;
19096 #else /* !CONFIG_X86_32 */
19097 sp = round_down(sp, 16) - 8;
19098 #endif
19099 @@ -241,11 +241,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19100 * Return an always-bogus address instead so we will die with SIGSEGV.
19101 */
19102 if (onsigstack && !likely(on_sig_stack(sp)))
19103 - return (void __user *)-1L;
19104 + return (__force void __user *)-1L;
19105
19106 /* save i387 state */
19107 if (used_math() && save_i387_xstate(*fpstate) < 0)
19108 - return (void __user *)-1L;
19109 + return (__force void __user *)-1L;
19110
19111 return (void __user *)sp;
19112 }
19113 @@ -300,9 +300,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19114 }
19115
19116 if (current->mm->context.vdso)
19117 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19118 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19119 else
19120 - restorer = &frame->retcode;
19121 + restorer = (void __user *)&frame->retcode;
19122 if (ka->sa.sa_flags & SA_RESTORER)
19123 restorer = ka->sa.sa_restorer;
19124
19125 @@ -316,7 +316,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19126 * reasons and because gdb uses it as a signature to notice
19127 * signal handler stack frames.
19128 */
19129 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19130 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19131
19132 if (err)
19133 return -EFAULT;
19134 @@ -370,7 +370,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19135 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19136
19137 /* Set up to return from userspace. */
19138 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19139 + if (current->mm->context.vdso)
19140 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19141 + else
19142 + restorer = (void __user *)&frame->retcode;
19143 if (ka->sa.sa_flags & SA_RESTORER)
19144 restorer = ka->sa.sa_restorer;
19145 put_user_ex(restorer, &frame->pretcode);
19146 @@ -382,7 +385,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19147 * reasons and because gdb uses it as a signature to notice
19148 * signal handler stack frames.
19149 */
19150 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19151 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19152 } put_user_catch(err);
19153
19154 if (err)
19155 @@ -773,7 +776,7 @@ static void do_signal(struct pt_regs *regs)
19156 * X86_32: vm86 regs switched out by assembly code before reaching
19157 * here, so testing against kernel CS suffices.
19158 */
19159 - if (!user_mode(regs))
19160 + if (!user_mode_novm(regs))
19161 return;
19162
19163 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
19164 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19165 index 6e1e406..edfb7cb 100644
19166 --- a/arch/x86/kernel/smpboot.c
19167 +++ b/arch/x86/kernel/smpboot.c
19168 @@ -699,17 +699,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19169 set_idle_for_cpu(cpu, c_idle.idle);
19170 do_rest:
19171 per_cpu(current_task, cpu) = c_idle.idle;
19172 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19173 #ifdef CONFIG_X86_32
19174 /* Stack for startup_32 can be just as for start_secondary onwards */
19175 irq_ctx_init(cpu);
19176 #else
19177 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19178 initial_gs = per_cpu_offset(cpu);
19179 - per_cpu(kernel_stack, cpu) =
19180 - (unsigned long)task_stack_page(c_idle.idle) -
19181 - KERNEL_STACK_OFFSET + THREAD_SIZE;
19182 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19183 #endif
19184 +
19185 + pax_open_kernel();
19186 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19187 + pax_close_kernel();
19188 +
19189 initial_code = (unsigned long)start_secondary;
19190 stack_start = c_idle.idle->thread.sp;
19191
19192 @@ -851,6 +854,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19193
19194 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19195
19196 +#ifdef CONFIG_PAX_PER_CPU_PGD
19197 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19198 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19199 + KERNEL_PGD_PTRS);
19200 +#endif
19201 +
19202 err = do_boot_cpu(apicid, cpu);
19203 if (err) {
19204 pr_debug("do_boot_cpu failed %d\n", err);
19205 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19206 index c346d11..d43b163 100644
19207 --- a/arch/x86/kernel/step.c
19208 +++ b/arch/x86/kernel/step.c
19209 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19210 struct desc_struct *desc;
19211 unsigned long base;
19212
19213 - seg &= ~7UL;
19214 + seg >>= 3;
19215
19216 mutex_lock(&child->mm->context.lock);
19217 - if (unlikely((seg >> 3) >= child->mm->context.size))
19218 + if (unlikely(seg >= child->mm->context.size))
19219 addr = -1L; /* bogus selector, access would fault */
19220 else {
19221 desc = child->mm->context.ldt + seg;
19222 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19223 addr += base;
19224 }
19225 mutex_unlock(&child->mm->context.lock);
19226 - }
19227 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19228 + addr = ktla_ktva(addr);
19229
19230 return addr;
19231 }
19232 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19233 unsigned char opcode[15];
19234 unsigned long addr = convert_ip_to_linear(child, regs);
19235
19236 + if (addr == -EINVAL)
19237 + return 0;
19238 +
19239 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19240 for (i = 0; i < copied; i++) {
19241 switch (opcode[i]) {
19242 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19243 index 0b0cb5f..db6b9ed 100644
19244 --- a/arch/x86/kernel/sys_i386_32.c
19245 +++ b/arch/x86/kernel/sys_i386_32.c
19246 @@ -24,17 +24,224 @@
19247
19248 #include <asm/syscalls.h>
19249
19250 -/*
19251 - * Do a system call from kernel instead of calling sys_execve so we
19252 - * end up with proper pt_regs.
19253 - */
19254 -int kernel_execve(const char *filename,
19255 - const char *const argv[],
19256 - const char *const envp[])
19257 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19258 {
19259 - long __res;
19260 - asm volatile ("int $0x80"
19261 - : "=a" (__res)
19262 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
19263 - return __res;
19264 + unsigned long pax_task_size = TASK_SIZE;
19265 +
19266 +#ifdef CONFIG_PAX_SEGMEXEC
19267 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19268 + pax_task_size = SEGMEXEC_TASK_SIZE;
19269 +#endif
19270 +
19271 + if (len > pax_task_size || addr > pax_task_size - len)
19272 + return -EINVAL;
19273 +
19274 + return 0;
19275 +}
19276 +
19277 +unsigned long
19278 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
19279 + unsigned long len, unsigned long pgoff, unsigned long flags)
19280 +{
19281 + struct mm_struct *mm = current->mm;
19282 + struct vm_area_struct *vma;
19283 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19284 +
19285 +#ifdef CONFIG_PAX_SEGMEXEC
19286 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19287 + pax_task_size = SEGMEXEC_TASK_SIZE;
19288 +#endif
19289 +
19290 + pax_task_size -= PAGE_SIZE;
19291 +
19292 + if (len > pax_task_size)
19293 + return -ENOMEM;
19294 +
19295 + if (flags & MAP_FIXED)
19296 + return addr;
19297 +
19298 +#ifdef CONFIG_PAX_RANDMMAP
19299 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19300 +#endif
19301 +
19302 + if (addr) {
19303 + addr = PAGE_ALIGN(addr);
19304 + if (pax_task_size - len >= addr) {
19305 + vma = find_vma(mm, addr);
19306 + if (check_heap_stack_gap(vma, addr, len))
19307 + return addr;
19308 + }
19309 + }
19310 + if (len > mm->cached_hole_size) {
19311 + start_addr = addr = mm->free_area_cache;
19312 + } else {
19313 + start_addr = addr = mm->mmap_base;
19314 + mm->cached_hole_size = 0;
19315 + }
19316 +
19317 +#ifdef CONFIG_PAX_PAGEEXEC
19318 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19319 + start_addr = 0x00110000UL;
19320 +
19321 +#ifdef CONFIG_PAX_RANDMMAP
19322 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19323 + start_addr += mm->delta_mmap & 0x03FFF000UL;
19324 +#endif
19325 +
19326 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19327 + start_addr = addr = mm->mmap_base;
19328 + else
19329 + addr = start_addr;
19330 + }
19331 +#endif
19332 +
19333 +full_search:
19334 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19335 + /* At this point: (!vma || addr < vma->vm_end). */
19336 + if (pax_task_size - len < addr) {
19337 + /*
19338 + * Start a new search - just in case we missed
19339 + * some holes.
19340 + */
19341 + if (start_addr != mm->mmap_base) {
19342 + start_addr = addr = mm->mmap_base;
19343 + mm->cached_hole_size = 0;
19344 + goto full_search;
19345 + }
19346 + return -ENOMEM;
19347 + }
19348 + if (check_heap_stack_gap(vma, addr, len))
19349 + break;
19350 + if (addr + mm->cached_hole_size < vma->vm_start)
19351 + mm->cached_hole_size = vma->vm_start - addr;
19352 + addr = vma->vm_end;
19353 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
19354 + start_addr = addr = mm->mmap_base;
19355 + mm->cached_hole_size = 0;
19356 + goto full_search;
19357 + }
19358 + }
19359 +
19360 + /*
19361 + * Remember the place where we stopped the search:
19362 + */
19363 + mm->free_area_cache = addr + len;
19364 + return addr;
19365 +}
19366 +
19367 +unsigned long
19368 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19369 + const unsigned long len, const unsigned long pgoff,
19370 + const unsigned long flags)
19371 +{
19372 + struct vm_area_struct *vma;
19373 + struct mm_struct *mm = current->mm;
19374 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19375 +
19376 +#ifdef CONFIG_PAX_SEGMEXEC
19377 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19378 + pax_task_size = SEGMEXEC_TASK_SIZE;
19379 +#endif
19380 +
19381 + pax_task_size -= PAGE_SIZE;
19382 +
19383 + /* requested length too big for entire address space */
19384 + if (len > pax_task_size)
19385 + return -ENOMEM;
19386 +
19387 + if (flags & MAP_FIXED)
19388 + return addr;
19389 +
19390 +#ifdef CONFIG_PAX_PAGEEXEC
19391 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19392 + goto bottomup;
19393 +#endif
19394 +
19395 +#ifdef CONFIG_PAX_RANDMMAP
19396 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19397 +#endif
19398 +
19399 + /* requesting a specific address */
19400 + if (addr) {
19401 + addr = PAGE_ALIGN(addr);
19402 + if (pax_task_size - len >= addr) {
19403 + vma = find_vma(mm, addr);
19404 + if (check_heap_stack_gap(vma, addr, len))
19405 + return addr;
19406 + }
19407 + }
19408 +
19409 + /* check if free_area_cache is useful for us */
19410 + if (len <= mm->cached_hole_size) {
19411 + mm->cached_hole_size = 0;
19412 + mm->free_area_cache = mm->mmap_base;
19413 + }
19414 +
19415 + /* either no address requested or can't fit in requested address hole */
19416 + addr = mm->free_area_cache;
19417 +
19418 + /* make sure it can fit in the remaining address space */
19419 + if (addr > len) {
19420 + vma = find_vma(mm, addr-len);
19421 + if (check_heap_stack_gap(vma, addr - len, len))
19422 + /* remember the address as a hint for next time */
19423 + return (mm->free_area_cache = addr-len);
19424 + }
19425 +
19426 + if (mm->mmap_base < len)
19427 + goto bottomup;
19428 +
19429 + addr = mm->mmap_base-len;
19430 +
19431 + do {
19432 + /*
19433 + * Lookup failure means no vma is above this address,
19434 + * else if new region fits below vma->vm_start,
19435 + * return with success:
19436 + */
19437 + vma = find_vma(mm, addr);
19438 + if (check_heap_stack_gap(vma, addr, len))
19439 + /* remember the address as a hint for next time */
19440 + return (mm->free_area_cache = addr);
19441 +
19442 + /* remember the largest hole we saw so far */
19443 + if (addr + mm->cached_hole_size < vma->vm_start)
19444 + mm->cached_hole_size = vma->vm_start - addr;
19445 +
19446 + /* try just below the current vma->vm_start */
19447 + addr = skip_heap_stack_gap(vma, len);
19448 + } while (!IS_ERR_VALUE(addr));
19449 +
19450 +bottomup:
19451 + /*
19452 + * A failed mmap() very likely causes application failure,
19453 + * so fall back to the bottom-up function here. This scenario
19454 + * can happen with large stack limits and large mmap()
19455 + * allocations.
19456 + */
19457 +
19458 +#ifdef CONFIG_PAX_SEGMEXEC
19459 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19460 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19461 + else
19462 +#endif
19463 +
19464 + mm->mmap_base = TASK_UNMAPPED_BASE;
19465 +
19466 +#ifdef CONFIG_PAX_RANDMMAP
19467 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19468 + mm->mmap_base += mm->delta_mmap;
19469 +#endif
19470 +
19471 + mm->free_area_cache = mm->mmap_base;
19472 + mm->cached_hole_size = ~0UL;
19473 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19474 + /*
19475 + * Restore the topdown base:
19476 + */
19477 + mm->mmap_base = base;
19478 + mm->free_area_cache = base;
19479 + mm->cached_hole_size = ~0UL;
19480 +
19481 + return addr;
19482 }
19483 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19484 index b4d3c39..82bb73b 100644
19485 --- a/arch/x86/kernel/sys_x86_64.c
19486 +++ b/arch/x86/kernel/sys_x86_64.c
19487 @@ -95,8 +95,8 @@ out:
19488 return error;
19489 }
19490
19491 -static void find_start_end(unsigned long flags, unsigned long *begin,
19492 - unsigned long *end)
19493 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
19494 + unsigned long *begin, unsigned long *end)
19495 {
19496 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
19497 unsigned long new_begin;
19498 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19499 *begin = new_begin;
19500 }
19501 } else {
19502 - *begin = TASK_UNMAPPED_BASE;
19503 + *begin = mm->mmap_base;
19504 *end = TASK_SIZE;
19505 }
19506 }
19507 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19508 if (flags & MAP_FIXED)
19509 return addr;
19510
19511 - find_start_end(flags, &begin, &end);
19512 + find_start_end(mm, flags, &begin, &end);
19513
19514 if (len > end)
19515 return -ENOMEM;
19516
19517 +#ifdef CONFIG_PAX_RANDMMAP
19518 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19519 +#endif
19520 +
19521 if (addr) {
19522 addr = PAGE_ALIGN(addr);
19523 vma = find_vma(mm, addr);
19524 - if (end - len >= addr &&
19525 - (!vma || addr + len <= vma->vm_start))
19526 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19527 return addr;
19528 }
19529 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
19530 @@ -172,7 +175,7 @@ full_search:
19531 }
19532 return -ENOMEM;
19533 }
19534 - if (!vma || addr + len <= vma->vm_start) {
19535 + if (check_heap_stack_gap(vma, addr, len)) {
19536 /*
19537 * Remember the place where we stopped the search:
19538 */
19539 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19540 {
19541 struct vm_area_struct *vma;
19542 struct mm_struct *mm = current->mm;
19543 - unsigned long addr = addr0, start_addr;
19544 + unsigned long base = mm->mmap_base, addr = addr0, start_addr;
19545
19546 /* requested length too big for entire address space */
19547 if (len > TASK_SIZE)
19548 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19549 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
19550 goto bottomup;
19551
19552 +#ifdef CONFIG_PAX_RANDMMAP
19553 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19554 +#endif
19555 +
19556 /* requesting a specific address */
19557 if (addr) {
19558 addr = PAGE_ALIGN(addr);
19559 - vma = find_vma(mm, addr);
19560 - if (TASK_SIZE - len >= addr &&
19561 - (!vma || addr + len <= vma->vm_start))
19562 - return addr;
19563 + if (TASK_SIZE - len >= addr) {
19564 + vma = find_vma(mm, addr);
19565 + if (check_heap_stack_gap(vma, addr, len))
19566 + return addr;
19567 + }
19568 }
19569
19570 /* check if free_area_cache is useful for us */
19571 @@ -240,7 +248,7 @@ try_again:
19572 * return with success:
19573 */
19574 vma = find_vma(mm, addr);
19575 - if (!vma || addr+len <= vma->vm_start)
19576 + if (check_heap_stack_gap(vma, addr, len))
19577 /* remember the address as a hint for next time */
19578 return mm->free_area_cache = addr;
19579
19580 @@ -249,8 +257,8 @@ try_again:
19581 mm->cached_hole_size = vma->vm_start - addr;
19582
19583 /* try just below the current vma->vm_start */
19584 - addr = vma->vm_start-len;
19585 - } while (len < vma->vm_start);
19586 + addr = skip_heap_stack_gap(vma, len);
19587 + } while (!IS_ERR_VALUE(addr));
19588
19589 fail:
19590 /*
19591 @@ -270,13 +278,21 @@ bottomup:
19592 * can happen with large stack limits and large mmap()
19593 * allocations.
19594 */
19595 + mm->mmap_base = TASK_UNMAPPED_BASE;
19596 +
19597 +#ifdef CONFIG_PAX_RANDMMAP
19598 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19599 + mm->mmap_base += mm->delta_mmap;
19600 +#endif
19601 +
19602 + mm->free_area_cache = mm->mmap_base;
19603 mm->cached_hole_size = ~0UL;
19604 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19605 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19606 /*
19607 * Restore the topdown base:
19608 */
19609 - mm->free_area_cache = mm->mmap_base;
19610 + mm->mmap_base = base;
19611 + mm->free_area_cache = base;
19612 mm->cached_hole_size = ~0UL;
19613
19614 return addr;
19615 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19616 index 6410744..79758f0 100644
19617 --- a/arch/x86/kernel/tboot.c
19618 +++ b/arch/x86/kernel/tboot.c
19619 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
19620
19621 void tboot_shutdown(u32 shutdown_type)
19622 {
19623 - void (*shutdown)(void);
19624 + void (* __noreturn shutdown)(void);
19625
19626 if (!tboot_enabled())
19627 return;
19628 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
19629
19630 switch_to_tboot_pt();
19631
19632 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19633 + shutdown = (void *)tboot->shutdown_entry;
19634 shutdown();
19635
19636 /* should not reach here */
19637 @@ -299,7 +299,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19638 return 0;
19639 }
19640
19641 -static atomic_t ap_wfs_count;
19642 +static atomic_unchecked_t ap_wfs_count;
19643
19644 static int tboot_wait_for_aps(int num_aps)
19645 {
19646 @@ -323,9 +323,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19647 {
19648 switch (action) {
19649 case CPU_DYING:
19650 - atomic_inc(&ap_wfs_count);
19651 + atomic_inc_unchecked(&ap_wfs_count);
19652 if (num_online_cpus() == 1)
19653 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19654 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19655 return NOTIFY_BAD;
19656 break;
19657 }
19658 @@ -344,7 +344,7 @@ static __init int tboot_late_init(void)
19659
19660 tboot_create_trampoline();
19661
19662 - atomic_set(&ap_wfs_count, 0);
19663 + atomic_set_unchecked(&ap_wfs_count, 0);
19664 register_hotcpu_notifier(&tboot_cpu_notifier);
19665
19666 acpi_os_set_prepare_sleep(&tboot_sleep);
19667 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19668 index c6eba2b..3303326 100644
19669 --- a/arch/x86/kernel/time.c
19670 +++ b/arch/x86/kernel/time.c
19671 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
19672 {
19673 unsigned long pc = instruction_pointer(regs);
19674
19675 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19676 + if (!user_mode(regs) && in_lock_functions(pc)) {
19677 #ifdef CONFIG_FRAME_POINTER
19678 - return *(unsigned long *)(regs->bp + sizeof(long));
19679 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19680 #else
19681 unsigned long *sp =
19682 (unsigned long *)kernel_stack_pointer(regs);
19683 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19684 * or above a saved flags. Eflags has bits 22-31 zero,
19685 * kernel addresses don't.
19686 */
19687 +
19688 +#ifdef CONFIG_PAX_KERNEXEC
19689 + return ktla_ktva(sp[0]);
19690 +#else
19691 if (sp[0] >> 22)
19692 return sp[0];
19693 if (sp[1] >> 22)
19694 return sp[1];
19695 #endif
19696 +
19697 +#endif
19698 }
19699 return pc;
19700 }
19701 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19702 index 9d9d2f9..ed344e4 100644
19703 --- a/arch/x86/kernel/tls.c
19704 +++ b/arch/x86/kernel/tls.c
19705 @@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19706 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19707 return -EINVAL;
19708
19709 +#ifdef CONFIG_PAX_SEGMEXEC
19710 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19711 + return -EINVAL;
19712 +#endif
19713 +
19714 set_tls_desc(p, idx, &info, 1);
19715
19716 return 0;
19717 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19718 index 451c0a7..e57f551 100644
19719 --- a/arch/x86/kernel/trampoline_32.S
19720 +++ b/arch/x86/kernel/trampoline_32.S
19721 @@ -32,6 +32,12 @@
19722 #include <asm/segment.h>
19723 #include <asm/page_types.h>
19724
19725 +#ifdef CONFIG_PAX_KERNEXEC
19726 +#define ta(X) (X)
19727 +#else
19728 +#define ta(X) ((X) - __PAGE_OFFSET)
19729 +#endif
19730 +
19731 #ifdef CONFIG_SMP
19732
19733 .section ".x86_trampoline","a"
19734 @@ -62,7 +68,7 @@ r_base = .
19735 inc %ax # protected mode (PE) bit
19736 lmsw %ax # into protected mode
19737 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19738 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19739 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
19740
19741 # These need to be in the same 64K segment as the above;
19742 # hence we don't use the boot_gdt_descr defined in head.S
19743 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19744 index 09ff517..df19fbff 100644
19745 --- a/arch/x86/kernel/trampoline_64.S
19746 +++ b/arch/x86/kernel/trampoline_64.S
19747 @@ -90,7 +90,7 @@ startup_32:
19748 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19749 movl %eax, %ds
19750
19751 - movl $X86_CR4_PAE, %eax
19752 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19753 movl %eax, %cr4 # Enable PAE mode
19754
19755 # Setup trampoline 4 level pagetables
19756 @@ -138,7 +138,7 @@ tidt:
19757 # so the kernel can live anywhere
19758 .balign 4
19759 tgdt:
19760 - .short tgdt_end - tgdt # gdt limit
19761 + .short tgdt_end - tgdt - 1 # gdt limit
19762 .long tgdt - r_base
19763 .short 0
19764 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19765 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19766 index ff9281f1..30cb4ac 100644
19767 --- a/arch/x86/kernel/traps.c
19768 +++ b/arch/x86/kernel/traps.c
19769 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
19770
19771 /* Do we ignore FPU interrupts ? */
19772 char ignore_fpu_irq;
19773 -
19774 -/*
19775 - * The IDT has to be page-aligned to simplify the Pentium
19776 - * F0 0F bug workaround.
19777 - */
19778 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19779 #endif
19780
19781 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19782 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19783 }
19784
19785 static void __kprobes
19786 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19787 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19788 long error_code, siginfo_t *info)
19789 {
19790 struct task_struct *tsk = current;
19791
19792 #ifdef CONFIG_X86_32
19793 - if (regs->flags & X86_VM_MASK) {
19794 + if (v8086_mode(regs)) {
19795 /*
19796 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19797 * On nmi (interrupt 2), do_trap should not be called.
19798 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19799 }
19800 #endif
19801
19802 - if (!user_mode(regs))
19803 + if (!user_mode_novm(regs))
19804 goto kernel_trap;
19805
19806 #ifdef CONFIG_X86_32
19807 @@ -148,7 +142,7 @@ trap_signal:
19808 printk_ratelimit()) {
19809 printk(KERN_INFO
19810 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19811 - tsk->comm, tsk->pid, str,
19812 + tsk->comm, task_pid_nr(tsk), str,
19813 regs->ip, regs->sp, error_code);
19814 print_vma_addr(" in ", regs->ip);
19815 printk("\n");
19816 @@ -165,8 +159,20 @@ kernel_trap:
19817 if (!fixup_exception(regs)) {
19818 tsk->thread.error_code = error_code;
19819 tsk->thread.trap_nr = trapnr;
19820 +
19821 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19822 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19823 + str = "PAX: suspicious stack segment fault";
19824 +#endif
19825 +
19826 die(str, regs, error_code);
19827 }
19828 +
19829 +#ifdef CONFIG_PAX_REFCOUNT
19830 + if (trapnr == 4)
19831 + pax_report_refcount_overflow(regs);
19832 +#endif
19833 +
19834 return;
19835
19836 #ifdef CONFIG_X86_32
19837 @@ -259,14 +265,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19838 conditional_sti(regs);
19839
19840 #ifdef CONFIG_X86_32
19841 - if (regs->flags & X86_VM_MASK)
19842 + if (v8086_mode(regs))
19843 goto gp_in_vm86;
19844 #endif
19845
19846 tsk = current;
19847 - if (!user_mode(regs))
19848 + if (!user_mode_novm(regs))
19849 goto gp_in_kernel;
19850
19851 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19852 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19853 + struct mm_struct *mm = tsk->mm;
19854 + unsigned long limit;
19855 +
19856 + down_write(&mm->mmap_sem);
19857 + limit = mm->context.user_cs_limit;
19858 + if (limit < TASK_SIZE) {
19859 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19860 + up_write(&mm->mmap_sem);
19861 + return;
19862 + }
19863 + up_write(&mm->mmap_sem);
19864 + }
19865 +#endif
19866 +
19867 tsk->thread.error_code = error_code;
19868 tsk->thread.trap_nr = X86_TRAP_GP;
19869
19870 @@ -299,6 +321,13 @@ gp_in_kernel:
19871 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
19872 X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
19873 return;
19874 +
19875 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19876 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19877 + die("PAX: suspicious general protection fault", regs, error_code);
19878 + else
19879 +#endif
19880 +
19881 die("general protection fault", regs, error_code);
19882 }
19883
19884 @@ -425,7 +454,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19885 /* It's safe to allow irq's after DR6 has been saved */
19886 preempt_conditional_sti(regs);
19887
19888 - if (regs->flags & X86_VM_MASK) {
19889 + if (v8086_mode(regs)) {
19890 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
19891 X86_TRAP_DB);
19892 preempt_conditional_cli(regs);
19893 @@ -440,7 +469,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19894 * We already checked v86 mode above, so we can check for kernel mode
19895 * by just checking the CPL of CS.
19896 */
19897 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
19898 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19899 tsk->thread.debugreg6 &= ~DR_STEP;
19900 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19901 regs->flags &= ~X86_EFLAGS_TF;
19902 @@ -471,7 +500,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19903 return;
19904 conditional_sti(regs);
19905
19906 - if (!user_mode_vm(regs))
19907 + if (!user_mode(regs))
19908 {
19909 if (!fixup_exception(regs)) {
19910 task->thread.error_code = error_code;
19911 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19912 index b9242ba..50c5edd 100644
19913 --- a/arch/x86/kernel/verify_cpu.S
19914 +++ b/arch/x86/kernel/verify_cpu.S
19915 @@ -20,6 +20,7 @@
19916 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19917 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19918 * arch/x86/kernel/head_32.S: processor startup
19919 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19920 *
19921 * verify_cpu, returns the status of longmode and SSE in register %eax.
19922 * 0: Success 1: Failure
19923 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19924 index 255f58a..5e91150 100644
19925 --- a/arch/x86/kernel/vm86_32.c
19926 +++ b/arch/x86/kernel/vm86_32.c
19927 @@ -41,6 +41,7 @@
19928 #include <linux/ptrace.h>
19929 #include <linux/audit.h>
19930 #include <linux/stddef.h>
19931 +#include <linux/grsecurity.h>
19932
19933 #include <asm/uaccess.h>
19934 #include <asm/io.h>
19935 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
19936 do_exit(SIGSEGV);
19937 }
19938
19939 - tss = &per_cpu(init_tss, get_cpu());
19940 + tss = init_tss + get_cpu();
19941 current->thread.sp0 = current->thread.saved_sp0;
19942 current->thread.sysenter_cs = __KERNEL_CS;
19943 load_sp0(tss, &current->thread);
19944 @@ -210,6 +211,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
19945 struct task_struct *tsk;
19946 int tmp, ret = -EPERM;
19947
19948 +#ifdef CONFIG_GRKERNSEC_VM86
19949 + if (!capable(CAP_SYS_RAWIO)) {
19950 + gr_handle_vm86();
19951 + goto out;
19952 + }
19953 +#endif
19954 +
19955 tsk = current;
19956 if (tsk->thread.saved_sp0)
19957 goto out;
19958 @@ -240,6 +248,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
19959 int tmp, ret;
19960 struct vm86plus_struct __user *v86;
19961
19962 +#ifdef CONFIG_GRKERNSEC_VM86
19963 + if (!capable(CAP_SYS_RAWIO)) {
19964 + gr_handle_vm86();
19965 + ret = -EPERM;
19966 + goto out;
19967 + }
19968 +#endif
19969 +
19970 tsk = current;
19971 switch (cmd) {
19972 case VM86_REQUEST_IRQ:
19973 @@ -326,7 +342,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
19974 tsk->thread.saved_fs = info->regs32->fs;
19975 tsk->thread.saved_gs = get_user_gs(info->regs32);
19976
19977 - tss = &per_cpu(init_tss, get_cpu());
19978 + tss = init_tss + get_cpu();
19979 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
19980 if (cpu_has_sep)
19981 tsk->thread.sysenter_cs = 0;
19982 @@ -533,7 +549,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
19983 goto cannot_handle;
19984 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
19985 goto cannot_handle;
19986 - intr_ptr = (unsigned long __user *) (i << 2);
19987 + intr_ptr = (__force unsigned long __user *) (i << 2);
19988 if (get_user(segoffs, intr_ptr))
19989 goto cannot_handle;
19990 if ((segoffs >> 16) == BIOSSEG)
19991 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
19992 index 0f703f1..9e15f64 100644
19993 --- a/arch/x86/kernel/vmlinux.lds.S
19994 +++ b/arch/x86/kernel/vmlinux.lds.S
19995 @@ -26,6 +26,13 @@
19996 #include <asm/page_types.h>
19997 #include <asm/cache.h>
19998 #include <asm/boot.h>
19999 +#include <asm/segment.h>
20000 +
20001 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20002 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20003 +#else
20004 +#define __KERNEL_TEXT_OFFSET 0
20005 +#endif
20006
20007 #undef i386 /* in case the preprocessor is a 32bit one */
20008
20009 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
20010
20011 PHDRS {
20012 text PT_LOAD FLAGS(5); /* R_E */
20013 +#ifdef CONFIG_X86_32
20014 + module PT_LOAD FLAGS(5); /* R_E */
20015 +#endif
20016 +#ifdef CONFIG_XEN
20017 + rodata PT_LOAD FLAGS(5); /* R_E */
20018 +#else
20019 + rodata PT_LOAD FLAGS(4); /* R__ */
20020 +#endif
20021 data PT_LOAD FLAGS(6); /* RW_ */
20022 -#ifdef CONFIG_X86_64
20023 + init.begin PT_LOAD FLAGS(6); /* RW_ */
20024 #ifdef CONFIG_SMP
20025 percpu PT_LOAD FLAGS(6); /* RW_ */
20026 #endif
20027 + text.init PT_LOAD FLAGS(5); /* R_E */
20028 + text.exit PT_LOAD FLAGS(5); /* R_E */
20029 init PT_LOAD FLAGS(7); /* RWE */
20030 -#endif
20031 note PT_NOTE FLAGS(0); /* ___ */
20032 }
20033
20034 SECTIONS
20035 {
20036 #ifdef CONFIG_X86_32
20037 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20038 - phys_startup_32 = startup_32 - LOAD_OFFSET;
20039 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20040 #else
20041 - . = __START_KERNEL;
20042 - phys_startup_64 = startup_64 - LOAD_OFFSET;
20043 + . = __START_KERNEL;
20044 #endif
20045
20046 /* Text and read-only data */
20047 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
20048 - _text = .;
20049 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20050 /* bootstrapping code */
20051 +#ifdef CONFIG_X86_32
20052 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20053 +#else
20054 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20055 +#endif
20056 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20057 + _text = .;
20058 HEAD_TEXT
20059 #ifdef CONFIG_X86_32
20060 . = ALIGN(PAGE_SIZE);
20061 @@ -108,13 +128,47 @@ SECTIONS
20062 IRQENTRY_TEXT
20063 *(.fixup)
20064 *(.gnu.warning)
20065 - /* End of text section */
20066 - _etext = .;
20067 } :text = 0x9090
20068
20069 - NOTES :text :note
20070 + . += __KERNEL_TEXT_OFFSET;
20071
20072 - EXCEPTION_TABLE(16) :text = 0x9090
20073 +#ifdef CONFIG_X86_32
20074 + . = ALIGN(PAGE_SIZE);
20075 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20076 +
20077 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20078 + MODULES_EXEC_VADDR = .;
20079 + BYTE(0)
20080 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20081 + . = ALIGN(HPAGE_SIZE);
20082 + MODULES_EXEC_END = . - 1;
20083 +#endif
20084 +
20085 + } :module
20086 +#endif
20087 +
20088 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20089 + /* End of text section */
20090 + _etext = . - __KERNEL_TEXT_OFFSET;
20091 + }
20092 +
20093 +#ifdef CONFIG_X86_32
20094 + . = ALIGN(PAGE_SIZE);
20095 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20096 + *(.idt)
20097 + . = ALIGN(PAGE_SIZE);
20098 + *(.empty_zero_page)
20099 + *(.initial_pg_fixmap)
20100 + *(.initial_pg_pmd)
20101 + *(.initial_page_table)
20102 + *(.swapper_pg_dir)
20103 + } :rodata
20104 +#endif
20105 +
20106 + . = ALIGN(PAGE_SIZE);
20107 + NOTES :rodata :note
20108 +
20109 + EXCEPTION_TABLE(16) :rodata
20110
20111 #if defined(CONFIG_DEBUG_RODATA)
20112 /* .text should occupy whole number of pages */
20113 @@ -126,16 +180,20 @@ SECTIONS
20114
20115 /* Data */
20116 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20117 +
20118 +#ifdef CONFIG_PAX_KERNEXEC
20119 + . = ALIGN(HPAGE_SIZE);
20120 +#else
20121 + . = ALIGN(PAGE_SIZE);
20122 +#endif
20123 +
20124 /* Start of data section */
20125 _sdata = .;
20126
20127 /* init_task */
20128 INIT_TASK_DATA(THREAD_SIZE)
20129
20130 -#ifdef CONFIG_X86_32
20131 - /* 32 bit has nosave before _edata */
20132 NOSAVE_DATA
20133 -#endif
20134
20135 PAGE_ALIGNED_DATA(PAGE_SIZE)
20136
20137 @@ -176,12 +234,19 @@ SECTIONS
20138 #endif /* CONFIG_X86_64 */
20139
20140 /* Init code and data - will be freed after init */
20141 - . = ALIGN(PAGE_SIZE);
20142 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20143 + BYTE(0)
20144 +
20145 +#ifdef CONFIG_PAX_KERNEXEC
20146 + . = ALIGN(HPAGE_SIZE);
20147 +#else
20148 + . = ALIGN(PAGE_SIZE);
20149 +#endif
20150 +
20151 __init_begin = .; /* paired with __init_end */
20152 - }
20153 + } :init.begin
20154
20155 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20156 +#ifdef CONFIG_SMP
20157 /*
20158 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20159 * output PHDR, so the next output section - .init.text - should
20160 @@ -190,12 +255,27 @@ SECTIONS
20161 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
20162 #endif
20163
20164 - INIT_TEXT_SECTION(PAGE_SIZE)
20165 -#ifdef CONFIG_X86_64
20166 - :init
20167 -#endif
20168 + . = ALIGN(PAGE_SIZE);
20169 + init_begin = .;
20170 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20171 + VMLINUX_SYMBOL(_sinittext) = .;
20172 + INIT_TEXT
20173 + VMLINUX_SYMBOL(_einittext) = .;
20174 + . = ALIGN(PAGE_SIZE);
20175 + } :text.init
20176
20177 - INIT_DATA_SECTION(16)
20178 + /*
20179 + * .exit.text is discard at runtime, not link time, to deal with
20180 + * references from .altinstructions and .eh_frame
20181 + */
20182 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20183 + EXIT_TEXT
20184 + . = ALIGN(16);
20185 + } :text.exit
20186 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20187 +
20188 + . = ALIGN(PAGE_SIZE);
20189 + INIT_DATA_SECTION(16) :init
20190
20191 /*
20192 * Code and data for a variety of lowlevel trampolines, to be
20193 @@ -269,19 +349,12 @@ SECTIONS
20194 }
20195
20196 . = ALIGN(8);
20197 - /*
20198 - * .exit.text is discard at runtime, not link time, to deal with
20199 - * references from .altinstructions and .eh_frame
20200 - */
20201 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20202 - EXIT_TEXT
20203 - }
20204
20205 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20206 EXIT_DATA
20207 }
20208
20209 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20210 +#ifndef CONFIG_SMP
20211 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
20212 #endif
20213
20214 @@ -300,16 +373,10 @@ SECTIONS
20215 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
20216 __smp_locks = .;
20217 *(.smp_locks)
20218 - . = ALIGN(PAGE_SIZE);
20219 __smp_locks_end = .;
20220 + . = ALIGN(PAGE_SIZE);
20221 }
20222
20223 -#ifdef CONFIG_X86_64
20224 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20225 - NOSAVE_DATA
20226 - }
20227 -#endif
20228 -
20229 /* BSS */
20230 . = ALIGN(PAGE_SIZE);
20231 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20232 @@ -325,6 +392,7 @@ SECTIONS
20233 __brk_base = .;
20234 . += 64 * 1024; /* 64k alignment slop space */
20235 *(.brk_reservation) /* areas brk users have reserved */
20236 + . = ALIGN(HPAGE_SIZE);
20237 __brk_limit = .;
20238 }
20239
20240 @@ -351,13 +419,12 @@ SECTIONS
20241 * for the boot processor.
20242 */
20243 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
20244 -INIT_PER_CPU(gdt_page);
20245 INIT_PER_CPU(irq_stack_union);
20246
20247 /*
20248 * Build-time check on the image size:
20249 */
20250 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20251 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20252 "kernel image bigger than KERNEL_IMAGE_SIZE");
20253
20254 #ifdef CONFIG_SMP
20255 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20256 index 7515cf0..331a1a0 100644
20257 --- a/arch/x86/kernel/vsyscall_64.c
20258 +++ b/arch/x86/kernel/vsyscall_64.c
20259 @@ -54,15 +54,13 @@
20260 DEFINE_VVAR(int, vgetcpu_mode);
20261 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
20262
20263 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
20264 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
20265
20266 static int __init vsyscall_setup(char *str)
20267 {
20268 if (str) {
20269 if (!strcmp("emulate", str))
20270 vsyscall_mode = EMULATE;
20271 - else if (!strcmp("native", str))
20272 - vsyscall_mode = NATIVE;
20273 else if (!strcmp("none", str))
20274 vsyscall_mode = NONE;
20275 else
20276 @@ -206,7 +204,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20277
20278 tsk = current;
20279 if (seccomp_mode(&tsk->seccomp))
20280 - do_exit(SIGKILL);
20281 + do_group_exit(SIGKILL);
20282
20283 /*
20284 * With a real vsyscall, page faults cause SIGSEGV. We want to
20285 @@ -278,8 +276,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20286 return true;
20287
20288 sigsegv:
20289 - force_sig(SIGSEGV, current);
20290 - return true;
20291 + do_group_exit(SIGKILL);
20292 }
20293
20294 /*
20295 @@ -332,10 +329,7 @@ void __init map_vsyscall(void)
20296 extern char __vvar_page;
20297 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
20298
20299 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
20300 - vsyscall_mode == NATIVE
20301 - ? PAGE_KERNEL_VSYSCALL
20302 - : PAGE_KERNEL_VVAR);
20303 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
20304 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
20305 (unsigned long)VSYSCALL_START);
20306
20307 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20308 index 9796c2f..f686fbf 100644
20309 --- a/arch/x86/kernel/x8664_ksyms_64.c
20310 +++ b/arch/x86/kernel/x8664_ksyms_64.c
20311 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
20312 EXPORT_SYMBOL(copy_user_generic_string);
20313 EXPORT_SYMBOL(copy_user_generic_unrolled);
20314 EXPORT_SYMBOL(__copy_user_nocache);
20315 -EXPORT_SYMBOL(_copy_from_user);
20316 -EXPORT_SYMBOL(_copy_to_user);
20317
20318 EXPORT_SYMBOL(copy_page);
20319 EXPORT_SYMBOL(clear_page);
20320 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20321 index e62728e..5fc3a07 100644
20322 --- a/arch/x86/kernel/xsave.c
20323 +++ b/arch/x86/kernel/xsave.c
20324 @@ -131,7 +131,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20325 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20326 return -EINVAL;
20327
20328 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20329 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20330 fx_sw_user->extended_size -
20331 FP_XSTATE_MAGIC2_SIZE));
20332 if (err)
20333 @@ -267,7 +267,7 @@ fx_only:
20334 * the other extended state.
20335 */
20336 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20337 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20338 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20339 }
20340
20341 /*
20342 @@ -296,7 +296,7 @@ int restore_i387_xstate(void __user *buf)
20343 if (use_xsave())
20344 err = restore_user_xstate(buf);
20345 else
20346 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
20347 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
20348 buf);
20349 if (unlikely(err)) {
20350 /*
20351 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
20352 index 9fed5be..18fd595 100644
20353 --- a/arch/x86/kvm/cpuid.c
20354 +++ b/arch/x86/kvm/cpuid.c
20355 @@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
20356 struct kvm_cpuid2 *cpuid,
20357 struct kvm_cpuid_entry2 __user *entries)
20358 {
20359 - int r;
20360 + int r, i;
20361
20362 r = -E2BIG;
20363 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
20364 goto out;
20365 r = -EFAULT;
20366 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
20367 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20368 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20369 goto out;
20370 + for (i = 0; i < cpuid->nent; ++i) {
20371 + struct kvm_cpuid_entry2 cpuid_entry;
20372 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
20373 + goto out;
20374 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
20375 + }
20376 vcpu->arch.cpuid_nent = cpuid->nent;
20377 kvm_apic_set_version(vcpu);
20378 kvm_x86_ops->cpuid_update(vcpu);
20379 @@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20380 struct kvm_cpuid2 *cpuid,
20381 struct kvm_cpuid_entry2 __user *entries)
20382 {
20383 - int r;
20384 + int r, i;
20385
20386 r = -E2BIG;
20387 if (cpuid->nent < vcpu->arch.cpuid_nent)
20388 goto out;
20389 r = -EFAULT;
20390 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20391 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20392 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20393 goto out;
20394 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20395 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20396 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20397 + goto out;
20398 + }
20399 return 0;
20400
20401 out:
20402 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20403 index 8375622..b7bca1a 100644
20404 --- a/arch/x86/kvm/emulate.c
20405 +++ b/arch/x86/kvm/emulate.c
20406 @@ -252,6 +252,7 @@ struct gprefix {
20407
20408 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
20409 do { \
20410 + unsigned long _tmp; \
20411 __asm__ __volatile__ ( \
20412 _PRE_EFLAGS("0", "4", "2") \
20413 _op _suffix " %"_x"3,%1; " \
20414 @@ -266,8 +267,6 @@ struct gprefix {
20415 /* Raw emulation: instruction has two explicit operands. */
20416 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
20417 do { \
20418 - unsigned long _tmp; \
20419 - \
20420 switch ((ctxt)->dst.bytes) { \
20421 case 2: \
20422 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
20423 @@ -283,7 +282,6 @@ struct gprefix {
20424
20425 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20426 do { \
20427 - unsigned long _tmp; \
20428 switch ((ctxt)->dst.bytes) { \
20429 case 1: \
20430 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
20431 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
20432 index 8584322..17d5955 100644
20433 --- a/arch/x86/kvm/lapic.c
20434 +++ b/arch/x86/kvm/lapic.c
20435 @@ -54,7 +54,7 @@
20436 #define APIC_BUS_CYCLE_NS 1
20437
20438 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20439 -#define apic_debug(fmt, arg...)
20440 +#define apic_debug(fmt, arg...) do {} while (0)
20441
20442 #define APIC_LVT_NUM 6
20443 /* 14 is the version for Xeon and Pentium 8.4.8*/
20444 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
20445 index df5a703..63748a7 100644
20446 --- a/arch/x86/kvm/paging_tmpl.h
20447 +++ b/arch/x86/kvm/paging_tmpl.h
20448 @@ -197,7 +197,7 @@ retry_walk:
20449 if (unlikely(kvm_is_error_hva(host_addr)))
20450 goto error;
20451
20452 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20453 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20454 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20455 goto error;
20456
20457 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
20458 index e334389..6839087 100644
20459 --- a/arch/x86/kvm/svm.c
20460 +++ b/arch/x86/kvm/svm.c
20461 @@ -3509,7 +3509,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
20462 int cpu = raw_smp_processor_id();
20463
20464 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
20465 +
20466 + pax_open_kernel();
20467 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20468 + pax_close_kernel();
20469 +
20470 load_TR_desc();
20471 }
20472
20473 @@ -3887,6 +3891,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
20474 #endif
20475 #endif
20476
20477 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20478 + __set_fs(current_thread_info()->addr_limit);
20479 +#endif
20480 +
20481 reload_tss(vcpu);
20482
20483 local_irq_disable();
20484 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
20485 index 4ff0ab9..2ff68d3 100644
20486 --- a/arch/x86/kvm/vmx.c
20487 +++ b/arch/x86/kvm/vmx.c
20488 @@ -1303,7 +1303,11 @@ static void reload_tss(void)
20489 struct desc_struct *descs;
20490
20491 descs = (void *)gdt->address;
20492 +
20493 + pax_open_kernel();
20494 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
20495 + pax_close_kernel();
20496 +
20497 load_TR_desc();
20498 }
20499
20500 @@ -2625,8 +2629,11 @@ static __init int hardware_setup(void)
20501 if (!cpu_has_vmx_flexpriority())
20502 flexpriority_enabled = 0;
20503
20504 - if (!cpu_has_vmx_tpr_shadow())
20505 - kvm_x86_ops->update_cr8_intercept = NULL;
20506 + if (!cpu_has_vmx_tpr_shadow()) {
20507 + pax_open_kernel();
20508 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
20509 + pax_close_kernel();
20510 + }
20511
20512 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20513 kvm_disable_largepages();
20514 @@ -3642,7 +3649,7 @@ static void vmx_set_constant_host_state(void)
20515 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
20516
20517 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20518 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20519 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20520
20521 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20522 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
20523 @@ -6180,6 +6187,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20524 "jmp .Lkvm_vmx_return \n\t"
20525 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20526 ".Lkvm_vmx_return: "
20527 +
20528 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20529 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20530 + ".Lkvm_vmx_return2: "
20531 +#endif
20532 +
20533 /* Save guest registers, load host registers, keep flags */
20534 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20535 "pop %0 \n\t"
20536 @@ -6228,6 +6241,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20537 #endif
20538 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20539 [wordsize]"i"(sizeof(ulong))
20540 +
20541 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20542 + ,[cs]"i"(__KERNEL_CS)
20543 +#endif
20544 +
20545 : "cc", "memory"
20546 , R"ax", R"bx", R"di", R"si"
20547 #ifdef CONFIG_X86_64
20548 @@ -6256,7 +6274,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20549 }
20550 }
20551
20552 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
20553 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
20554 +
20555 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20556 + loadsegment(fs, __KERNEL_PERCPU);
20557 +#endif
20558 +
20559 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20560 + __set_fs(current_thread_info()->addr_limit);
20561 +#endif
20562 +
20563 vmx->loaded_vmcs->launched = 1;
20564
20565 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
20566 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
20567 index 185a2b8..866d2a6 100644
20568 --- a/arch/x86/kvm/x86.c
20569 +++ b/arch/x86/kvm/x86.c
20570 @@ -1357,8 +1357,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
20571 {
20572 struct kvm *kvm = vcpu->kvm;
20573 int lm = is_long_mode(vcpu);
20574 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20575 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20576 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20577 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20578 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20579 : kvm->arch.xen_hvm_config.blob_size_32;
20580 u32 page_num = data & ~PAGE_MASK;
20581 @@ -2213,6 +2213,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
20582 if (n < msr_list.nmsrs)
20583 goto out;
20584 r = -EFAULT;
20585 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20586 + goto out;
20587 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20588 num_msrs_to_save * sizeof(u32)))
20589 goto out;
20590 @@ -2338,7 +2340,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
20591 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20592 struct kvm_interrupt *irq)
20593 {
20594 - if (irq->irq < 0 || irq->irq >= 256)
20595 + if (irq->irq >= 256)
20596 return -EINVAL;
20597 if (irqchip_in_kernel(vcpu->kvm))
20598 return -ENXIO;
20599 @@ -4860,7 +4862,7 @@ static void kvm_set_mmio_spte_mask(void)
20600 kvm_mmu_set_mmio_spte_mask(mask);
20601 }
20602
20603 -int kvm_arch_init(void *opaque)
20604 +int kvm_arch_init(const void *opaque)
20605 {
20606 int r;
20607 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
20608 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
20609 index 642d880..44e0f3f 100644
20610 --- a/arch/x86/lguest/boot.c
20611 +++ b/arch/x86/lguest/boot.c
20612 @@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
20613 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20614 * Launcher to reboot us.
20615 */
20616 -static void lguest_restart(char *reason)
20617 +static __noreturn void lguest_restart(char *reason)
20618 {
20619 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20620 + BUG();
20621 }
20622
20623 /*G:050
20624 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20625 index 00933d5..3a64af9 100644
20626 --- a/arch/x86/lib/atomic64_386_32.S
20627 +++ b/arch/x86/lib/atomic64_386_32.S
20628 @@ -48,6 +48,10 @@ BEGIN(read)
20629 movl (v), %eax
20630 movl 4(v), %edx
20631 RET_ENDP
20632 +BEGIN(read_unchecked)
20633 + movl (v), %eax
20634 + movl 4(v), %edx
20635 +RET_ENDP
20636 #undef v
20637
20638 #define v %esi
20639 @@ -55,6 +59,10 @@ BEGIN(set)
20640 movl %ebx, (v)
20641 movl %ecx, 4(v)
20642 RET_ENDP
20643 +BEGIN(set_unchecked)
20644 + movl %ebx, (v)
20645 + movl %ecx, 4(v)
20646 +RET_ENDP
20647 #undef v
20648
20649 #define v %esi
20650 @@ -70,6 +78,20 @@ RET_ENDP
20651 BEGIN(add)
20652 addl %eax, (v)
20653 adcl %edx, 4(v)
20654 +
20655 +#ifdef CONFIG_PAX_REFCOUNT
20656 + jno 0f
20657 + subl %eax, (v)
20658 + sbbl %edx, 4(v)
20659 + int $4
20660 +0:
20661 + _ASM_EXTABLE(0b, 0b)
20662 +#endif
20663 +
20664 +RET_ENDP
20665 +BEGIN(add_unchecked)
20666 + addl %eax, (v)
20667 + adcl %edx, 4(v)
20668 RET_ENDP
20669 #undef v
20670
20671 @@ -77,6 +99,24 @@ RET_ENDP
20672 BEGIN(add_return)
20673 addl (v), %eax
20674 adcl 4(v), %edx
20675 +
20676 +#ifdef CONFIG_PAX_REFCOUNT
20677 + into
20678 +1234:
20679 + _ASM_EXTABLE(1234b, 2f)
20680 +#endif
20681 +
20682 + movl %eax, (v)
20683 + movl %edx, 4(v)
20684 +
20685 +#ifdef CONFIG_PAX_REFCOUNT
20686 +2:
20687 +#endif
20688 +
20689 +RET_ENDP
20690 +BEGIN(add_return_unchecked)
20691 + addl (v), %eax
20692 + adcl 4(v), %edx
20693 movl %eax, (v)
20694 movl %edx, 4(v)
20695 RET_ENDP
20696 @@ -86,6 +126,20 @@ RET_ENDP
20697 BEGIN(sub)
20698 subl %eax, (v)
20699 sbbl %edx, 4(v)
20700 +
20701 +#ifdef CONFIG_PAX_REFCOUNT
20702 + jno 0f
20703 + addl %eax, (v)
20704 + adcl %edx, 4(v)
20705 + int $4
20706 +0:
20707 + _ASM_EXTABLE(0b, 0b)
20708 +#endif
20709 +
20710 +RET_ENDP
20711 +BEGIN(sub_unchecked)
20712 + subl %eax, (v)
20713 + sbbl %edx, 4(v)
20714 RET_ENDP
20715 #undef v
20716
20717 @@ -96,6 +150,27 @@ BEGIN(sub_return)
20718 sbbl $0, %edx
20719 addl (v), %eax
20720 adcl 4(v), %edx
20721 +
20722 +#ifdef CONFIG_PAX_REFCOUNT
20723 + into
20724 +1234:
20725 + _ASM_EXTABLE(1234b, 2f)
20726 +#endif
20727 +
20728 + movl %eax, (v)
20729 + movl %edx, 4(v)
20730 +
20731 +#ifdef CONFIG_PAX_REFCOUNT
20732 +2:
20733 +#endif
20734 +
20735 +RET_ENDP
20736 +BEGIN(sub_return_unchecked)
20737 + negl %edx
20738 + negl %eax
20739 + sbbl $0, %edx
20740 + addl (v), %eax
20741 + adcl 4(v), %edx
20742 movl %eax, (v)
20743 movl %edx, 4(v)
20744 RET_ENDP
20745 @@ -105,6 +180,20 @@ RET_ENDP
20746 BEGIN(inc)
20747 addl $1, (v)
20748 adcl $0, 4(v)
20749 +
20750 +#ifdef CONFIG_PAX_REFCOUNT
20751 + jno 0f
20752 + subl $1, (v)
20753 + sbbl $0, 4(v)
20754 + int $4
20755 +0:
20756 + _ASM_EXTABLE(0b, 0b)
20757 +#endif
20758 +
20759 +RET_ENDP
20760 +BEGIN(inc_unchecked)
20761 + addl $1, (v)
20762 + adcl $0, 4(v)
20763 RET_ENDP
20764 #undef v
20765
20766 @@ -114,6 +203,26 @@ BEGIN(inc_return)
20767 movl 4(v), %edx
20768 addl $1, %eax
20769 adcl $0, %edx
20770 +
20771 +#ifdef CONFIG_PAX_REFCOUNT
20772 + into
20773 +1234:
20774 + _ASM_EXTABLE(1234b, 2f)
20775 +#endif
20776 +
20777 + movl %eax, (v)
20778 + movl %edx, 4(v)
20779 +
20780 +#ifdef CONFIG_PAX_REFCOUNT
20781 +2:
20782 +#endif
20783 +
20784 +RET_ENDP
20785 +BEGIN(inc_return_unchecked)
20786 + movl (v), %eax
20787 + movl 4(v), %edx
20788 + addl $1, %eax
20789 + adcl $0, %edx
20790 movl %eax, (v)
20791 movl %edx, 4(v)
20792 RET_ENDP
20793 @@ -123,6 +232,20 @@ RET_ENDP
20794 BEGIN(dec)
20795 subl $1, (v)
20796 sbbl $0, 4(v)
20797 +
20798 +#ifdef CONFIG_PAX_REFCOUNT
20799 + jno 0f
20800 + addl $1, (v)
20801 + adcl $0, 4(v)
20802 + int $4
20803 +0:
20804 + _ASM_EXTABLE(0b, 0b)
20805 +#endif
20806 +
20807 +RET_ENDP
20808 +BEGIN(dec_unchecked)
20809 + subl $1, (v)
20810 + sbbl $0, 4(v)
20811 RET_ENDP
20812 #undef v
20813
20814 @@ -132,6 +255,26 @@ BEGIN(dec_return)
20815 movl 4(v), %edx
20816 subl $1, %eax
20817 sbbl $0, %edx
20818 +
20819 +#ifdef CONFIG_PAX_REFCOUNT
20820 + into
20821 +1234:
20822 + _ASM_EXTABLE(1234b, 2f)
20823 +#endif
20824 +
20825 + movl %eax, (v)
20826 + movl %edx, 4(v)
20827 +
20828 +#ifdef CONFIG_PAX_REFCOUNT
20829 +2:
20830 +#endif
20831 +
20832 +RET_ENDP
20833 +BEGIN(dec_return_unchecked)
20834 + movl (v), %eax
20835 + movl 4(v), %edx
20836 + subl $1, %eax
20837 + sbbl $0, %edx
20838 movl %eax, (v)
20839 movl %edx, 4(v)
20840 RET_ENDP
20841 @@ -143,6 +286,13 @@ BEGIN(add_unless)
20842 adcl %edx, %edi
20843 addl (v), %eax
20844 adcl 4(v), %edx
20845 +
20846 +#ifdef CONFIG_PAX_REFCOUNT
20847 + into
20848 +1234:
20849 + _ASM_EXTABLE(1234b, 2f)
20850 +#endif
20851 +
20852 cmpl %eax, %ecx
20853 je 3f
20854 1:
20855 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20856 1:
20857 addl $1, %eax
20858 adcl $0, %edx
20859 +
20860 +#ifdef CONFIG_PAX_REFCOUNT
20861 + into
20862 +1234:
20863 + _ASM_EXTABLE(1234b, 2f)
20864 +#endif
20865 +
20866 movl %eax, (v)
20867 movl %edx, 4(v)
20868 movl $1, %eax
20869 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20870 movl 4(v), %edx
20871 subl $1, %eax
20872 sbbl $0, %edx
20873 +
20874 +#ifdef CONFIG_PAX_REFCOUNT
20875 + into
20876 +1234:
20877 + _ASM_EXTABLE(1234b, 1f)
20878 +#endif
20879 +
20880 js 1f
20881 movl %eax, (v)
20882 movl %edx, 4(v)
20883 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20884 index f5cc9eb..51fa319 100644
20885 --- a/arch/x86/lib/atomic64_cx8_32.S
20886 +++ b/arch/x86/lib/atomic64_cx8_32.S
20887 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20888 CFI_STARTPROC
20889
20890 read64 %ecx
20891 + pax_force_retaddr
20892 ret
20893 CFI_ENDPROC
20894 ENDPROC(atomic64_read_cx8)
20895
20896 +ENTRY(atomic64_read_unchecked_cx8)
20897 + CFI_STARTPROC
20898 +
20899 + read64 %ecx
20900 + pax_force_retaddr
20901 + ret
20902 + CFI_ENDPROC
20903 +ENDPROC(atomic64_read_unchecked_cx8)
20904 +
20905 ENTRY(atomic64_set_cx8)
20906 CFI_STARTPROC
20907
20908 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20909 cmpxchg8b (%esi)
20910 jne 1b
20911
20912 + pax_force_retaddr
20913 ret
20914 CFI_ENDPROC
20915 ENDPROC(atomic64_set_cx8)
20916
20917 +ENTRY(atomic64_set_unchecked_cx8)
20918 + CFI_STARTPROC
20919 +
20920 +1:
20921 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
20922 + * are atomic on 586 and newer */
20923 + cmpxchg8b (%esi)
20924 + jne 1b
20925 +
20926 + pax_force_retaddr
20927 + ret
20928 + CFI_ENDPROC
20929 +ENDPROC(atomic64_set_unchecked_cx8)
20930 +
20931 ENTRY(atomic64_xchg_cx8)
20932 CFI_STARTPROC
20933
20934 @@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
20935 cmpxchg8b (%esi)
20936 jne 1b
20937
20938 + pax_force_retaddr
20939 ret
20940 CFI_ENDPROC
20941 ENDPROC(atomic64_xchg_cx8)
20942
20943 -.macro addsub_return func ins insc
20944 -ENTRY(atomic64_\func\()_return_cx8)
20945 +.macro addsub_return func ins insc unchecked=""
20946 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20947 CFI_STARTPROC
20948 SAVE ebp
20949 SAVE ebx
20950 @@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
20951 movl %edx, %ecx
20952 \ins\()l %esi, %ebx
20953 \insc\()l %edi, %ecx
20954 +
20955 +.ifb \unchecked
20956 +#ifdef CONFIG_PAX_REFCOUNT
20957 + into
20958 +2:
20959 + _ASM_EXTABLE(2b, 3f)
20960 +#endif
20961 +.endif
20962 +
20963 LOCK_PREFIX
20964 cmpxchg8b (%ebp)
20965 jne 1b
20966 -
20967 -10:
20968 movl %ebx, %eax
20969 movl %ecx, %edx
20970 +
20971 +.ifb \unchecked
20972 +#ifdef CONFIG_PAX_REFCOUNT
20973 +3:
20974 +#endif
20975 +.endif
20976 +
20977 RESTORE edi
20978 RESTORE esi
20979 RESTORE ebx
20980 RESTORE ebp
20981 + pax_force_retaddr
20982 ret
20983 CFI_ENDPROC
20984 -ENDPROC(atomic64_\func\()_return_cx8)
20985 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20986 .endm
20987
20988 addsub_return add add adc
20989 addsub_return sub sub sbb
20990 +addsub_return add add adc _unchecked
20991 +addsub_return sub sub sbb _unchecked
20992
20993 -.macro incdec_return func ins insc
20994 -ENTRY(atomic64_\func\()_return_cx8)
20995 +.macro incdec_return func ins insc unchecked=""
20996 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20997 CFI_STARTPROC
20998 SAVE ebx
20999
21000 @@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
21001 movl %edx, %ecx
21002 \ins\()l $1, %ebx
21003 \insc\()l $0, %ecx
21004 +
21005 +.ifb \unchecked
21006 +#ifdef CONFIG_PAX_REFCOUNT
21007 + into
21008 +2:
21009 + _ASM_EXTABLE(2b, 3f)
21010 +#endif
21011 +.endif
21012 +
21013 LOCK_PREFIX
21014 cmpxchg8b (%esi)
21015 jne 1b
21016
21017 -10:
21018 movl %ebx, %eax
21019 movl %ecx, %edx
21020 +
21021 +.ifb \unchecked
21022 +#ifdef CONFIG_PAX_REFCOUNT
21023 +3:
21024 +#endif
21025 +.endif
21026 +
21027 RESTORE ebx
21028 + pax_force_retaddr
21029 ret
21030 CFI_ENDPROC
21031 -ENDPROC(atomic64_\func\()_return_cx8)
21032 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21033 .endm
21034
21035 incdec_return inc add adc
21036 incdec_return dec sub sbb
21037 +incdec_return inc add adc _unchecked
21038 +incdec_return dec sub sbb _unchecked
21039
21040 ENTRY(atomic64_dec_if_positive_cx8)
21041 CFI_STARTPROC
21042 @@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
21043 movl %edx, %ecx
21044 subl $1, %ebx
21045 sbb $0, %ecx
21046 +
21047 +#ifdef CONFIG_PAX_REFCOUNT
21048 + into
21049 +1234:
21050 + _ASM_EXTABLE(1234b, 2f)
21051 +#endif
21052 +
21053 js 2f
21054 LOCK_PREFIX
21055 cmpxchg8b (%esi)
21056 @@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
21057 movl %ebx, %eax
21058 movl %ecx, %edx
21059 RESTORE ebx
21060 + pax_force_retaddr
21061 ret
21062 CFI_ENDPROC
21063 ENDPROC(atomic64_dec_if_positive_cx8)
21064 @@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
21065 movl %edx, %ecx
21066 addl %ebp, %ebx
21067 adcl %edi, %ecx
21068 +
21069 +#ifdef CONFIG_PAX_REFCOUNT
21070 + into
21071 +1234:
21072 + _ASM_EXTABLE(1234b, 3f)
21073 +#endif
21074 +
21075 LOCK_PREFIX
21076 cmpxchg8b (%esi)
21077 jne 1b
21078 @@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
21079 CFI_ADJUST_CFA_OFFSET -8
21080 RESTORE ebx
21081 RESTORE ebp
21082 + pax_force_retaddr
21083 ret
21084 4:
21085 cmpl %edx, 4(%esp)
21086 @@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
21087 xorl %ecx, %ecx
21088 addl $1, %ebx
21089 adcl %edx, %ecx
21090 +
21091 +#ifdef CONFIG_PAX_REFCOUNT
21092 + into
21093 +1234:
21094 + _ASM_EXTABLE(1234b, 3f)
21095 +#endif
21096 +
21097 LOCK_PREFIX
21098 cmpxchg8b (%esi)
21099 jne 1b
21100 @@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
21101 movl $1, %eax
21102 3:
21103 RESTORE ebx
21104 + pax_force_retaddr
21105 ret
21106 CFI_ENDPROC
21107 ENDPROC(atomic64_inc_not_zero_cx8)
21108 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21109 index 78d16a5..fbcf666 100644
21110 --- a/arch/x86/lib/checksum_32.S
21111 +++ b/arch/x86/lib/checksum_32.S
21112 @@ -28,7 +28,8 @@
21113 #include <linux/linkage.h>
21114 #include <asm/dwarf2.h>
21115 #include <asm/errno.h>
21116 -
21117 +#include <asm/segment.h>
21118 +
21119 /*
21120 * computes a partial checksum, e.g. for TCP/UDP fragments
21121 */
21122 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21123
21124 #define ARGBASE 16
21125 #define FP 12
21126 -
21127 -ENTRY(csum_partial_copy_generic)
21128 +
21129 +ENTRY(csum_partial_copy_generic_to_user)
21130 CFI_STARTPROC
21131 +
21132 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21133 + pushl_cfi %gs
21134 + popl_cfi %es
21135 + jmp csum_partial_copy_generic
21136 +#endif
21137 +
21138 +ENTRY(csum_partial_copy_generic_from_user)
21139 +
21140 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21141 + pushl_cfi %gs
21142 + popl_cfi %ds
21143 +#endif
21144 +
21145 +ENTRY(csum_partial_copy_generic)
21146 subl $4,%esp
21147 CFI_ADJUST_CFA_OFFSET 4
21148 pushl_cfi %edi
21149 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
21150 jmp 4f
21151 SRC(1: movw (%esi), %bx )
21152 addl $2, %esi
21153 -DST( movw %bx, (%edi) )
21154 +DST( movw %bx, %es:(%edi) )
21155 addl $2, %edi
21156 addw %bx, %ax
21157 adcl $0, %eax
21158 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
21159 SRC(1: movl (%esi), %ebx )
21160 SRC( movl 4(%esi), %edx )
21161 adcl %ebx, %eax
21162 -DST( movl %ebx, (%edi) )
21163 +DST( movl %ebx, %es:(%edi) )
21164 adcl %edx, %eax
21165 -DST( movl %edx, 4(%edi) )
21166 +DST( movl %edx, %es:4(%edi) )
21167
21168 SRC( movl 8(%esi), %ebx )
21169 SRC( movl 12(%esi), %edx )
21170 adcl %ebx, %eax
21171 -DST( movl %ebx, 8(%edi) )
21172 +DST( movl %ebx, %es:8(%edi) )
21173 adcl %edx, %eax
21174 -DST( movl %edx, 12(%edi) )
21175 +DST( movl %edx, %es:12(%edi) )
21176
21177 SRC( movl 16(%esi), %ebx )
21178 SRC( movl 20(%esi), %edx )
21179 adcl %ebx, %eax
21180 -DST( movl %ebx, 16(%edi) )
21181 +DST( movl %ebx, %es:16(%edi) )
21182 adcl %edx, %eax
21183 -DST( movl %edx, 20(%edi) )
21184 +DST( movl %edx, %es:20(%edi) )
21185
21186 SRC( movl 24(%esi), %ebx )
21187 SRC( movl 28(%esi), %edx )
21188 adcl %ebx, %eax
21189 -DST( movl %ebx, 24(%edi) )
21190 +DST( movl %ebx, %es:24(%edi) )
21191 adcl %edx, %eax
21192 -DST( movl %edx, 28(%edi) )
21193 +DST( movl %edx, %es:28(%edi) )
21194
21195 lea 32(%esi), %esi
21196 lea 32(%edi), %edi
21197 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
21198 shrl $2, %edx # This clears CF
21199 SRC(3: movl (%esi), %ebx )
21200 adcl %ebx, %eax
21201 -DST( movl %ebx, (%edi) )
21202 +DST( movl %ebx, %es:(%edi) )
21203 lea 4(%esi), %esi
21204 lea 4(%edi), %edi
21205 dec %edx
21206 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
21207 jb 5f
21208 SRC( movw (%esi), %cx )
21209 leal 2(%esi), %esi
21210 -DST( movw %cx, (%edi) )
21211 +DST( movw %cx, %es:(%edi) )
21212 leal 2(%edi), %edi
21213 je 6f
21214 shll $16,%ecx
21215 SRC(5: movb (%esi), %cl )
21216 -DST( movb %cl, (%edi) )
21217 +DST( movb %cl, %es:(%edi) )
21218 6: addl %ecx, %eax
21219 adcl $0, %eax
21220 7:
21221 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
21222
21223 6001:
21224 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21225 - movl $-EFAULT, (%ebx)
21226 + movl $-EFAULT, %ss:(%ebx)
21227
21228 # zero the complete destination - computing the rest
21229 # is too much work
21230 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
21231
21232 6002:
21233 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21234 - movl $-EFAULT,(%ebx)
21235 + movl $-EFAULT,%ss:(%ebx)
21236 jmp 5000b
21237
21238 .previous
21239
21240 + pushl_cfi %ss
21241 + popl_cfi %ds
21242 + pushl_cfi %ss
21243 + popl_cfi %es
21244 popl_cfi %ebx
21245 CFI_RESTORE ebx
21246 popl_cfi %esi
21247 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
21248 popl_cfi %ecx # equivalent to addl $4,%esp
21249 ret
21250 CFI_ENDPROC
21251 -ENDPROC(csum_partial_copy_generic)
21252 +ENDPROC(csum_partial_copy_generic_to_user)
21253
21254 #else
21255
21256 /* Version for PentiumII/PPro */
21257
21258 #define ROUND1(x) \
21259 + nop; nop; nop; \
21260 SRC(movl x(%esi), %ebx ) ; \
21261 addl %ebx, %eax ; \
21262 - DST(movl %ebx, x(%edi) ) ;
21263 + DST(movl %ebx, %es:x(%edi)) ;
21264
21265 #define ROUND(x) \
21266 + nop; nop; nop; \
21267 SRC(movl x(%esi), %ebx ) ; \
21268 adcl %ebx, %eax ; \
21269 - DST(movl %ebx, x(%edi) ) ;
21270 + DST(movl %ebx, %es:x(%edi)) ;
21271
21272 #define ARGBASE 12
21273 -
21274 -ENTRY(csum_partial_copy_generic)
21275 +
21276 +ENTRY(csum_partial_copy_generic_to_user)
21277 CFI_STARTPROC
21278 +
21279 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21280 + pushl_cfi %gs
21281 + popl_cfi %es
21282 + jmp csum_partial_copy_generic
21283 +#endif
21284 +
21285 +ENTRY(csum_partial_copy_generic_from_user)
21286 +
21287 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21288 + pushl_cfi %gs
21289 + popl_cfi %ds
21290 +#endif
21291 +
21292 +ENTRY(csum_partial_copy_generic)
21293 pushl_cfi %ebx
21294 CFI_REL_OFFSET ebx, 0
21295 pushl_cfi %edi
21296 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
21297 subl %ebx, %edi
21298 lea -1(%esi),%edx
21299 andl $-32,%edx
21300 - lea 3f(%ebx,%ebx), %ebx
21301 + lea 3f(%ebx,%ebx,2), %ebx
21302 testl %esi, %esi
21303 jmp *%ebx
21304 1: addl $64,%esi
21305 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
21306 jb 5f
21307 SRC( movw (%esi), %dx )
21308 leal 2(%esi), %esi
21309 -DST( movw %dx, (%edi) )
21310 +DST( movw %dx, %es:(%edi) )
21311 leal 2(%edi), %edi
21312 je 6f
21313 shll $16,%edx
21314 5:
21315 SRC( movb (%esi), %dl )
21316 -DST( movb %dl, (%edi) )
21317 +DST( movb %dl, %es:(%edi) )
21318 6: addl %edx, %eax
21319 adcl $0, %eax
21320 7:
21321 .section .fixup, "ax"
21322 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21323 - movl $-EFAULT, (%ebx)
21324 + movl $-EFAULT, %ss:(%ebx)
21325 # zero the complete destination (computing the rest is too much work)
21326 movl ARGBASE+8(%esp),%edi # dst
21327 movl ARGBASE+12(%esp),%ecx # len
21328 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
21329 rep; stosb
21330 jmp 7b
21331 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21332 - movl $-EFAULT, (%ebx)
21333 + movl $-EFAULT, %ss:(%ebx)
21334 jmp 7b
21335 .previous
21336
21337 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21338 + pushl_cfi %ss
21339 + popl_cfi %ds
21340 + pushl_cfi %ss
21341 + popl_cfi %es
21342 +#endif
21343 +
21344 popl_cfi %esi
21345 CFI_RESTORE esi
21346 popl_cfi %edi
21347 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
21348 CFI_RESTORE ebx
21349 ret
21350 CFI_ENDPROC
21351 -ENDPROC(csum_partial_copy_generic)
21352 +ENDPROC(csum_partial_copy_generic_to_user)
21353
21354 #undef ROUND
21355 #undef ROUND1
21356 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21357 index f2145cf..cea889d 100644
21358 --- a/arch/x86/lib/clear_page_64.S
21359 +++ b/arch/x86/lib/clear_page_64.S
21360 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
21361 movl $4096/8,%ecx
21362 xorl %eax,%eax
21363 rep stosq
21364 + pax_force_retaddr
21365 ret
21366 CFI_ENDPROC
21367 ENDPROC(clear_page_c)
21368 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
21369 movl $4096,%ecx
21370 xorl %eax,%eax
21371 rep stosb
21372 + pax_force_retaddr
21373 ret
21374 CFI_ENDPROC
21375 ENDPROC(clear_page_c_e)
21376 @@ -43,6 +45,7 @@ ENTRY(clear_page)
21377 leaq 64(%rdi),%rdi
21378 jnz .Lloop
21379 nop
21380 + pax_force_retaddr
21381 ret
21382 CFI_ENDPROC
21383 .Lclear_page_end:
21384 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
21385
21386 #include <asm/cpufeature.h>
21387
21388 - .section .altinstr_replacement,"ax"
21389 + .section .altinstr_replacement,"a"
21390 1: .byte 0xeb /* jmp <disp8> */
21391 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21392 2: .byte 0xeb /* jmp <disp8> */
21393 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21394 index 1e572c5..2a162cd 100644
21395 --- a/arch/x86/lib/cmpxchg16b_emu.S
21396 +++ b/arch/x86/lib/cmpxchg16b_emu.S
21397 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21398
21399 popf
21400 mov $1, %al
21401 + pax_force_retaddr
21402 ret
21403
21404 not_same:
21405 popf
21406 xor %al,%al
21407 + pax_force_retaddr
21408 ret
21409
21410 CFI_ENDPROC
21411 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21412 index 6b34d04..dccb07f 100644
21413 --- a/arch/x86/lib/copy_page_64.S
21414 +++ b/arch/x86/lib/copy_page_64.S
21415 @@ -9,6 +9,7 @@ copy_page_c:
21416 CFI_STARTPROC
21417 movl $4096/8,%ecx
21418 rep movsq
21419 + pax_force_retaddr
21420 ret
21421 CFI_ENDPROC
21422 ENDPROC(copy_page_c)
21423 @@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
21424
21425 ENTRY(copy_page)
21426 CFI_STARTPROC
21427 - subq $2*8,%rsp
21428 - CFI_ADJUST_CFA_OFFSET 2*8
21429 + subq $3*8,%rsp
21430 + CFI_ADJUST_CFA_OFFSET 3*8
21431 movq %rbx,(%rsp)
21432 CFI_REL_OFFSET rbx, 0
21433 movq %r12,1*8(%rsp)
21434 CFI_REL_OFFSET r12, 1*8
21435 + movq %r13,2*8(%rsp)
21436 + CFI_REL_OFFSET r13, 2*8
21437
21438 movl $(4096/64)-5,%ecx
21439 .p2align 4
21440 @@ -37,7 +40,7 @@ ENTRY(copy_page)
21441 movq 16 (%rsi), %rdx
21442 movq 24 (%rsi), %r8
21443 movq 32 (%rsi), %r9
21444 - movq 40 (%rsi), %r10
21445 + movq 40 (%rsi), %r13
21446 movq 48 (%rsi), %r11
21447 movq 56 (%rsi), %r12
21448
21449 @@ -48,7 +51,7 @@ ENTRY(copy_page)
21450 movq %rdx, 16 (%rdi)
21451 movq %r8, 24 (%rdi)
21452 movq %r9, 32 (%rdi)
21453 - movq %r10, 40 (%rdi)
21454 + movq %r13, 40 (%rdi)
21455 movq %r11, 48 (%rdi)
21456 movq %r12, 56 (%rdi)
21457
21458 @@ -67,7 +70,7 @@ ENTRY(copy_page)
21459 movq 16 (%rsi), %rdx
21460 movq 24 (%rsi), %r8
21461 movq 32 (%rsi), %r9
21462 - movq 40 (%rsi), %r10
21463 + movq 40 (%rsi), %r13
21464 movq 48 (%rsi), %r11
21465 movq 56 (%rsi), %r12
21466
21467 @@ -76,7 +79,7 @@ ENTRY(copy_page)
21468 movq %rdx, 16 (%rdi)
21469 movq %r8, 24 (%rdi)
21470 movq %r9, 32 (%rdi)
21471 - movq %r10, 40 (%rdi)
21472 + movq %r13, 40 (%rdi)
21473 movq %r11, 48 (%rdi)
21474 movq %r12, 56 (%rdi)
21475
21476 @@ -89,8 +92,11 @@ ENTRY(copy_page)
21477 CFI_RESTORE rbx
21478 movq 1*8(%rsp),%r12
21479 CFI_RESTORE r12
21480 - addq $2*8,%rsp
21481 - CFI_ADJUST_CFA_OFFSET -2*8
21482 + movq 2*8(%rsp),%r13
21483 + CFI_RESTORE r13
21484 + addq $3*8,%rsp
21485 + CFI_ADJUST_CFA_OFFSET -3*8
21486 + pax_force_retaddr
21487 ret
21488 .Lcopy_page_end:
21489 CFI_ENDPROC
21490 @@ -101,7 +107,7 @@ ENDPROC(copy_page)
21491
21492 #include <asm/cpufeature.h>
21493
21494 - .section .altinstr_replacement,"ax"
21495 + .section .altinstr_replacement,"a"
21496 1: .byte 0xeb /* jmp <disp8> */
21497 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21498 2:
21499 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21500 index 0248402..821c786 100644
21501 --- a/arch/x86/lib/copy_user_64.S
21502 +++ b/arch/x86/lib/copy_user_64.S
21503 @@ -16,6 +16,7 @@
21504 #include <asm/thread_info.h>
21505 #include <asm/cpufeature.h>
21506 #include <asm/alternative-asm.h>
21507 +#include <asm/pgtable.h>
21508
21509 /*
21510 * By placing feature2 after feature1 in altinstructions section, we logically
21511 @@ -29,7 +30,7 @@
21512 .byte 0xe9 /* 32bit jump */
21513 .long \orig-1f /* by default jump to orig */
21514 1:
21515 - .section .altinstr_replacement,"ax"
21516 + .section .altinstr_replacement,"a"
21517 2: .byte 0xe9 /* near jump with 32bit immediate */
21518 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21519 3: .byte 0xe9 /* near jump with 32bit immediate */
21520 @@ -71,47 +72,20 @@
21521 #endif
21522 .endm
21523
21524 -/* Standard copy_to_user with segment limit checking */
21525 -ENTRY(_copy_to_user)
21526 - CFI_STARTPROC
21527 - GET_THREAD_INFO(%rax)
21528 - movq %rdi,%rcx
21529 - addq %rdx,%rcx
21530 - jc bad_to_user
21531 - cmpq TI_addr_limit(%rax),%rcx
21532 - ja bad_to_user
21533 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21534 - copy_user_generic_unrolled,copy_user_generic_string, \
21535 - copy_user_enhanced_fast_string
21536 - CFI_ENDPROC
21537 -ENDPROC(_copy_to_user)
21538 -
21539 -/* Standard copy_from_user with segment limit checking */
21540 -ENTRY(_copy_from_user)
21541 - CFI_STARTPROC
21542 - GET_THREAD_INFO(%rax)
21543 - movq %rsi,%rcx
21544 - addq %rdx,%rcx
21545 - jc bad_from_user
21546 - cmpq TI_addr_limit(%rax),%rcx
21547 - ja bad_from_user
21548 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21549 - copy_user_generic_unrolled,copy_user_generic_string, \
21550 - copy_user_enhanced_fast_string
21551 - CFI_ENDPROC
21552 -ENDPROC(_copy_from_user)
21553 -
21554 .section .fixup,"ax"
21555 /* must zero dest */
21556 ENTRY(bad_from_user)
21557 bad_from_user:
21558 CFI_STARTPROC
21559 + testl %edx,%edx
21560 + js bad_to_user
21561 movl %edx,%ecx
21562 xorl %eax,%eax
21563 rep
21564 stosb
21565 bad_to_user:
21566 movl %edx,%eax
21567 + pax_force_retaddr
21568 ret
21569 CFI_ENDPROC
21570 ENDPROC(bad_from_user)
21571 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
21572 jz 17f
21573 1: movq (%rsi),%r8
21574 2: movq 1*8(%rsi),%r9
21575 -3: movq 2*8(%rsi),%r10
21576 +3: movq 2*8(%rsi),%rax
21577 4: movq 3*8(%rsi),%r11
21578 5: movq %r8,(%rdi)
21579 6: movq %r9,1*8(%rdi)
21580 -7: movq %r10,2*8(%rdi)
21581 +7: movq %rax,2*8(%rdi)
21582 8: movq %r11,3*8(%rdi)
21583 9: movq 4*8(%rsi),%r8
21584 10: movq 5*8(%rsi),%r9
21585 -11: movq 6*8(%rsi),%r10
21586 +11: movq 6*8(%rsi),%rax
21587 12: movq 7*8(%rsi),%r11
21588 13: movq %r8,4*8(%rdi)
21589 14: movq %r9,5*8(%rdi)
21590 -15: movq %r10,6*8(%rdi)
21591 +15: movq %rax,6*8(%rdi)
21592 16: movq %r11,7*8(%rdi)
21593 leaq 64(%rsi),%rsi
21594 leaq 64(%rdi),%rdi
21595 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
21596 decl %ecx
21597 jnz 21b
21598 23: xor %eax,%eax
21599 + pax_force_retaddr
21600 ret
21601
21602 .section .fixup,"ax"
21603 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
21604 3: rep
21605 movsb
21606 4: xorl %eax,%eax
21607 + pax_force_retaddr
21608 ret
21609
21610 .section .fixup,"ax"
21611 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
21612 1: rep
21613 movsb
21614 2: xorl %eax,%eax
21615 + pax_force_retaddr
21616 ret
21617
21618 .section .fixup,"ax"
21619 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21620 index cb0c112..e3a6895 100644
21621 --- a/arch/x86/lib/copy_user_nocache_64.S
21622 +++ b/arch/x86/lib/copy_user_nocache_64.S
21623 @@ -8,12 +8,14 @@
21624
21625 #include <linux/linkage.h>
21626 #include <asm/dwarf2.h>
21627 +#include <asm/alternative-asm.h>
21628
21629 #define FIX_ALIGNMENT 1
21630
21631 #include <asm/current.h>
21632 #include <asm/asm-offsets.h>
21633 #include <asm/thread_info.h>
21634 +#include <asm/pgtable.h>
21635
21636 .macro ALIGN_DESTINATION
21637 #ifdef FIX_ALIGNMENT
21638 @@ -50,6 +52,15 @@
21639 */
21640 ENTRY(__copy_user_nocache)
21641 CFI_STARTPROC
21642 +
21643 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21644 + mov $PAX_USER_SHADOW_BASE,%rcx
21645 + cmp %rcx,%rsi
21646 + jae 1f
21647 + add %rcx,%rsi
21648 +1:
21649 +#endif
21650 +
21651 cmpl $8,%edx
21652 jb 20f /* less then 8 bytes, go to byte copy loop */
21653 ALIGN_DESTINATION
21654 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21655 jz 17f
21656 1: movq (%rsi),%r8
21657 2: movq 1*8(%rsi),%r9
21658 -3: movq 2*8(%rsi),%r10
21659 +3: movq 2*8(%rsi),%rax
21660 4: movq 3*8(%rsi),%r11
21661 5: movnti %r8,(%rdi)
21662 6: movnti %r9,1*8(%rdi)
21663 -7: movnti %r10,2*8(%rdi)
21664 +7: movnti %rax,2*8(%rdi)
21665 8: movnti %r11,3*8(%rdi)
21666 9: movq 4*8(%rsi),%r8
21667 10: movq 5*8(%rsi),%r9
21668 -11: movq 6*8(%rsi),%r10
21669 +11: movq 6*8(%rsi),%rax
21670 12: movq 7*8(%rsi),%r11
21671 13: movnti %r8,4*8(%rdi)
21672 14: movnti %r9,5*8(%rdi)
21673 -15: movnti %r10,6*8(%rdi)
21674 +15: movnti %rax,6*8(%rdi)
21675 16: movnti %r11,7*8(%rdi)
21676 leaq 64(%rsi),%rsi
21677 leaq 64(%rdi),%rdi
21678 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
21679 jnz 21b
21680 23: xorl %eax,%eax
21681 sfence
21682 + pax_force_retaddr
21683 ret
21684
21685 .section .fixup,"ax"
21686 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21687 index fb903b7..c92b7f7 100644
21688 --- a/arch/x86/lib/csum-copy_64.S
21689 +++ b/arch/x86/lib/csum-copy_64.S
21690 @@ -8,6 +8,7 @@
21691 #include <linux/linkage.h>
21692 #include <asm/dwarf2.h>
21693 #include <asm/errno.h>
21694 +#include <asm/alternative-asm.h>
21695
21696 /*
21697 * Checksum copy with exception handling.
21698 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
21699 CFI_RESTORE rbp
21700 addq $7*8, %rsp
21701 CFI_ADJUST_CFA_OFFSET -7*8
21702 + pax_force_retaddr 0, 1
21703 ret
21704 CFI_RESTORE_STATE
21705
21706 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21707 index 459b58a..9570bc7 100644
21708 --- a/arch/x86/lib/csum-wrappers_64.c
21709 +++ b/arch/x86/lib/csum-wrappers_64.c
21710 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21711 len -= 2;
21712 }
21713 }
21714 - isum = csum_partial_copy_generic((__force const void *)src,
21715 +
21716 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21717 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21718 + src += PAX_USER_SHADOW_BASE;
21719 +#endif
21720 +
21721 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
21722 dst, len, isum, errp, NULL);
21723 if (unlikely(*errp))
21724 goto out_err;
21725 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21726 }
21727
21728 *errp = 0;
21729 - return csum_partial_copy_generic(src, (void __force *)dst,
21730 +
21731 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21732 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21733 + dst += PAX_USER_SHADOW_BASE;
21734 +#endif
21735 +
21736 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21737 len, isum, NULL, errp);
21738 }
21739 EXPORT_SYMBOL(csum_partial_copy_to_user);
21740 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21741 index 51f1504..ddac4c1 100644
21742 --- a/arch/x86/lib/getuser.S
21743 +++ b/arch/x86/lib/getuser.S
21744 @@ -33,15 +33,38 @@
21745 #include <asm/asm-offsets.h>
21746 #include <asm/thread_info.h>
21747 #include <asm/asm.h>
21748 +#include <asm/segment.h>
21749 +#include <asm/pgtable.h>
21750 +#include <asm/alternative-asm.h>
21751 +
21752 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21753 +#define __copyuser_seg gs;
21754 +#else
21755 +#define __copyuser_seg
21756 +#endif
21757
21758 .text
21759 ENTRY(__get_user_1)
21760 CFI_STARTPROC
21761 +
21762 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21763 GET_THREAD_INFO(%_ASM_DX)
21764 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21765 jae bad_get_user
21766 -1: movzb (%_ASM_AX),%edx
21767 +
21768 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21769 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21770 + cmp %_ASM_DX,%_ASM_AX
21771 + jae 1234f
21772 + add %_ASM_DX,%_ASM_AX
21773 +1234:
21774 +#endif
21775 +
21776 +#endif
21777 +
21778 +1: __copyuser_seg movzb (%_ASM_AX),%edx
21779 xor %eax,%eax
21780 + pax_force_retaddr
21781 ret
21782 CFI_ENDPROC
21783 ENDPROC(__get_user_1)
21784 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21785 ENTRY(__get_user_2)
21786 CFI_STARTPROC
21787 add $1,%_ASM_AX
21788 +
21789 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21790 jc bad_get_user
21791 GET_THREAD_INFO(%_ASM_DX)
21792 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21793 jae bad_get_user
21794 -2: movzwl -1(%_ASM_AX),%edx
21795 +
21796 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21797 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21798 + cmp %_ASM_DX,%_ASM_AX
21799 + jae 1234f
21800 + add %_ASM_DX,%_ASM_AX
21801 +1234:
21802 +#endif
21803 +
21804 +#endif
21805 +
21806 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21807 xor %eax,%eax
21808 + pax_force_retaddr
21809 ret
21810 CFI_ENDPROC
21811 ENDPROC(__get_user_2)
21812 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21813 ENTRY(__get_user_4)
21814 CFI_STARTPROC
21815 add $3,%_ASM_AX
21816 +
21817 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21818 jc bad_get_user
21819 GET_THREAD_INFO(%_ASM_DX)
21820 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21821 jae bad_get_user
21822 -3: mov -3(%_ASM_AX),%edx
21823 +
21824 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21825 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21826 + cmp %_ASM_DX,%_ASM_AX
21827 + jae 1234f
21828 + add %_ASM_DX,%_ASM_AX
21829 +1234:
21830 +#endif
21831 +
21832 +#endif
21833 +
21834 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
21835 xor %eax,%eax
21836 + pax_force_retaddr
21837 ret
21838 CFI_ENDPROC
21839 ENDPROC(__get_user_4)
21840 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21841 GET_THREAD_INFO(%_ASM_DX)
21842 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21843 jae bad_get_user
21844 +
21845 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21846 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21847 + cmp %_ASM_DX,%_ASM_AX
21848 + jae 1234f
21849 + add %_ASM_DX,%_ASM_AX
21850 +1234:
21851 +#endif
21852 +
21853 4: movq -7(%_ASM_AX),%_ASM_DX
21854 xor %eax,%eax
21855 + pax_force_retaddr
21856 ret
21857 CFI_ENDPROC
21858 ENDPROC(__get_user_8)
21859 @@ -91,6 +152,7 @@ bad_get_user:
21860 CFI_STARTPROC
21861 xor %edx,%edx
21862 mov $(-EFAULT),%_ASM_AX
21863 + pax_force_retaddr
21864 ret
21865 CFI_ENDPROC
21866 END(bad_get_user)
21867 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21868 index b1e6c4b..21ae8fc 100644
21869 --- a/arch/x86/lib/insn.c
21870 +++ b/arch/x86/lib/insn.c
21871 @@ -21,6 +21,11 @@
21872 #include <linux/string.h>
21873 #include <asm/inat.h>
21874 #include <asm/insn.h>
21875 +#ifdef __KERNEL__
21876 +#include <asm/pgtable_types.h>
21877 +#else
21878 +#define ktla_ktva(addr) addr
21879 +#endif
21880
21881 /* Verify next sizeof(t) bytes can be on the same instruction */
21882 #define validate_next(t, insn, n) \
21883 @@ -49,8 +54,8 @@
21884 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21885 {
21886 memset(insn, 0, sizeof(*insn));
21887 - insn->kaddr = kaddr;
21888 - insn->next_byte = kaddr;
21889 + insn->kaddr = ktla_ktva(kaddr);
21890 + insn->next_byte = ktla_ktva(kaddr);
21891 insn->x86_64 = x86_64 ? 1 : 0;
21892 insn->opnd_bytes = 4;
21893 if (x86_64)
21894 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21895 index 05a95e7..326f2fa 100644
21896 --- a/arch/x86/lib/iomap_copy_64.S
21897 +++ b/arch/x86/lib/iomap_copy_64.S
21898 @@ -17,6 +17,7 @@
21899
21900 #include <linux/linkage.h>
21901 #include <asm/dwarf2.h>
21902 +#include <asm/alternative-asm.h>
21903
21904 /*
21905 * override generic version in lib/iomap_copy.c
21906 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21907 CFI_STARTPROC
21908 movl %edx,%ecx
21909 rep movsd
21910 + pax_force_retaddr
21911 ret
21912 CFI_ENDPROC
21913 ENDPROC(__iowrite32_copy)
21914 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21915 index 1c273be..da9cc0e 100644
21916 --- a/arch/x86/lib/memcpy_64.S
21917 +++ b/arch/x86/lib/memcpy_64.S
21918 @@ -33,6 +33,7 @@
21919 rep movsq
21920 movl %edx, %ecx
21921 rep movsb
21922 + pax_force_retaddr
21923 ret
21924 .Lmemcpy_e:
21925 .previous
21926 @@ -49,6 +50,7 @@
21927 movq %rdi, %rax
21928 movq %rdx, %rcx
21929 rep movsb
21930 + pax_force_retaddr
21931 ret
21932 .Lmemcpy_e_e:
21933 .previous
21934 @@ -76,13 +78,13 @@ ENTRY(memcpy)
21935 */
21936 movq 0*8(%rsi), %r8
21937 movq 1*8(%rsi), %r9
21938 - movq 2*8(%rsi), %r10
21939 + movq 2*8(%rsi), %rcx
21940 movq 3*8(%rsi), %r11
21941 leaq 4*8(%rsi), %rsi
21942
21943 movq %r8, 0*8(%rdi)
21944 movq %r9, 1*8(%rdi)
21945 - movq %r10, 2*8(%rdi)
21946 + movq %rcx, 2*8(%rdi)
21947 movq %r11, 3*8(%rdi)
21948 leaq 4*8(%rdi), %rdi
21949 jae .Lcopy_forward_loop
21950 @@ -105,12 +107,12 @@ ENTRY(memcpy)
21951 subq $0x20, %rdx
21952 movq -1*8(%rsi), %r8
21953 movq -2*8(%rsi), %r9
21954 - movq -3*8(%rsi), %r10
21955 + movq -3*8(%rsi), %rcx
21956 movq -4*8(%rsi), %r11
21957 leaq -4*8(%rsi), %rsi
21958 movq %r8, -1*8(%rdi)
21959 movq %r9, -2*8(%rdi)
21960 - movq %r10, -3*8(%rdi)
21961 + movq %rcx, -3*8(%rdi)
21962 movq %r11, -4*8(%rdi)
21963 leaq -4*8(%rdi), %rdi
21964 jae .Lcopy_backward_loop
21965 @@ -130,12 +132,13 @@ ENTRY(memcpy)
21966 */
21967 movq 0*8(%rsi), %r8
21968 movq 1*8(%rsi), %r9
21969 - movq -2*8(%rsi, %rdx), %r10
21970 + movq -2*8(%rsi, %rdx), %rcx
21971 movq -1*8(%rsi, %rdx), %r11
21972 movq %r8, 0*8(%rdi)
21973 movq %r9, 1*8(%rdi)
21974 - movq %r10, -2*8(%rdi, %rdx)
21975 + movq %rcx, -2*8(%rdi, %rdx)
21976 movq %r11, -1*8(%rdi, %rdx)
21977 + pax_force_retaddr
21978 retq
21979 .p2align 4
21980 .Lless_16bytes:
21981 @@ -148,6 +151,7 @@ ENTRY(memcpy)
21982 movq -1*8(%rsi, %rdx), %r9
21983 movq %r8, 0*8(%rdi)
21984 movq %r9, -1*8(%rdi, %rdx)
21985 + pax_force_retaddr
21986 retq
21987 .p2align 4
21988 .Lless_8bytes:
21989 @@ -161,6 +165,7 @@ ENTRY(memcpy)
21990 movl -4(%rsi, %rdx), %r8d
21991 movl %ecx, (%rdi)
21992 movl %r8d, -4(%rdi, %rdx)
21993 + pax_force_retaddr
21994 retq
21995 .p2align 4
21996 .Lless_3bytes:
21997 @@ -179,6 +184,7 @@ ENTRY(memcpy)
21998 movb %cl, (%rdi)
21999
22000 .Lend:
22001 + pax_force_retaddr
22002 retq
22003 CFI_ENDPROC
22004 ENDPROC(memcpy)
22005 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
22006 index ee16461..c39c199 100644
22007 --- a/arch/x86/lib/memmove_64.S
22008 +++ b/arch/x86/lib/memmove_64.S
22009 @@ -61,13 +61,13 @@ ENTRY(memmove)
22010 5:
22011 sub $0x20, %rdx
22012 movq 0*8(%rsi), %r11
22013 - movq 1*8(%rsi), %r10
22014 + movq 1*8(%rsi), %rcx
22015 movq 2*8(%rsi), %r9
22016 movq 3*8(%rsi), %r8
22017 leaq 4*8(%rsi), %rsi
22018
22019 movq %r11, 0*8(%rdi)
22020 - movq %r10, 1*8(%rdi)
22021 + movq %rcx, 1*8(%rdi)
22022 movq %r9, 2*8(%rdi)
22023 movq %r8, 3*8(%rdi)
22024 leaq 4*8(%rdi), %rdi
22025 @@ -81,10 +81,10 @@ ENTRY(memmove)
22026 4:
22027 movq %rdx, %rcx
22028 movq -8(%rsi, %rdx), %r11
22029 - lea -8(%rdi, %rdx), %r10
22030 + lea -8(%rdi, %rdx), %r9
22031 shrq $3, %rcx
22032 rep movsq
22033 - movq %r11, (%r10)
22034 + movq %r11, (%r9)
22035 jmp 13f
22036 .Lmemmove_end_forward:
22037
22038 @@ -95,14 +95,14 @@ ENTRY(memmove)
22039 7:
22040 movq %rdx, %rcx
22041 movq (%rsi), %r11
22042 - movq %rdi, %r10
22043 + movq %rdi, %r9
22044 leaq -8(%rsi, %rdx), %rsi
22045 leaq -8(%rdi, %rdx), %rdi
22046 shrq $3, %rcx
22047 std
22048 rep movsq
22049 cld
22050 - movq %r11, (%r10)
22051 + movq %r11, (%r9)
22052 jmp 13f
22053
22054 /*
22055 @@ -127,13 +127,13 @@ ENTRY(memmove)
22056 8:
22057 subq $0x20, %rdx
22058 movq -1*8(%rsi), %r11
22059 - movq -2*8(%rsi), %r10
22060 + movq -2*8(%rsi), %rcx
22061 movq -3*8(%rsi), %r9
22062 movq -4*8(%rsi), %r8
22063 leaq -4*8(%rsi), %rsi
22064
22065 movq %r11, -1*8(%rdi)
22066 - movq %r10, -2*8(%rdi)
22067 + movq %rcx, -2*8(%rdi)
22068 movq %r9, -3*8(%rdi)
22069 movq %r8, -4*8(%rdi)
22070 leaq -4*8(%rdi), %rdi
22071 @@ -151,11 +151,11 @@ ENTRY(memmove)
22072 * Move data from 16 bytes to 31 bytes.
22073 */
22074 movq 0*8(%rsi), %r11
22075 - movq 1*8(%rsi), %r10
22076 + movq 1*8(%rsi), %rcx
22077 movq -2*8(%rsi, %rdx), %r9
22078 movq -1*8(%rsi, %rdx), %r8
22079 movq %r11, 0*8(%rdi)
22080 - movq %r10, 1*8(%rdi)
22081 + movq %rcx, 1*8(%rdi)
22082 movq %r9, -2*8(%rdi, %rdx)
22083 movq %r8, -1*8(%rdi, %rdx)
22084 jmp 13f
22085 @@ -167,9 +167,9 @@ ENTRY(memmove)
22086 * Move data from 8 bytes to 15 bytes.
22087 */
22088 movq 0*8(%rsi), %r11
22089 - movq -1*8(%rsi, %rdx), %r10
22090 + movq -1*8(%rsi, %rdx), %r9
22091 movq %r11, 0*8(%rdi)
22092 - movq %r10, -1*8(%rdi, %rdx)
22093 + movq %r9, -1*8(%rdi, %rdx)
22094 jmp 13f
22095 10:
22096 cmpq $4, %rdx
22097 @@ -178,9 +178,9 @@ ENTRY(memmove)
22098 * Move data from 4 bytes to 7 bytes.
22099 */
22100 movl (%rsi), %r11d
22101 - movl -4(%rsi, %rdx), %r10d
22102 + movl -4(%rsi, %rdx), %r9d
22103 movl %r11d, (%rdi)
22104 - movl %r10d, -4(%rdi, %rdx)
22105 + movl %r9d, -4(%rdi, %rdx)
22106 jmp 13f
22107 11:
22108 cmp $2, %rdx
22109 @@ -189,9 +189,9 @@ ENTRY(memmove)
22110 * Move data from 2 bytes to 3 bytes.
22111 */
22112 movw (%rsi), %r11w
22113 - movw -2(%rsi, %rdx), %r10w
22114 + movw -2(%rsi, %rdx), %r9w
22115 movw %r11w, (%rdi)
22116 - movw %r10w, -2(%rdi, %rdx)
22117 + movw %r9w, -2(%rdi, %rdx)
22118 jmp 13f
22119 12:
22120 cmp $1, %rdx
22121 @@ -202,6 +202,7 @@ ENTRY(memmove)
22122 movb (%rsi), %r11b
22123 movb %r11b, (%rdi)
22124 13:
22125 + pax_force_retaddr
22126 retq
22127 CFI_ENDPROC
22128
22129 @@ -210,6 +211,7 @@ ENTRY(memmove)
22130 /* Forward moving data. */
22131 movq %rdx, %rcx
22132 rep movsb
22133 + pax_force_retaddr
22134 retq
22135 .Lmemmove_end_forward_efs:
22136 .previous
22137 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22138 index 2dcb380..963660a 100644
22139 --- a/arch/x86/lib/memset_64.S
22140 +++ b/arch/x86/lib/memset_64.S
22141 @@ -30,6 +30,7 @@
22142 movl %edx,%ecx
22143 rep stosb
22144 movq %r9,%rax
22145 + pax_force_retaddr
22146 ret
22147 .Lmemset_e:
22148 .previous
22149 @@ -52,6 +53,7 @@
22150 movq %rdx,%rcx
22151 rep stosb
22152 movq %r9,%rax
22153 + pax_force_retaddr
22154 ret
22155 .Lmemset_e_e:
22156 .previous
22157 @@ -59,7 +61,7 @@
22158 ENTRY(memset)
22159 ENTRY(__memset)
22160 CFI_STARTPROC
22161 - movq %rdi,%r10
22162 + movq %rdi,%r11
22163
22164 /* expand byte value */
22165 movzbl %sil,%ecx
22166 @@ -117,7 +119,8 @@ ENTRY(__memset)
22167 jnz .Lloop_1
22168
22169 .Lende:
22170 - movq %r10,%rax
22171 + movq %r11,%rax
22172 + pax_force_retaddr
22173 ret
22174
22175 CFI_RESTORE_STATE
22176 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22177 index c9f2d9b..e7fd2c0 100644
22178 --- a/arch/x86/lib/mmx_32.c
22179 +++ b/arch/x86/lib/mmx_32.c
22180 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22181 {
22182 void *p;
22183 int i;
22184 + unsigned long cr0;
22185
22186 if (unlikely(in_interrupt()))
22187 return __memcpy(to, from, len);
22188 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22189 kernel_fpu_begin();
22190
22191 __asm__ __volatile__ (
22192 - "1: prefetch (%0)\n" /* This set is 28 bytes */
22193 - " prefetch 64(%0)\n"
22194 - " prefetch 128(%0)\n"
22195 - " prefetch 192(%0)\n"
22196 - " prefetch 256(%0)\n"
22197 + "1: prefetch (%1)\n" /* This set is 28 bytes */
22198 + " prefetch 64(%1)\n"
22199 + " prefetch 128(%1)\n"
22200 + " prefetch 192(%1)\n"
22201 + " prefetch 256(%1)\n"
22202 "2: \n"
22203 ".section .fixup, \"ax\"\n"
22204 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22205 + "3: \n"
22206 +
22207 +#ifdef CONFIG_PAX_KERNEXEC
22208 + " movl %%cr0, %0\n"
22209 + " movl %0, %%eax\n"
22210 + " andl $0xFFFEFFFF, %%eax\n"
22211 + " movl %%eax, %%cr0\n"
22212 +#endif
22213 +
22214 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22215 +
22216 +#ifdef CONFIG_PAX_KERNEXEC
22217 + " movl %0, %%cr0\n"
22218 +#endif
22219 +
22220 " jmp 2b\n"
22221 ".previous\n"
22222 _ASM_EXTABLE(1b, 3b)
22223 - : : "r" (from));
22224 + : "=&r" (cr0) : "r" (from) : "ax");
22225
22226 for ( ; i > 5; i--) {
22227 __asm__ __volatile__ (
22228 - "1: prefetch 320(%0)\n"
22229 - "2: movq (%0), %%mm0\n"
22230 - " movq 8(%0), %%mm1\n"
22231 - " movq 16(%0), %%mm2\n"
22232 - " movq 24(%0), %%mm3\n"
22233 - " movq %%mm0, (%1)\n"
22234 - " movq %%mm1, 8(%1)\n"
22235 - " movq %%mm2, 16(%1)\n"
22236 - " movq %%mm3, 24(%1)\n"
22237 - " movq 32(%0), %%mm0\n"
22238 - " movq 40(%0), %%mm1\n"
22239 - " movq 48(%0), %%mm2\n"
22240 - " movq 56(%0), %%mm3\n"
22241 - " movq %%mm0, 32(%1)\n"
22242 - " movq %%mm1, 40(%1)\n"
22243 - " movq %%mm2, 48(%1)\n"
22244 - " movq %%mm3, 56(%1)\n"
22245 + "1: prefetch 320(%1)\n"
22246 + "2: movq (%1), %%mm0\n"
22247 + " movq 8(%1), %%mm1\n"
22248 + " movq 16(%1), %%mm2\n"
22249 + " movq 24(%1), %%mm3\n"
22250 + " movq %%mm0, (%2)\n"
22251 + " movq %%mm1, 8(%2)\n"
22252 + " movq %%mm2, 16(%2)\n"
22253 + " movq %%mm3, 24(%2)\n"
22254 + " movq 32(%1), %%mm0\n"
22255 + " movq 40(%1), %%mm1\n"
22256 + " movq 48(%1), %%mm2\n"
22257 + " movq 56(%1), %%mm3\n"
22258 + " movq %%mm0, 32(%2)\n"
22259 + " movq %%mm1, 40(%2)\n"
22260 + " movq %%mm2, 48(%2)\n"
22261 + " movq %%mm3, 56(%2)\n"
22262 ".section .fixup, \"ax\"\n"
22263 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22264 + "3:\n"
22265 +
22266 +#ifdef CONFIG_PAX_KERNEXEC
22267 + " movl %%cr0, %0\n"
22268 + " movl %0, %%eax\n"
22269 + " andl $0xFFFEFFFF, %%eax\n"
22270 + " movl %%eax, %%cr0\n"
22271 +#endif
22272 +
22273 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22274 +
22275 +#ifdef CONFIG_PAX_KERNEXEC
22276 + " movl %0, %%cr0\n"
22277 +#endif
22278 +
22279 " jmp 2b\n"
22280 ".previous\n"
22281 _ASM_EXTABLE(1b, 3b)
22282 - : : "r" (from), "r" (to) : "memory");
22283 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22284
22285 from += 64;
22286 to += 64;
22287 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22288 static void fast_copy_page(void *to, void *from)
22289 {
22290 int i;
22291 + unsigned long cr0;
22292
22293 kernel_fpu_begin();
22294
22295 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22296 * but that is for later. -AV
22297 */
22298 __asm__ __volatile__(
22299 - "1: prefetch (%0)\n"
22300 - " prefetch 64(%0)\n"
22301 - " prefetch 128(%0)\n"
22302 - " prefetch 192(%0)\n"
22303 - " prefetch 256(%0)\n"
22304 + "1: prefetch (%1)\n"
22305 + " prefetch 64(%1)\n"
22306 + " prefetch 128(%1)\n"
22307 + " prefetch 192(%1)\n"
22308 + " prefetch 256(%1)\n"
22309 "2: \n"
22310 ".section .fixup, \"ax\"\n"
22311 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22312 + "3: \n"
22313 +
22314 +#ifdef CONFIG_PAX_KERNEXEC
22315 + " movl %%cr0, %0\n"
22316 + " movl %0, %%eax\n"
22317 + " andl $0xFFFEFFFF, %%eax\n"
22318 + " movl %%eax, %%cr0\n"
22319 +#endif
22320 +
22321 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22322 +
22323 +#ifdef CONFIG_PAX_KERNEXEC
22324 + " movl %0, %%cr0\n"
22325 +#endif
22326 +
22327 " jmp 2b\n"
22328 ".previous\n"
22329 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22330 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22331
22332 for (i = 0; i < (4096-320)/64; i++) {
22333 __asm__ __volatile__ (
22334 - "1: prefetch 320(%0)\n"
22335 - "2: movq (%0), %%mm0\n"
22336 - " movntq %%mm0, (%1)\n"
22337 - " movq 8(%0), %%mm1\n"
22338 - " movntq %%mm1, 8(%1)\n"
22339 - " movq 16(%0), %%mm2\n"
22340 - " movntq %%mm2, 16(%1)\n"
22341 - " movq 24(%0), %%mm3\n"
22342 - " movntq %%mm3, 24(%1)\n"
22343 - " movq 32(%0), %%mm4\n"
22344 - " movntq %%mm4, 32(%1)\n"
22345 - " movq 40(%0), %%mm5\n"
22346 - " movntq %%mm5, 40(%1)\n"
22347 - " movq 48(%0), %%mm6\n"
22348 - " movntq %%mm6, 48(%1)\n"
22349 - " movq 56(%0), %%mm7\n"
22350 - " movntq %%mm7, 56(%1)\n"
22351 + "1: prefetch 320(%1)\n"
22352 + "2: movq (%1), %%mm0\n"
22353 + " movntq %%mm0, (%2)\n"
22354 + " movq 8(%1), %%mm1\n"
22355 + " movntq %%mm1, 8(%2)\n"
22356 + " movq 16(%1), %%mm2\n"
22357 + " movntq %%mm2, 16(%2)\n"
22358 + " movq 24(%1), %%mm3\n"
22359 + " movntq %%mm3, 24(%2)\n"
22360 + " movq 32(%1), %%mm4\n"
22361 + " movntq %%mm4, 32(%2)\n"
22362 + " movq 40(%1), %%mm5\n"
22363 + " movntq %%mm5, 40(%2)\n"
22364 + " movq 48(%1), %%mm6\n"
22365 + " movntq %%mm6, 48(%2)\n"
22366 + " movq 56(%1), %%mm7\n"
22367 + " movntq %%mm7, 56(%2)\n"
22368 ".section .fixup, \"ax\"\n"
22369 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22370 + "3:\n"
22371 +
22372 +#ifdef CONFIG_PAX_KERNEXEC
22373 + " movl %%cr0, %0\n"
22374 + " movl %0, %%eax\n"
22375 + " andl $0xFFFEFFFF, %%eax\n"
22376 + " movl %%eax, %%cr0\n"
22377 +#endif
22378 +
22379 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22380 +
22381 +#ifdef CONFIG_PAX_KERNEXEC
22382 + " movl %0, %%cr0\n"
22383 +#endif
22384 +
22385 " jmp 2b\n"
22386 ".previous\n"
22387 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22388 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22389
22390 from += 64;
22391 to += 64;
22392 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22393 static void fast_copy_page(void *to, void *from)
22394 {
22395 int i;
22396 + unsigned long cr0;
22397
22398 kernel_fpu_begin();
22399
22400 __asm__ __volatile__ (
22401 - "1: prefetch (%0)\n"
22402 - " prefetch 64(%0)\n"
22403 - " prefetch 128(%0)\n"
22404 - " prefetch 192(%0)\n"
22405 - " prefetch 256(%0)\n"
22406 + "1: prefetch (%1)\n"
22407 + " prefetch 64(%1)\n"
22408 + " prefetch 128(%1)\n"
22409 + " prefetch 192(%1)\n"
22410 + " prefetch 256(%1)\n"
22411 "2: \n"
22412 ".section .fixup, \"ax\"\n"
22413 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22414 + "3: \n"
22415 +
22416 +#ifdef CONFIG_PAX_KERNEXEC
22417 + " movl %%cr0, %0\n"
22418 + " movl %0, %%eax\n"
22419 + " andl $0xFFFEFFFF, %%eax\n"
22420 + " movl %%eax, %%cr0\n"
22421 +#endif
22422 +
22423 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22424 +
22425 +#ifdef CONFIG_PAX_KERNEXEC
22426 + " movl %0, %%cr0\n"
22427 +#endif
22428 +
22429 " jmp 2b\n"
22430 ".previous\n"
22431 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22432 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22433
22434 for (i = 0; i < 4096/64; i++) {
22435 __asm__ __volatile__ (
22436 - "1: prefetch 320(%0)\n"
22437 - "2: movq (%0), %%mm0\n"
22438 - " movq 8(%0), %%mm1\n"
22439 - " movq 16(%0), %%mm2\n"
22440 - " movq 24(%0), %%mm3\n"
22441 - " movq %%mm0, (%1)\n"
22442 - " movq %%mm1, 8(%1)\n"
22443 - " movq %%mm2, 16(%1)\n"
22444 - " movq %%mm3, 24(%1)\n"
22445 - " movq 32(%0), %%mm0\n"
22446 - " movq 40(%0), %%mm1\n"
22447 - " movq 48(%0), %%mm2\n"
22448 - " movq 56(%0), %%mm3\n"
22449 - " movq %%mm0, 32(%1)\n"
22450 - " movq %%mm1, 40(%1)\n"
22451 - " movq %%mm2, 48(%1)\n"
22452 - " movq %%mm3, 56(%1)\n"
22453 + "1: prefetch 320(%1)\n"
22454 + "2: movq (%1), %%mm0\n"
22455 + " movq 8(%1), %%mm1\n"
22456 + " movq 16(%1), %%mm2\n"
22457 + " movq 24(%1), %%mm3\n"
22458 + " movq %%mm0, (%2)\n"
22459 + " movq %%mm1, 8(%2)\n"
22460 + " movq %%mm2, 16(%2)\n"
22461 + " movq %%mm3, 24(%2)\n"
22462 + " movq 32(%1), %%mm0\n"
22463 + " movq 40(%1), %%mm1\n"
22464 + " movq 48(%1), %%mm2\n"
22465 + " movq 56(%1), %%mm3\n"
22466 + " movq %%mm0, 32(%2)\n"
22467 + " movq %%mm1, 40(%2)\n"
22468 + " movq %%mm2, 48(%2)\n"
22469 + " movq %%mm3, 56(%2)\n"
22470 ".section .fixup, \"ax\"\n"
22471 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22472 + "3:\n"
22473 +
22474 +#ifdef CONFIG_PAX_KERNEXEC
22475 + " movl %%cr0, %0\n"
22476 + " movl %0, %%eax\n"
22477 + " andl $0xFFFEFFFF, %%eax\n"
22478 + " movl %%eax, %%cr0\n"
22479 +#endif
22480 +
22481 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22482 +
22483 +#ifdef CONFIG_PAX_KERNEXEC
22484 + " movl %0, %%cr0\n"
22485 +#endif
22486 +
22487 " jmp 2b\n"
22488 ".previous\n"
22489 _ASM_EXTABLE(1b, 3b)
22490 - : : "r" (from), "r" (to) : "memory");
22491 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22492
22493 from += 64;
22494 to += 64;
22495 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22496 index 69fa106..adda88b 100644
22497 --- a/arch/x86/lib/msr-reg.S
22498 +++ b/arch/x86/lib/msr-reg.S
22499 @@ -3,6 +3,7 @@
22500 #include <asm/dwarf2.h>
22501 #include <asm/asm.h>
22502 #include <asm/msr.h>
22503 +#include <asm/alternative-asm.h>
22504
22505 #ifdef CONFIG_X86_64
22506 /*
22507 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22508 CFI_STARTPROC
22509 pushq_cfi %rbx
22510 pushq_cfi %rbp
22511 - movq %rdi, %r10 /* Save pointer */
22512 + movq %rdi, %r9 /* Save pointer */
22513 xorl %r11d, %r11d /* Return value */
22514 movl (%rdi), %eax
22515 movl 4(%rdi), %ecx
22516 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22517 movl 28(%rdi), %edi
22518 CFI_REMEMBER_STATE
22519 1: \op
22520 -2: movl %eax, (%r10)
22521 +2: movl %eax, (%r9)
22522 movl %r11d, %eax /* Return value */
22523 - movl %ecx, 4(%r10)
22524 - movl %edx, 8(%r10)
22525 - movl %ebx, 12(%r10)
22526 - movl %ebp, 20(%r10)
22527 - movl %esi, 24(%r10)
22528 - movl %edi, 28(%r10)
22529 + movl %ecx, 4(%r9)
22530 + movl %edx, 8(%r9)
22531 + movl %ebx, 12(%r9)
22532 + movl %ebp, 20(%r9)
22533 + movl %esi, 24(%r9)
22534 + movl %edi, 28(%r9)
22535 popq_cfi %rbp
22536 popq_cfi %rbx
22537 + pax_force_retaddr
22538 ret
22539 3:
22540 CFI_RESTORE_STATE
22541 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22542 index 36b0d15..d381858 100644
22543 --- a/arch/x86/lib/putuser.S
22544 +++ b/arch/x86/lib/putuser.S
22545 @@ -15,7 +15,9 @@
22546 #include <asm/thread_info.h>
22547 #include <asm/errno.h>
22548 #include <asm/asm.h>
22549 -
22550 +#include <asm/segment.h>
22551 +#include <asm/pgtable.h>
22552 +#include <asm/alternative-asm.h>
22553
22554 /*
22555 * __put_user_X
22556 @@ -29,52 +31,119 @@
22557 * as they get called from within inline assembly.
22558 */
22559
22560 -#define ENTER CFI_STARTPROC ; \
22561 - GET_THREAD_INFO(%_ASM_BX)
22562 -#define EXIT ret ; \
22563 +#define ENTER CFI_STARTPROC
22564 +#define EXIT pax_force_retaddr; ret ; \
22565 CFI_ENDPROC
22566
22567 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22568 +#define _DEST %_ASM_CX,%_ASM_BX
22569 +#else
22570 +#define _DEST %_ASM_CX
22571 +#endif
22572 +
22573 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22574 +#define __copyuser_seg gs;
22575 +#else
22576 +#define __copyuser_seg
22577 +#endif
22578 +
22579 .text
22580 ENTRY(__put_user_1)
22581 ENTER
22582 +
22583 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22584 + GET_THREAD_INFO(%_ASM_BX)
22585 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22586 jae bad_put_user
22587 -1: movb %al,(%_ASM_CX)
22588 +
22589 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22590 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22591 + cmp %_ASM_BX,%_ASM_CX
22592 + jb 1234f
22593 + xor %ebx,%ebx
22594 +1234:
22595 +#endif
22596 +
22597 +#endif
22598 +
22599 +1: __copyuser_seg movb %al,(_DEST)
22600 xor %eax,%eax
22601 EXIT
22602 ENDPROC(__put_user_1)
22603
22604 ENTRY(__put_user_2)
22605 ENTER
22606 +
22607 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22608 + GET_THREAD_INFO(%_ASM_BX)
22609 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22610 sub $1,%_ASM_BX
22611 cmp %_ASM_BX,%_ASM_CX
22612 jae bad_put_user
22613 -2: movw %ax,(%_ASM_CX)
22614 +
22615 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22616 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22617 + cmp %_ASM_BX,%_ASM_CX
22618 + jb 1234f
22619 + xor %ebx,%ebx
22620 +1234:
22621 +#endif
22622 +
22623 +#endif
22624 +
22625 +2: __copyuser_seg movw %ax,(_DEST)
22626 xor %eax,%eax
22627 EXIT
22628 ENDPROC(__put_user_2)
22629
22630 ENTRY(__put_user_4)
22631 ENTER
22632 +
22633 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22634 + GET_THREAD_INFO(%_ASM_BX)
22635 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22636 sub $3,%_ASM_BX
22637 cmp %_ASM_BX,%_ASM_CX
22638 jae bad_put_user
22639 -3: movl %eax,(%_ASM_CX)
22640 +
22641 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22642 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22643 + cmp %_ASM_BX,%_ASM_CX
22644 + jb 1234f
22645 + xor %ebx,%ebx
22646 +1234:
22647 +#endif
22648 +
22649 +#endif
22650 +
22651 +3: __copyuser_seg movl %eax,(_DEST)
22652 xor %eax,%eax
22653 EXIT
22654 ENDPROC(__put_user_4)
22655
22656 ENTRY(__put_user_8)
22657 ENTER
22658 +
22659 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22660 + GET_THREAD_INFO(%_ASM_BX)
22661 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22662 sub $7,%_ASM_BX
22663 cmp %_ASM_BX,%_ASM_CX
22664 jae bad_put_user
22665 -4: mov %_ASM_AX,(%_ASM_CX)
22666 +
22667 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22668 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22669 + cmp %_ASM_BX,%_ASM_CX
22670 + jb 1234f
22671 + xor %ebx,%ebx
22672 +1234:
22673 +#endif
22674 +
22675 +#endif
22676 +
22677 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
22678 #ifdef CONFIG_X86_32
22679 -5: movl %edx,4(%_ASM_CX)
22680 +5: __copyuser_seg movl %edx,4(_DEST)
22681 #endif
22682 xor %eax,%eax
22683 EXIT
22684 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22685 index 1cad221..de671ee 100644
22686 --- a/arch/x86/lib/rwlock.S
22687 +++ b/arch/x86/lib/rwlock.S
22688 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22689 FRAME
22690 0: LOCK_PREFIX
22691 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22692 +
22693 +#ifdef CONFIG_PAX_REFCOUNT
22694 + jno 1234f
22695 + LOCK_PREFIX
22696 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22697 + int $4
22698 +1234:
22699 + _ASM_EXTABLE(1234b, 1234b)
22700 +#endif
22701 +
22702 1: rep; nop
22703 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22704 jne 1b
22705 LOCK_PREFIX
22706 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22707 +
22708 +#ifdef CONFIG_PAX_REFCOUNT
22709 + jno 1234f
22710 + LOCK_PREFIX
22711 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22712 + int $4
22713 +1234:
22714 + _ASM_EXTABLE(1234b, 1234b)
22715 +#endif
22716 +
22717 jnz 0b
22718 ENDFRAME
22719 + pax_force_retaddr
22720 ret
22721 CFI_ENDPROC
22722 END(__write_lock_failed)
22723 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22724 FRAME
22725 0: LOCK_PREFIX
22726 READ_LOCK_SIZE(inc) (%__lock_ptr)
22727 +
22728 +#ifdef CONFIG_PAX_REFCOUNT
22729 + jno 1234f
22730 + LOCK_PREFIX
22731 + READ_LOCK_SIZE(dec) (%__lock_ptr)
22732 + int $4
22733 +1234:
22734 + _ASM_EXTABLE(1234b, 1234b)
22735 +#endif
22736 +
22737 1: rep; nop
22738 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22739 js 1b
22740 LOCK_PREFIX
22741 READ_LOCK_SIZE(dec) (%__lock_ptr)
22742 +
22743 +#ifdef CONFIG_PAX_REFCOUNT
22744 + jno 1234f
22745 + LOCK_PREFIX
22746 + READ_LOCK_SIZE(inc) (%__lock_ptr)
22747 + int $4
22748 +1234:
22749 + _ASM_EXTABLE(1234b, 1234b)
22750 +#endif
22751 +
22752 js 0b
22753 ENDFRAME
22754 + pax_force_retaddr
22755 ret
22756 CFI_ENDPROC
22757 END(__read_lock_failed)
22758 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22759 index 5dff5f0..cadebf4 100644
22760 --- a/arch/x86/lib/rwsem.S
22761 +++ b/arch/x86/lib/rwsem.S
22762 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22763 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22764 CFI_RESTORE __ASM_REG(dx)
22765 restore_common_regs
22766 + pax_force_retaddr
22767 ret
22768 CFI_ENDPROC
22769 ENDPROC(call_rwsem_down_read_failed)
22770 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22771 movq %rax,%rdi
22772 call rwsem_down_write_failed
22773 restore_common_regs
22774 + pax_force_retaddr
22775 ret
22776 CFI_ENDPROC
22777 ENDPROC(call_rwsem_down_write_failed)
22778 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22779 movq %rax,%rdi
22780 call rwsem_wake
22781 restore_common_regs
22782 -1: ret
22783 +1: pax_force_retaddr
22784 + ret
22785 CFI_ENDPROC
22786 ENDPROC(call_rwsem_wake)
22787
22788 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22789 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22790 CFI_RESTORE __ASM_REG(dx)
22791 restore_common_regs
22792 + pax_force_retaddr
22793 ret
22794 CFI_ENDPROC
22795 ENDPROC(call_rwsem_downgrade_wake)
22796 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22797 index a63efd6..ccecad8 100644
22798 --- a/arch/x86/lib/thunk_64.S
22799 +++ b/arch/x86/lib/thunk_64.S
22800 @@ -8,6 +8,7 @@
22801 #include <linux/linkage.h>
22802 #include <asm/dwarf2.h>
22803 #include <asm/calling.h>
22804 +#include <asm/alternative-asm.h>
22805
22806 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22807 .macro THUNK name, func, put_ret_addr_in_rdi=0
22808 @@ -41,5 +42,6 @@
22809 SAVE_ARGS
22810 restore:
22811 RESTORE_ARGS
22812 + pax_force_retaddr
22813 ret
22814 CFI_ENDPROC
22815 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22816 index ef2a6a5..3b28862 100644
22817 --- a/arch/x86/lib/usercopy_32.c
22818 +++ b/arch/x86/lib/usercopy_32.c
22819 @@ -41,10 +41,12 @@ do { \
22820 int __d0; \
22821 might_fault(); \
22822 __asm__ __volatile__( \
22823 + __COPYUSER_SET_ES \
22824 "0: rep; stosl\n" \
22825 " movl %2,%0\n" \
22826 "1: rep; stosb\n" \
22827 "2:\n" \
22828 + __COPYUSER_RESTORE_ES \
22829 ".section .fixup,\"ax\"\n" \
22830 "3: lea 0(%2,%0,4),%0\n" \
22831 " jmp 2b\n" \
22832 @@ -113,6 +115,7 @@ long strnlen_user(const char __user *s, long n)
22833 might_fault();
22834
22835 __asm__ __volatile__(
22836 + __COPYUSER_SET_ES
22837 " testl %0, %0\n"
22838 " jz 3f\n"
22839 " andl %0,%%ecx\n"
22840 @@ -121,6 +124,7 @@ long strnlen_user(const char __user *s, long n)
22841 " subl %%ecx,%0\n"
22842 " addl %0,%%eax\n"
22843 "1:\n"
22844 + __COPYUSER_RESTORE_ES
22845 ".section .fixup,\"ax\"\n"
22846 "2: xorl %%eax,%%eax\n"
22847 " jmp 1b\n"
22848 @@ -140,7 +144,7 @@ EXPORT_SYMBOL(strnlen_user);
22849
22850 #ifdef CONFIG_X86_INTEL_USERCOPY
22851 static unsigned long
22852 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
22853 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22854 {
22855 int d0, d1;
22856 __asm__ __volatile__(
22857 @@ -152,36 +156,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22858 " .align 2,0x90\n"
22859 "3: movl 0(%4), %%eax\n"
22860 "4: movl 4(%4), %%edx\n"
22861 - "5: movl %%eax, 0(%3)\n"
22862 - "6: movl %%edx, 4(%3)\n"
22863 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22864 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22865 "7: movl 8(%4), %%eax\n"
22866 "8: movl 12(%4),%%edx\n"
22867 - "9: movl %%eax, 8(%3)\n"
22868 - "10: movl %%edx, 12(%3)\n"
22869 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22870 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22871 "11: movl 16(%4), %%eax\n"
22872 "12: movl 20(%4), %%edx\n"
22873 - "13: movl %%eax, 16(%3)\n"
22874 - "14: movl %%edx, 20(%3)\n"
22875 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22876 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22877 "15: movl 24(%4), %%eax\n"
22878 "16: movl 28(%4), %%edx\n"
22879 - "17: movl %%eax, 24(%3)\n"
22880 - "18: movl %%edx, 28(%3)\n"
22881 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22882 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22883 "19: movl 32(%4), %%eax\n"
22884 "20: movl 36(%4), %%edx\n"
22885 - "21: movl %%eax, 32(%3)\n"
22886 - "22: movl %%edx, 36(%3)\n"
22887 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22888 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22889 "23: movl 40(%4), %%eax\n"
22890 "24: movl 44(%4), %%edx\n"
22891 - "25: movl %%eax, 40(%3)\n"
22892 - "26: movl %%edx, 44(%3)\n"
22893 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22894 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22895 "27: movl 48(%4), %%eax\n"
22896 "28: movl 52(%4), %%edx\n"
22897 - "29: movl %%eax, 48(%3)\n"
22898 - "30: movl %%edx, 52(%3)\n"
22899 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22900 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22901 "31: movl 56(%4), %%eax\n"
22902 "32: movl 60(%4), %%edx\n"
22903 - "33: movl %%eax, 56(%3)\n"
22904 - "34: movl %%edx, 60(%3)\n"
22905 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22906 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22907 " addl $-64, %0\n"
22908 " addl $64, %4\n"
22909 " addl $64, %3\n"
22910 @@ -191,10 +195,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22911 " shrl $2, %0\n"
22912 " andl $3, %%eax\n"
22913 " cld\n"
22914 + __COPYUSER_SET_ES
22915 "99: rep; movsl\n"
22916 "36: movl %%eax, %0\n"
22917 "37: rep; movsb\n"
22918 "100:\n"
22919 + __COPYUSER_RESTORE_ES
22920 ".section .fixup,\"ax\"\n"
22921 "101: lea 0(%%eax,%0,4),%0\n"
22922 " jmp 100b\n"
22923 @@ -247,46 +253,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22924 }
22925
22926 static unsigned long
22927 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
22928 +{
22929 + int d0, d1;
22930 + __asm__ __volatile__(
22931 + " .align 2,0x90\n"
22932 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
22933 + " cmpl $67, %0\n"
22934 + " jbe 3f\n"
22935 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
22936 + " .align 2,0x90\n"
22937 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
22938 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
22939 + "5: movl %%eax, 0(%3)\n"
22940 + "6: movl %%edx, 4(%3)\n"
22941 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
22942 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
22943 + "9: movl %%eax, 8(%3)\n"
22944 + "10: movl %%edx, 12(%3)\n"
22945 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
22946 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
22947 + "13: movl %%eax, 16(%3)\n"
22948 + "14: movl %%edx, 20(%3)\n"
22949 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
22950 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
22951 + "17: movl %%eax, 24(%3)\n"
22952 + "18: movl %%edx, 28(%3)\n"
22953 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
22954 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
22955 + "21: movl %%eax, 32(%3)\n"
22956 + "22: movl %%edx, 36(%3)\n"
22957 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
22958 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
22959 + "25: movl %%eax, 40(%3)\n"
22960 + "26: movl %%edx, 44(%3)\n"
22961 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
22962 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
22963 + "29: movl %%eax, 48(%3)\n"
22964 + "30: movl %%edx, 52(%3)\n"
22965 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
22966 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
22967 + "33: movl %%eax, 56(%3)\n"
22968 + "34: movl %%edx, 60(%3)\n"
22969 + " addl $-64, %0\n"
22970 + " addl $64, %4\n"
22971 + " addl $64, %3\n"
22972 + " cmpl $63, %0\n"
22973 + " ja 1b\n"
22974 + "35: movl %0, %%eax\n"
22975 + " shrl $2, %0\n"
22976 + " andl $3, %%eax\n"
22977 + " cld\n"
22978 + "99: rep; "__copyuser_seg" movsl\n"
22979 + "36: movl %%eax, %0\n"
22980 + "37: rep; "__copyuser_seg" movsb\n"
22981 + "100:\n"
22982 + ".section .fixup,\"ax\"\n"
22983 + "101: lea 0(%%eax,%0,4),%0\n"
22984 + " jmp 100b\n"
22985 + ".previous\n"
22986 + ".section __ex_table,\"a\"\n"
22987 + " .align 4\n"
22988 + " .long 1b,100b\n"
22989 + " .long 2b,100b\n"
22990 + " .long 3b,100b\n"
22991 + " .long 4b,100b\n"
22992 + " .long 5b,100b\n"
22993 + " .long 6b,100b\n"
22994 + " .long 7b,100b\n"
22995 + " .long 8b,100b\n"
22996 + " .long 9b,100b\n"
22997 + " .long 10b,100b\n"
22998 + " .long 11b,100b\n"
22999 + " .long 12b,100b\n"
23000 + " .long 13b,100b\n"
23001 + " .long 14b,100b\n"
23002 + " .long 15b,100b\n"
23003 + " .long 16b,100b\n"
23004 + " .long 17b,100b\n"
23005 + " .long 18b,100b\n"
23006 + " .long 19b,100b\n"
23007 + " .long 20b,100b\n"
23008 + " .long 21b,100b\n"
23009 + " .long 22b,100b\n"
23010 + " .long 23b,100b\n"
23011 + " .long 24b,100b\n"
23012 + " .long 25b,100b\n"
23013 + " .long 26b,100b\n"
23014 + " .long 27b,100b\n"
23015 + " .long 28b,100b\n"
23016 + " .long 29b,100b\n"
23017 + " .long 30b,100b\n"
23018 + " .long 31b,100b\n"
23019 + " .long 32b,100b\n"
23020 + " .long 33b,100b\n"
23021 + " .long 34b,100b\n"
23022 + " .long 35b,100b\n"
23023 + " .long 36b,100b\n"
23024 + " .long 37b,100b\n"
23025 + " .long 99b,101b\n"
23026 + ".previous"
23027 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
23028 + : "1"(to), "2"(from), "0"(size)
23029 + : "eax", "edx", "memory");
23030 + return size;
23031 +}
23032 +
23033 +static unsigned long
23034 +__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
23035 +static unsigned long
23036 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23037 {
23038 int d0, d1;
23039 __asm__ __volatile__(
23040 " .align 2,0x90\n"
23041 - "0: movl 32(%4), %%eax\n"
23042 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23043 " cmpl $67, %0\n"
23044 " jbe 2f\n"
23045 - "1: movl 64(%4), %%eax\n"
23046 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23047 " .align 2,0x90\n"
23048 - "2: movl 0(%4), %%eax\n"
23049 - "21: movl 4(%4), %%edx\n"
23050 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23051 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23052 " movl %%eax, 0(%3)\n"
23053 " movl %%edx, 4(%3)\n"
23054 - "3: movl 8(%4), %%eax\n"
23055 - "31: movl 12(%4),%%edx\n"
23056 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23057 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23058 " movl %%eax, 8(%3)\n"
23059 " movl %%edx, 12(%3)\n"
23060 - "4: movl 16(%4), %%eax\n"
23061 - "41: movl 20(%4), %%edx\n"
23062 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23063 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23064 " movl %%eax, 16(%3)\n"
23065 " movl %%edx, 20(%3)\n"
23066 - "10: movl 24(%4), %%eax\n"
23067 - "51: movl 28(%4), %%edx\n"
23068 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23069 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23070 " movl %%eax, 24(%3)\n"
23071 " movl %%edx, 28(%3)\n"
23072 - "11: movl 32(%4), %%eax\n"
23073 - "61: movl 36(%4), %%edx\n"
23074 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23075 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23076 " movl %%eax, 32(%3)\n"
23077 " movl %%edx, 36(%3)\n"
23078 - "12: movl 40(%4), %%eax\n"
23079 - "71: movl 44(%4), %%edx\n"
23080 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23081 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23082 " movl %%eax, 40(%3)\n"
23083 " movl %%edx, 44(%3)\n"
23084 - "13: movl 48(%4), %%eax\n"
23085 - "81: movl 52(%4), %%edx\n"
23086 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23087 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23088 " movl %%eax, 48(%3)\n"
23089 " movl %%edx, 52(%3)\n"
23090 - "14: movl 56(%4), %%eax\n"
23091 - "91: movl 60(%4), %%edx\n"
23092 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23093 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23094 " movl %%eax, 56(%3)\n"
23095 " movl %%edx, 60(%3)\n"
23096 " addl $-64, %0\n"
23097 @@ -298,9 +413,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23098 " shrl $2, %0\n"
23099 " andl $3, %%eax\n"
23100 " cld\n"
23101 - "6: rep; movsl\n"
23102 + "6: rep; "__copyuser_seg" movsl\n"
23103 " movl %%eax,%0\n"
23104 - "7: rep; movsb\n"
23105 + "7: rep; "__copyuser_seg" movsb\n"
23106 "8:\n"
23107 ".section .fixup,\"ax\"\n"
23108 "9: lea 0(%%eax,%0,4),%0\n"
23109 @@ -347,47 +462,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23110 */
23111
23112 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23113 + const void __user *from, unsigned long size) __size_overflow(3);
23114 +static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23115 const void __user *from, unsigned long size)
23116 {
23117 int d0, d1;
23118
23119 __asm__ __volatile__(
23120 " .align 2,0x90\n"
23121 - "0: movl 32(%4), %%eax\n"
23122 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23123 " cmpl $67, %0\n"
23124 " jbe 2f\n"
23125 - "1: movl 64(%4), %%eax\n"
23126 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23127 " .align 2,0x90\n"
23128 - "2: movl 0(%4), %%eax\n"
23129 - "21: movl 4(%4), %%edx\n"
23130 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23131 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23132 " movnti %%eax, 0(%3)\n"
23133 " movnti %%edx, 4(%3)\n"
23134 - "3: movl 8(%4), %%eax\n"
23135 - "31: movl 12(%4),%%edx\n"
23136 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23137 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23138 " movnti %%eax, 8(%3)\n"
23139 " movnti %%edx, 12(%3)\n"
23140 - "4: movl 16(%4), %%eax\n"
23141 - "41: movl 20(%4), %%edx\n"
23142 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23143 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23144 " movnti %%eax, 16(%3)\n"
23145 " movnti %%edx, 20(%3)\n"
23146 - "10: movl 24(%4), %%eax\n"
23147 - "51: movl 28(%4), %%edx\n"
23148 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23149 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23150 " movnti %%eax, 24(%3)\n"
23151 " movnti %%edx, 28(%3)\n"
23152 - "11: movl 32(%4), %%eax\n"
23153 - "61: movl 36(%4), %%edx\n"
23154 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23155 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23156 " movnti %%eax, 32(%3)\n"
23157 " movnti %%edx, 36(%3)\n"
23158 - "12: movl 40(%4), %%eax\n"
23159 - "71: movl 44(%4), %%edx\n"
23160 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23161 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23162 " movnti %%eax, 40(%3)\n"
23163 " movnti %%edx, 44(%3)\n"
23164 - "13: movl 48(%4), %%eax\n"
23165 - "81: movl 52(%4), %%edx\n"
23166 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23167 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23168 " movnti %%eax, 48(%3)\n"
23169 " movnti %%edx, 52(%3)\n"
23170 - "14: movl 56(%4), %%eax\n"
23171 - "91: movl 60(%4), %%edx\n"
23172 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23173 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23174 " movnti %%eax, 56(%3)\n"
23175 " movnti %%edx, 60(%3)\n"
23176 " addl $-64, %0\n"
23177 @@ -400,9 +517,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23178 " shrl $2, %0\n"
23179 " andl $3, %%eax\n"
23180 " cld\n"
23181 - "6: rep; movsl\n"
23182 + "6: rep; "__copyuser_seg" movsl\n"
23183 " movl %%eax,%0\n"
23184 - "7: rep; movsb\n"
23185 + "7: rep; "__copyuser_seg" movsb\n"
23186 "8:\n"
23187 ".section .fixup,\"ax\"\n"
23188 "9: lea 0(%%eax,%0,4),%0\n"
23189 @@ -444,47 +561,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23190 }
23191
23192 static unsigned long __copy_user_intel_nocache(void *to,
23193 + const void __user *from, unsigned long size) __size_overflow(3);
23194 +static unsigned long __copy_user_intel_nocache(void *to,
23195 const void __user *from, unsigned long size)
23196 {
23197 int d0, d1;
23198
23199 __asm__ __volatile__(
23200 " .align 2,0x90\n"
23201 - "0: movl 32(%4), %%eax\n"
23202 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23203 " cmpl $67, %0\n"
23204 " jbe 2f\n"
23205 - "1: movl 64(%4), %%eax\n"
23206 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23207 " .align 2,0x90\n"
23208 - "2: movl 0(%4), %%eax\n"
23209 - "21: movl 4(%4), %%edx\n"
23210 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23211 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23212 " movnti %%eax, 0(%3)\n"
23213 " movnti %%edx, 4(%3)\n"
23214 - "3: movl 8(%4), %%eax\n"
23215 - "31: movl 12(%4),%%edx\n"
23216 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23217 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23218 " movnti %%eax, 8(%3)\n"
23219 " movnti %%edx, 12(%3)\n"
23220 - "4: movl 16(%4), %%eax\n"
23221 - "41: movl 20(%4), %%edx\n"
23222 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23223 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23224 " movnti %%eax, 16(%3)\n"
23225 " movnti %%edx, 20(%3)\n"
23226 - "10: movl 24(%4), %%eax\n"
23227 - "51: movl 28(%4), %%edx\n"
23228 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23229 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23230 " movnti %%eax, 24(%3)\n"
23231 " movnti %%edx, 28(%3)\n"
23232 - "11: movl 32(%4), %%eax\n"
23233 - "61: movl 36(%4), %%edx\n"
23234 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23235 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23236 " movnti %%eax, 32(%3)\n"
23237 " movnti %%edx, 36(%3)\n"
23238 - "12: movl 40(%4), %%eax\n"
23239 - "71: movl 44(%4), %%edx\n"
23240 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23241 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23242 " movnti %%eax, 40(%3)\n"
23243 " movnti %%edx, 44(%3)\n"
23244 - "13: movl 48(%4), %%eax\n"
23245 - "81: movl 52(%4), %%edx\n"
23246 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23247 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23248 " movnti %%eax, 48(%3)\n"
23249 " movnti %%edx, 52(%3)\n"
23250 - "14: movl 56(%4), %%eax\n"
23251 - "91: movl 60(%4), %%edx\n"
23252 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23253 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23254 " movnti %%eax, 56(%3)\n"
23255 " movnti %%edx, 60(%3)\n"
23256 " addl $-64, %0\n"
23257 @@ -497,9 +616,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23258 " shrl $2, %0\n"
23259 " andl $3, %%eax\n"
23260 " cld\n"
23261 - "6: rep; movsl\n"
23262 + "6: rep; "__copyuser_seg" movsl\n"
23263 " movl %%eax,%0\n"
23264 - "7: rep; movsb\n"
23265 + "7: rep; "__copyuser_seg" movsb\n"
23266 "8:\n"
23267 ".section .fixup,\"ax\"\n"
23268 "9: lea 0(%%eax,%0,4),%0\n"
23269 @@ -542,32 +661,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23270 */
23271 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23272 unsigned long size);
23273 -unsigned long __copy_user_intel(void __user *to, const void *from,
23274 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23275 + unsigned long size);
23276 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23277 unsigned long size);
23278 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23279 const void __user *from, unsigned long size);
23280 #endif /* CONFIG_X86_INTEL_USERCOPY */
23281
23282 /* Generic arbitrary sized copy. */
23283 -#define __copy_user(to, from, size) \
23284 +#define __copy_user(to, from, size, prefix, set, restore) \
23285 do { \
23286 int __d0, __d1, __d2; \
23287 __asm__ __volatile__( \
23288 + set \
23289 " cmp $7,%0\n" \
23290 " jbe 1f\n" \
23291 " movl %1,%0\n" \
23292 " negl %0\n" \
23293 " andl $7,%0\n" \
23294 " subl %0,%3\n" \
23295 - "4: rep; movsb\n" \
23296 + "4: rep; "prefix"movsb\n" \
23297 " movl %3,%0\n" \
23298 " shrl $2,%0\n" \
23299 " andl $3,%3\n" \
23300 " .align 2,0x90\n" \
23301 - "0: rep; movsl\n" \
23302 + "0: rep; "prefix"movsl\n" \
23303 " movl %3,%0\n" \
23304 - "1: rep; movsb\n" \
23305 + "1: rep; "prefix"movsb\n" \
23306 "2:\n" \
23307 + restore \
23308 ".section .fixup,\"ax\"\n" \
23309 "5: addl %3,%0\n" \
23310 " jmp 2b\n" \
23311 @@ -595,14 +718,14 @@ do { \
23312 " negl %0\n" \
23313 " andl $7,%0\n" \
23314 " subl %0,%3\n" \
23315 - "4: rep; movsb\n" \
23316 + "4: rep; "__copyuser_seg"movsb\n" \
23317 " movl %3,%0\n" \
23318 " shrl $2,%0\n" \
23319 " andl $3,%3\n" \
23320 " .align 2,0x90\n" \
23321 - "0: rep; movsl\n" \
23322 + "0: rep; "__copyuser_seg"movsl\n" \
23323 " movl %3,%0\n" \
23324 - "1: rep; movsb\n" \
23325 + "1: rep; "__copyuser_seg"movsb\n" \
23326 "2:\n" \
23327 ".section .fixup,\"ax\"\n" \
23328 "5: addl %3,%0\n" \
23329 @@ -688,9 +811,9 @@ survive:
23330 }
23331 #endif
23332 if (movsl_is_ok(to, from, n))
23333 - __copy_user(to, from, n);
23334 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23335 else
23336 - n = __copy_user_intel(to, from, n);
23337 + n = __generic_copy_to_user_intel(to, from, n);
23338 return n;
23339 }
23340 EXPORT_SYMBOL(__copy_to_user_ll);
23341 @@ -710,10 +833,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23342 unsigned long n)
23343 {
23344 if (movsl_is_ok(to, from, n))
23345 - __copy_user(to, from, n);
23346 + __copy_user(to, from, n, __copyuser_seg, "", "");
23347 else
23348 - n = __copy_user_intel((void __user *)to,
23349 - (const void *)from, n);
23350 + n = __generic_copy_from_user_intel(to, from, n);
23351 return n;
23352 }
23353 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23354 @@ -740,65 +862,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23355 if (n > 64 && cpu_has_xmm2)
23356 n = __copy_user_intel_nocache(to, from, n);
23357 else
23358 - __copy_user(to, from, n);
23359 + __copy_user(to, from, n, __copyuser_seg, "", "");
23360 #else
23361 - __copy_user(to, from, n);
23362 + __copy_user(to, from, n, __copyuser_seg, "", "");
23363 #endif
23364 return n;
23365 }
23366 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23367
23368 -/**
23369 - * copy_to_user: - Copy a block of data into user space.
23370 - * @to: Destination address, in user space.
23371 - * @from: Source address, in kernel space.
23372 - * @n: Number of bytes to copy.
23373 - *
23374 - * Context: User context only. This function may sleep.
23375 - *
23376 - * Copy data from kernel space to user space.
23377 - *
23378 - * Returns number of bytes that could not be copied.
23379 - * On success, this will be zero.
23380 - */
23381 -unsigned long
23382 -copy_to_user(void __user *to, const void *from, unsigned long n)
23383 -{
23384 - if (access_ok(VERIFY_WRITE, to, n))
23385 - n = __copy_to_user(to, from, n);
23386 - return n;
23387 -}
23388 -EXPORT_SYMBOL(copy_to_user);
23389 -
23390 -/**
23391 - * copy_from_user: - Copy a block of data from user space.
23392 - * @to: Destination address, in kernel space.
23393 - * @from: Source address, in user space.
23394 - * @n: Number of bytes to copy.
23395 - *
23396 - * Context: User context only. This function may sleep.
23397 - *
23398 - * Copy data from user space to kernel space.
23399 - *
23400 - * Returns number of bytes that could not be copied.
23401 - * On success, this will be zero.
23402 - *
23403 - * If some data could not be copied, this function will pad the copied
23404 - * data to the requested size using zero bytes.
23405 - */
23406 -unsigned long
23407 -_copy_from_user(void *to, const void __user *from, unsigned long n)
23408 -{
23409 - if (access_ok(VERIFY_READ, from, n))
23410 - n = __copy_from_user(to, from, n);
23411 - else
23412 - memset(to, 0, n);
23413 - return n;
23414 -}
23415 -EXPORT_SYMBOL(_copy_from_user);
23416 -
23417 void copy_from_user_overflow(void)
23418 {
23419 WARN(1, "Buffer overflow detected!\n");
23420 }
23421 EXPORT_SYMBOL(copy_from_user_overflow);
23422 +
23423 +void copy_to_user_overflow(void)
23424 +{
23425 + WARN(1, "Buffer overflow detected!\n");
23426 +}
23427 +EXPORT_SYMBOL(copy_to_user_overflow);
23428 +
23429 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23430 +void __set_fs(mm_segment_t x)
23431 +{
23432 + switch (x.seg) {
23433 + case 0:
23434 + loadsegment(gs, 0);
23435 + break;
23436 + case TASK_SIZE_MAX:
23437 + loadsegment(gs, __USER_DS);
23438 + break;
23439 + case -1UL:
23440 + loadsegment(gs, __KERNEL_DS);
23441 + break;
23442 + default:
23443 + BUG();
23444 + }
23445 + return;
23446 +}
23447 +EXPORT_SYMBOL(__set_fs);
23448 +
23449 +void set_fs(mm_segment_t x)
23450 +{
23451 + current_thread_info()->addr_limit = x;
23452 + __set_fs(x);
23453 +}
23454 +EXPORT_SYMBOL(set_fs);
23455 +#endif
23456 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23457 index 0d0326f..5c5f91e 100644
23458 --- a/arch/x86/lib/usercopy_64.c
23459 +++ b/arch/x86/lib/usercopy_64.c
23460 @@ -16,6 +16,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23461 {
23462 long __d0;
23463 might_fault();
23464 +
23465 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23466 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23467 + addr += PAX_USER_SHADOW_BASE;
23468 +#endif
23469 +
23470 /* no memory constraint because it doesn't change any memory gcc knows
23471 about */
23472 asm volatile(
23473 @@ -100,12 +106,20 @@ long strlen_user(const char __user *s)
23474 }
23475 EXPORT_SYMBOL(strlen_user);
23476
23477 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23478 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23479 {
23480 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23481 - return copy_user_generic((__force void *)to, (__force void *)from, len);
23482 - }
23483 - return len;
23484 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23485 +
23486 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23487 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23488 + to += PAX_USER_SHADOW_BASE;
23489 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23490 + from += PAX_USER_SHADOW_BASE;
23491 +#endif
23492 +
23493 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23494 + }
23495 + return len;
23496 }
23497 EXPORT_SYMBOL(copy_in_user);
23498
23499 @@ -115,7 +129,7 @@ EXPORT_SYMBOL(copy_in_user);
23500 * it is not necessary to optimize tail handling.
23501 */
23502 unsigned long
23503 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23504 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23505 {
23506 char c;
23507 unsigned zero_len;
23508 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23509 index 1fb85db..8b3540b 100644
23510 --- a/arch/x86/mm/extable.c
23511 +++ b/arch/x86/mm/extable.c
23512 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
23513 const struct exception_table_entry *fixup;
23514
23515 #ifdef CONFIG_PNPBIOS
23516 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23517 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23518 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23519 extern u32 pnp_bios_is_utter_crap;
23520 pnp_bios_is_utter_crap = 1;
23521 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23522 index 3ecfd1a..304d554 100644
23523 --- a/arch/x86/mm/fault.c
23524 +++ b/arch/x86/mm/fault.c
23525 @@ -13,11 +13,18 @@
23526 #include <linux/perf_event.h> /* perf_sw_event */
23527 #include <linux/hugetlb.h> /* hstate_index_to_shift */
23528 #include <linux/prefetch.h> /* prefetchw */
23529 +#include <linux/unistd.h>
23530 +#include <linux/compiler.h>
23531
23532 #include <asm/traps.h> /* dotraplinkage, ... */
23533 #include <asm/pgalloc.h> /* pgd_*(), ... */
23534 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23535 #include <asm/fixmap.h> /* VSYSCALL_START */
23536 +#include <asm/tlbflush.h>
23537 +
23538 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23539 +#include <asm/stacktrace.h>
23540 +#endif
23541
23542 /*
23543 * Page fault error code bits:
23544 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
23545 int ret = 0;
23546
23547 /* kprobe_running() needs smp_processor_id() */
23548 - if (kprobes_built_in() && !user_mode_vm(regs)) {
23549 + if (kprobes_built_in() && !user_mode(regs)) {
23550 preempt_disable();
23551 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23552 ret = 1;
23553 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23554 return !instr_lo || (instr_lo>>1) == 1;
23555 case 0x00:
23556 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23557 - if (probe_kernel_address(instr, opcode))
23558 + if (user_mode(regs)) {
23559 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23560 + return 0;
23561 + } else if (probe_kernel_address(instr, opcode))
23562 return 0;
23563
23564 *prefetch = (instr_lo == 0xF) &&
23565 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23566 while (instr < max_instr) {
23567 unsigned char opcode;
23568
23569 - if (probe_kernel_address(instr, opcode))
23570 + if (user_mode(regs)) {
23571 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23572 + break;
23573 + } else if (probe_kernel_address(instr, opcode))
23574 break;
23575
23576 instr++;
23577 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23578 force_sig_info(si_signo, &info, tsk);
23579 }
23580
23581 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23582 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23583 +#endif
23584 +
23585 +#ifdef CONFIG_PAX_EMUTRAMP
23586 +static int pax_handle_fetch_fault(struct pt_regs *regs);
23587 +#endif
23588 +
23589 +#ifdef CONFIG_PAX_PAGEEXEC
23590 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23591 +{
23592 + pgd_t *pgd;
23593 + pud_t *pud;
23594 + pmd_t *pmd;
23595 +
23596 + pgd = pgd_offset(mm, address);
23597 + if (!pgd_present(*pgd))
23598 + return NULL;
23599 + pud = pud_offset(pgd, address);
23600 + if (!pud_present(*pud))
23601 + return NULL;
23602 + pmd = pmd_offset(pud, address);
23603 + if (!pmd_present(*pmd))
23604 + return NULL;
23605 + return pmd;
23606 +}
23607 +#endif
23608 +
23609 DEFINE_SPINLOCK(pgd_lock);
23610 LIST_HEAD(pgd_list);
23611
23612 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23613 for (address = VMALLOC_START & PMD_MASK;
23614 address >= TASK_SIZE && address < FIXADDR_TOP;
23615 address += PMD_SIZE) {
23616 +
23617 +#ifdef CONFIG_PAX_PER_CPU_PGD
23618 + unsigned long cpu;
23619 +#else
23620 struct page *page;
23621 +#endif
23622
23623 spin_lock(&pgd_lock);
23624 +
23625 +#ifdef CONFIG_PAX_PER_CPU_PGD
23626 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23627 + pgd_t *pgd = get_cpu_pgd(cpu);
23628 + pmd_t *ret;
23629 +#else
23630 list_for_each_entry(page, &pgd_list, lru) {
23631 + pgd_t *pgd = page_address(page);
23632 spinlock_t *pgt_lock;
23633 pmd_t *ret;
23634
23635 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23636 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23637
23638 spin_lock(pgt_lock);
23639 - ret = vmalloc_sync_one(page_address(page), address);
23640 +#endif
23641 +
23642 + ret = vmalloc_sync_one(pgd, address);
23643 +
23644 +#ifndef CONFIG_PAX_PER_CPU_PGD
23645 spin_unlock(pgt_lock);
23646 +#endif
23647
23648 if (!ret)
23649 break;
23650 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23651 * an interrupt in the middle of a task switch..
23652 */
23653 pgd_paddr = read_cr3();
23654 +
23655 +#ifdef CONFIG_PAX_PER_CPU_PGD
23656 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23657 +#endif
23658 +
23659 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23660 if (!pmd_k)
23661 return -1;
23662 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23663 * happen within a race in page table update. In the later
23664 * case just flush:
23665 */
23666 +
23667 +#ifdef CONFIG_PAX_PER_CPU_PGD
23668 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23669 + pgd = pgd_offset_cpu(smp_processor_id(), address);
23670 +#else
23671 pgd = pgd_offset(current->active_mm, address);
23672 +#endif
23673 +
23674 pgd_ref = pgd_offset_k(address);
23675 if (pgd_none(*pgd_ref))
23676 return -1;
23677 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23678 static int is_errata100(struct pt_regs *regs, unsigned long address)
23679 {
23680 #ifdef CONFIG_X86_64
23681 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23682 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23683 return 1;
23684 #endif
23685 return 0;
23686 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23687 }
23688
23689 static const char nx_warning[] = KERN_CRIT
23690 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23691 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23692
23693 static void
23694 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23695 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23696 if (!oops_may_print())
23697 return;
23698
23699 - if (error_code & PF_INSTR) {
23700 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23701 unsigned int level;
23702
23703 pte_t *pte = lookup_address(address, &level);
23704
23705 if (pte && pte_present(*pte) && !pte_exec(*pte))
23706 - printk(nx_warning, current_uid());
23707 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23708 }
23709
23710 +#ifdef CONFIG_PAX_KERNEXEC
23711 + if (init_mm.start_code <= address && address < init_mm.end_code) {
23712 + if (current->signal->curr_ip)
23713 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23714 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23715 + else
23716 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23717 + current->comm, task_pid_nr(current), current_uid(), current_euid());
23718 + }
23719 +#endif
23720 +
23721 printk(KERN_ALERT "BUG: unable to handle kernel ");
23722 if (address < PAGE_SIZE)
23723 printk(KERN_CONT "NULL pointer dereference");
23724 @@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23725 }
23726 #endif
23727
23728 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23729 + if (pax_is_fetch_fault(regs, error_code, address)) {
23730 +
23731 +#ifdef CONFIG_PAX_EMUTRAMP
23732 + switch (pax_handle_fetch_fault(regs)) {
23733 + case 2:
23734 + return;
23735 + }
23736 +#endif
23737 +
23738 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23739 + do_group_exit(SIGKILL);
23740 + }
23741 +#endif
23742 +
23743 if (unlikely(show_unhandled_signals))
23744 show_signal_msg(regs, error_code, address, tsk);
23745
23746 @@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23747 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23748 printk(KERN_ERR
23749 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23750 - tsk->comm, tsk->pid, address);
23751 + tsk->comm, task_pid_nr(tsk), address);
23752 code = BUS_MCEERR_AR;
23753 }
23754 #endif
23755 @@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23756 return 1;
23757 }
23758
23759 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23760 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23761 +{
23762 + pte_t *pte;
23763 + pmd_t *pmd;
23764 + spinlock_t *ptl;
23765 + unsigned char pte_mask;
23766 +
23767 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23768 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
23769 + return 0;
23770 +
23771 + /* PaX: it's our fault, let's handle it if we can */
23772 +
23773 + /* PaX: take a look at read faults before acquiring any locks */
23774 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23775 + /* instruction fetch attempt from a protected page in user mode */
23776 + up_read(&mm->mmap_sem);
23777 +
23778 +#ifdef CONFIG_PAX_EMUTRAMP
23779 + switch (pax_handle_fetch_fault(regs)) {
23780 + case 2:
23781 + return 1;
23782 + }
23783 +#endif
23784 +
23785 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23786 + do_group_exit(SIGKILL);
23787 + }
23788 +
23789 + pmd = pax_get_pmd(mm, address);
23790 + if (unlikely(!pmd))
23791 + return 0;
23792 +
23793 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23794 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23795 + pte_unmap_unlock(pte, ptl);
23796 + return 0;
23797 + }
23798 +
23799 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23800 + /* write attempt to a protected page in user mode */
23801 + pte_unmap_unlock(pte, ptl);
23802 + return 0;
23803 + }
23804 +
23805 +#ifdef CONFIG_SMP
23806 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23807 +#else
23808 + if (likely(address > get_limit(regs->cs)))
23809 +#endif
23810 + {
23811 + set_pte(pte, pte_mkread(*pte));
23812 + __flush_tlb_one(address);
23813 + pte_unmap_unlock(pte, ptl);
23814 + up_read(&mm->mmap_sem);
23815 + return 1;
23816 + }
23817 +
23818 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23819 +
23820 + /*
23821 + * PaX: fill DTLB with user rights and retry
23822 + */
23823 + __asm__ __volatile__ (
23824 + "orb %2,(%1)\n"
23825 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23826 +/*
23827 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23828 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23829 + * page fault when examined during a TLB load attempt. this is true not only
23830 + * for PTEs holding a non-present entry but also present entries that will
23831 + * raise a page fault (such as those set up by PaX, or the copy-on-write
23832 + * mechanism). in effect it means that we do *not* need to flush the TLBs
23833 + * for our target pages since their PTEs are simply not in the TLBs at all.
23834 +
23835 + * the best thing in omitting it is that we gain around 15-20% speed in the
23836 + * fast path of the page fault handler and can get rid of tracing since we
23837 + * can no longer flush unintended entries.
23838 + */
23839 + "invlpg (%0)\n"
23840 +#endif
23841 + __copyuser_seg"testb $0,(%0)\n"
23842 + "xorb %3,(%1)\n"
23843 + :
23844 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23845 + : "memory", "cc");
23846 + pte_unmap_unlock(pte, ptl);
23847 + up_read(&mm->mmap_sem);
23848 + return 1;
23849 +}
23850 +#endif
23851 +
23852 /*
23853 * Handle a spurious fault caused by a stale TLB entry.
23854 *
23855 @@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
23856 static inline int
23857 access_error(unsigned long error_code, struct vm_area_struct *vma)
23858 {
23859 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23860 + return 1;
23861 +
23862 if (error_code & PF_WRITE) {
23863 /* write, present and write, not present: */
23864 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23865 @@ -1005,18 +1197,33 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23866 {
23867 struct vm_area_struct *vma;
23868 struct task_struct *tsk;
23869 - unsigned long address;
23870 struct mm_struct *mm;
23871 int fault;
23872 int write = error_code & PF_WRITE;
23873 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23874 (write ? FAULT_FLAG_WRITE : 0);
23875
23876 - tsk = current;
23877 - mm = tsk->mm;
23878 -
23879 /* Get the faulting address: */
23880 - address = read_cr2();
23881 + unsigned long address = read_cr2();
23882 +
23883 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23884 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23885 + if (!search_exception_tables(regs->ip)) {
23886 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23887 + bad_area_nosemaphore(regs, error_code, address);
23888 + return;
23889 + }
23890 + if (address < PAX_USER_SHADOW_BASE) {
23891 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23892 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23893 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23894 + } else
23895 + address -= PAX_USER_SHADOW_BASE;
23896 + }
23897 +#endif
23898 +
23899 + tsk = current;
23900 + mm = tsk->mm;
23901
23902 /*
23903 * Detect and handle instructions that would cause a page fault for
23904 @@ -1077,7 +1284,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23905 * User-mode registers count as a user access even for any
23906 * potential system fault or CPU buglet:
23907 */
23908 - if (user_mode_vm(regs)) {
23909 + if (user_mode(regs)) {
23910 local_irq_enable();
23911 error_code |= PF_USER;
23912 } else {
23913 @@ -1132,6 +1339,11 @@ retry:
23914 might_sleep();
23915 }
23916
23917 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23918 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
23919 + return;
23920 +#endif
23921 +
23922 vma = find_vma(mm, address);
23923 if (unlikely(!vma)) {
23924 bad_area(regs, error_code, address);
23925 @@ -1143,18 +1355,24 @@ retry:
23926 bad_area(regs, error_code, address);
23927 return;
23928 }
23929 - if (error_code & PF_USER) {
23930 - /*
23931 - * Accessing the stack below %sp is always a bug.
23932 - * The large cushion allows instructions like enter
23933 - * and pusha to work. ("enter $65535, $31" pushes
23934 - * 32 pointers and then decrements %sp by 65535.)
23935 - */
23936 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
23937 - bad_area(regs, error_code, address);
23938 - return;
23939 - }
23940 + /*
23941 + * Accessing the stack below %sp is always a bug.
23942 + * The large cushion allows instructions like enter
23943 + * and pusha to work. ("enter $65535, $31" pushes
23944 + * 32 pointers and then decrements %sp by 65535.)
23945 + */
23946 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
23947 + bad_area(regs, error_code, address);
23948 + return;
23949 }
23950 +
23951 +#ifdef CONFIG_PAX_SEGMEXEC
23952 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
23953 + bad_area(regs, error_code, address);
23954 + return;
23955 + }
23956 +#endif
23957 +
23958 if (unlikely(expand_stack(vma, address))) {
23959 bad_area(regs, error_code, address);
23960 return;
23961 @@ -1209,3 +1427,292 @@ good_area:
23962
23963 up_read(&mm->mmap_sem);
23964 }
23965 +
23966 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23967 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
23968 +{
23969 + struct mm_struct *mm = current->mm;
23970 + unsigned long ip = regs->ip;
23971 +
23972 + if (v8086_mode(regs))
23973 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
23974 +
23975 +#ifdef CONFIG_PAX_PAGEEXEC
23976 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
23977 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
23978 + return true;
23979 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
23980 + return true;
23981 + return false;
23982 + }
23983 +#endif
23984 +
23985 +#ifdef CONFIG_PAX_SEGMEXEC
23986 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
23987 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
23988 + return true;
23989 + return false;
23990 + }
23991 +#endif
23992 +
23993 + return false;
23994 +}
23995 +#endif
23996 +
23997 +#ifdef CONFIG_PAX_EMUTRAMP
23998 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
23999 +{
24000 + int err;
24001 +
24002 + do { /* PaX: libffi trampoline emulation */
24003 + unsigned char mov, jmp;
24004 + unsigned int addr1, addr2;
24005 +
24006 +#ifdef CONFIG_X86_64
24007 + if ((regs->ip + 9) >> 32)
24008 + break;
24009 +#endif
24010 +
24011 + err = get_user(mov, (unsigned char __user *)regs->ip);
24012 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24013 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24014 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24015 +
24016 + if (err)
24017 + break;
24018 +
24019 + if (mov == 0xB8 && jmp == 0xE9) {
24020 + regs->ax = addr1;
24021 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24022 + return 2;
24023 + }
24024 + } while (0);
24025 +
24026 + do { /* PaX: gcc trampoline emulation #1 */
24027 + unsigned char mov1, mov2;
24028 + unsigned short jmp;
24029 + unsigned int addr1, addr2;
24030 +
24031 +#ifdef CONFIG_X86_64
24032 + if ((regs->ip + 11) >> 32)
24033 + break;
24034 +#endif
24035 +
24036 + err = get_user(mov1, (unsigned char __user *)regs->ip);
24037 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24038 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24039 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24040 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24041 +
24042 + if (err)
24043 + break;
24044 +
24045 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24046 + regs->cx = addr1;
24047 + regs->ax = addr2;
24048 + regs->ip = addr2;
24049 + return 2;
24050 + }
24051 + } while (0);
24052 +
24053 + do { /* PaX: gcc trampoline emulation #2 */
24054 + unsigned char mov, jmp;
24055 + unsigned int addr1, addr2;
24056 +
24057 +#ifdef CONFIG_X86_64
24058 + if ((regs->ip + 9) >> 32)
24059 + break;
24060 +#endif
24061 +
24062 + err = get_user(mov, (unsigned char __user *)regs->ip);
24063 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24064 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24065 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24066 +
24067 + if (err)
24068 + break;
24069 +
24070 + if (mov == 0xB9 && jmp == 0xE9) {
24071 + regs->cx = addr1;
24072 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24073 + return 2;
24074 + }
24075 + } while (0);
24076 +
24077 + return 1; /* PaX in action */
24078 +}
24079 +
24080 +#ifdef CONFIG_X86_64
24081 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24082 +{
24083 + int err;
24084 +
24085 + do { /* PaX: libffi trampoline emulation */
24086 + unsigned short mov1, mov2, jmp1;
24087 + unsigned char stcclc, jmp2;
24088 + unsigned long addr1, addr2;
24089 +
24090 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24091 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24092 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24093 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24094 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24095 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24096 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24097 +
24098 + if (err)
24099 + break;
24100 +
24101 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24102 + regs->r11 = addr1;
24103 + regs->r10 = addr2;
24104 + if (stcclc == 0xF8)
24105 + regs->flags &= ~X86_EFLAGS_CF;
24106 + else
24107 + regs->flags |= X86_EFLAGS_CF;
24108 + regs->ip = addr1;
24109 + return 2;
24110 + }
24111 + } while (0);
24112 +
24113 + do { /* PaX: gcc trampoline emulation #1 */
24114 + unsigned short mov1, mov2, jmp1;
24115 + unsigned char jmp2;
24116 + unsigned int addr1;
24117 + unsigned long addr2;
24118 +
24119 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24120 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24121 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24122 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24123 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24124 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24125 +
24126 + if (err)
24127 + break;
24128 +
24129 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24130 + regs->r11 = addr1;
24131 + regs->r10 = addr2;
24132 + regs->ip = addr1;
24133 + return 2;
24134 + }
24135 + } while (0);
24136 +
24137 + do { /* PaX: gcc trampoline emulation #2 */
24138 + unsigned short mov1, mov2, jmp1;
24139 + unsigned char jmp2;
24140 + unsigned long addr1, addr2;
24141 +
24142 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24143 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24144 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24145 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24146 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24147 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24148 +
24149 + if (err)
24150 + break;
24151 +
24152 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24153 + regs->r11 = addr1;
24154 + regs->r10 = addr2;
24155 + regs->ip = addr1;
24156 + return 2;
24157 + }
24158 + } while (0);
24159 +
24160 + return 1; /* PaX in action */
24161 +}
24162 +#endif
24163 +
24164 +/*
24165 + * PaX: decide what to do with offenders (regs->ip = fault address)
24166 + *
24167 + * returns 1 when task should be killed
24168 + * 2 when gcc trampoline was detected
24169 + */
24170 +static int pax_handle_fetch_fault(struct pt_regs *regs)
24171 +{
24172 + if (v8086_mode(regs))
24173 + return 1;
24174 +
24175 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24176 + return 1;
24177 +
24178 +#ifdef CONFIG_X86_32
24179 + return pax_handle_fetch_fault_32(regs);
24180 +#else
24181 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24182 + return pax_handle_fetch_fault_32(regs);
24183 + else
24184 + return pax_handle_fetch_fault_64(regs);
24185 +#endif
24186 +}
24187 +#endif
24188 +
24189 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24190 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24191 +{
24192 + long i;
24193 +
24194 + printk(KERN_ERR "PAX: bytes at PC: ");
24195 + for (i = 0; i < 20; i++) {
24196 + unsigned char c;
24197 + if (get_user(c, (unsigned char __force_user *)pc+i))
24198 + printk(KERN_CONT "?? ");
24199 + else
24200 + printk(KERN_CONT "%02x ", c);
24201 + }
24202 + printk("\n");
24203 +
24204 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24205 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
24206 + unsigned long c;
24207 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
24208 +#ifdef CONFIG_X86_32
24209 + printk(KERN_CONT "???????? ");
24210 +#else
24211 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24212 + printk(KERN_CONT "???????? ???????? ");
24213 + else
24214 + printk(KERN_CONT "???????????????? ");
24215 +#endif
24216 + } else {
24217 +#ifdef CONFIG_X86_64
24218 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24219 + printk(KERN_CONT "%08x ", (unsigned int)c);
24220 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24221 + } else
24222 +#endif
24223 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24224 + }
24225 + }
24226 + printk("\n");
24227 +}
24228 +#endif
24229 +
24230 +/**
24231 + * probe_kernel_write(): safely attempt to write to a location
24232 + * @dst: address to write to
24233 + * @src: pointer to the data that shall be written
24234 + * @size: size of the data chunk
24235 + *
24236 + * Safely write to address @dst from the buffer at @src. If a kernel fault
24237 + * happens, handle that and return -EFAULT.
24238 + */
24239 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24240 +{
24241 + long ret;
24242 + mm_segment_t old_fs = get_fs();
24243 +
24244 + set_fs(KERNEL_DS);
24245 + pagefault_disable();
24246 + pax_open_kernel();
24247 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24248 + pax_close_kernel();
24249 + pagefault_enable();
24250 + set_fs(old_fs);
24251 +
24252 + return ret ? -EFAULT : 0;
24253 +}
24254 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24255 index dd74e46..7d26398 100644
24256 --- a/arch/x86/mm/gup.c
24257 +++ b/arch/x86/mm/gup.c
24258 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24259 addr = start;
24260 len = (unsigned long) nr_pages << PAGE_SHIFT;
24261 end = start + len;
24262 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24263 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24264 (void __user *)start, len)))
24265 return 0;
24266
24267 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24268 index 6f31ee5..8ee4164 100644
24269 --- a/arch/x86/mm/highmem_32.c
24270 +++ b/arch/x86/mm/highmem_32.c
24271 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
24272 idx = type + KM_TYPE_NR*smp_processor_id();
24273 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24274 BUG_ON(!pte_none(*(kmap_pte-idx)));
24275 +
24276 + pax_open_kernel();
24277 set_pte(kmap_pte-idx, mk_pte(page, prot));
24278 + pax_close_kernel();
24279 +
24280 arch_flush_lazy_mmu_mode();
24281
24282 return (void *)vaddr;
24283 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24284 index f6679a7..8f795a3 100644
24285 --- a/arch/x86/mm/hugetlbpage.c
24286 +++ b/arch/x86/mm/hugetlbpage.c
24287 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24288 struct hstate *h = hstate_file(file);
24289 struct mm_struct *mm = current->mm;
24290 struct vm_area_struct *vma;
24291 - unsigned long start_addr;
24292 + unsigned long start_addr, pax_task_size = TASK_SIZE;
24293 +
24294 +#ifdef CONFIG_PAX_SEGMEXEC
24295 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24296 + pax_task_size = SEGMEXEC_TASK_SIZE;
24297 +#endif
24298 +
24299 + pax_task_size -= PAGE_SIZE;
24300
24301 if (len > mm->cached_hole_size) {
24302 - start_addr = mm->free_area_cache;
24303 + start_addr = mm->free_area_cache;
24304 } else {
24305 - start_addr = TASK_UNMAPPED_BASE;
24306 - mm->cached_hole_size = 0;
24307 + start_addr = mm->mmap_base;
24308 + mm->cached_hole_size = 0;
24309 }
24310
24311 full_search:
24312 @@ -280,26 +287,27 @@ full_search:
24313
24314 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24315 /* At this point: (!vma || addr < vma->vm_end). */
24316 - if (TASK_SIZE - len < addr) {
24317 + if (pax_task_size - len < addr) {
24318 /*
24319 * Start a new search - just in case we missed
24320 * some holes.
24321 */
24322 - if (start_addr != TASK_UNMAPPED_BASE) {
24323 - start_addr = TASK_UNMAPPED_BASE;
24324 + if (start_addr != mm->mmap_base) {
24325 + start_addr = mm->mmap_base;
24326 mm->cached_hole_size = 0;
24327 goto full_search;
24328 }
24329 return -ENOMEM;
24330 }
24331 - if (!vma || addr + len <= vma->vm_start) {
24332 - mm->free_area_cache = addr + len;
24333 - return addr;
24334 - }
24335 + if (check_heap_stack_gap(vma, addr, len))
24336 + break;
24337 if (addr + mm->cached_hole_size < vma->vm_start)
24338 mm->cached_hole_size = vma->vm_start - addr;
24339 addr = ALIGN(vma->vm_end, huge_page_size(h));
24340 }
24341 +
24342 + mm->free_area_cache = addr + len;
24343 + return addr;
24344 }
24345
24346 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24347 @@ -310,9 +318,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24348 struct mm_struct *mm = current->mm;
24349 struct vm_area_struct *vma;
24350 unsigned long base = mm->mmap_base;
24351 - unsigned long addr = addr0;
24352 + unsigned long addr;
24353 unsigned long largest_hole = mm->cached_hole_size;
24354 - unsigned long start_addr;
24355
24356 /* don't allow allocations above current base */
24357 if (mm->free_area_cache > base)
24358 @@ -322,16 +329,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24359 largest_hole = 0;
24360 mm->free_area_cache = base;
24361 }
24362 -try_again:
24363 - start_addr = mm->free_area_cache;
24364
24365 /* make sure it can fit in the remaining address space */
24366 if (mm->free_area_cache < len)
24367 goto fail;
24368
24369 /* either no address requested or can't fit in requested address hole */
24370 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
24371 + addr = mm->free_area_cache - len;
24372 do {
24373 + addr &= huge_page_mask(h);
24374 /*
24375 * Lookup failure means no vma is above this address,
24376 * i.e. return with success:
24377 @@ -340,10 +346,10 @@ try_again:
24378 if (!vma)
24379 return addr;
24380
24381 - if (addr + len <= vma->vm_start) {
24382 + if (check_heap_stack_gap(vma, addr, len)) {
24383 /* remember the address as a hint for next time */
24384 - mm->cached_hole_size = largest_hole;
24385 - return (mm->free_area_cache = addr);
24386 + mm->cached_hole_size = largest_hole;
24387 + return (mm->free_area_cache = addr);
24388 } else if (mm->free_area_cache == vma->vm_end) {
24389 /* pull free_area_cache down to the first hole */
24390 mm->free_area_cache = vma->vm_start;
24391 @@ -352,29 +358,34 @@ try_again:
24392
24393 /* remember the largest hole we saw so far */
24394 if (addr + largest_hole < vma->vm_start)
24395 - largest_hole = vma->vm_start - addr;
24396 + largest_hole = vma->vm_start - addr;
24397
24398 /* try just below the current vma->vm_start */
24399 - addr = (vma->vm_start - len) & huge_page_mask(h);
24400 - } while (len <= vma->vm_start);
24401 + addr = skip_heap_stack_gap(vma, len);
24402 + } while (!IS_ERR_VALUE(addr));
24403
24404 fail:
24405 /*
24406 - * if hint left us with no space for the requested
24407 - * mapping then try again:
24408 - */
24409 - if (start_addr != base) {
24410 - mm->free_area_cache = base;
24411 - largest_hole = 0;
24412 - goto try_again;
24413 - }
24414 - /*
24415 * A failed mmap() very likely causes application failure,
24416 * so fall back to the bottom-up function here. This scenario
24417 * can happen with large stack limits and large mmap()
24418 * allocations.
24419 */
24420 - mm->free_area_cache = TASK_UNMAPPED_BASE;
24421 +
24422 +#ifdef CONFIG_PAX_SEGMEXEC
24423 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24424 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24425 + else
24426 +#endif
24427 +
24428 + mm->mmap_base = TASK_UNMAPPED_BASE;
24429 +
24430 +#ifdef CONFIG_PAX_RANDMMAP
24431 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24432 + mm->mmap_base += mm->delta_mmap;
24433 +#endif
24434 +
24435 + mm->free_area_cache = mm->mmap_base;
24436 mm->cached_hole_size = ~0UL;
24437 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24438 len, pgoff, flags);
24439 @@ -382,6 +393,7 @@ fail:
24440 /*
24441 * Restore the topdown base:
24442 */
24443 + mm->mmap_base = base;
24444 mm->free_area_cache = base;
24445 mm->cached_hole_size = ~0UL;
24446
24447 @@ -395,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24448 struct hstate *h = hstate_file(file);
24449 struct mm_struct *mm = current->mm;
24450 struct vm_area_struct *vma;
24451 + unsigned long pax_task_size = TASK_SIZE;
24452
24453 if (len & ~huge_page_mask(h))
24454 return -EINVAL;
24455 - if (len > TASK_SIZE)
24456 +
24457 +#ifdef CONFIG_PAX_SEGMEXEC
24458 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24459 + pax_task_size = SEGMEXEC_TASK_SIZE;
24460 +#endif
24461 +
24462 + pax_task_size -= PAGE_SIZE;
24463 +
24464 + if (len > pax_task_size)
24465 return -ENOMEM;
24466
24467 if (flags & MAP_FIXED) {
24468 @@ -410,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24469 if (addr) {
24470 addr = ALIGN(addr, huge_page_size(h));
24471 vma = find_vma(mm, addr);
24472 - if (TASK_SIZE - len >= addr &&
24473 - (!vma || addr + len <= vma->vm_start))
24474 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24475 return addr;
24476 }
24477 if (mm->get_unmapped_area == arch_get_unmapped_area)
24478 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24479 index 4f0cec7..00976ce 100644
24480 --- a/arch/x86/mm/init.c
24481 +++ b/arch/x86/mm/init.c
24482 @@ -16,6 +16,8 @@
24483 #include <asm/tlb.h>
24484 #include <asm/proto.h>
24485 #include <asm/dma.h> /* for MAX_DMA_PFN */
24486 +#include <asm/desc.h>
24487 +#include <asm/bios_ebda.h>
24488
24489 unsigned long __initdata pgt_buf_start;
24490 unsigned long __meminitdata pgt_buf_end;
24491 @@ -32,7 +34,7 @@ int direct_gbpages
24492 static void __init find_early_table_space(unsigned long end, int use_pse,
24493 int use_gbpages)
24494 {
24495 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24496 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24497 phys_addr_t base;
24498
24499 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
24500 @@ -311,10 +313,37 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24501 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24502 * mmio resources as well as potential bios/acpi data regions.
24503 */
24504 +
24505 +#ifdef CONFIG_GRKERNSEC_KMEM
24506 +static unsigned int ebda_start __read_only;
24507 +static unsigned int ebda_end __read_only;
24508 +#endif
24509 +
24510 int devmem_is_allowed(unsigned long pagenr)
24511 {
24512 +#ifdef CONFIG_GRKERNSEC_KMEM
24513 + /* allow BDA */
24514 + if (!pagenr)
24515 + return 1;
24516 + /* allow EBDA */
24517 + if (pagenr >= ebda_start && pagenr < ebda_end)
24518 + return 1;
24519 +#else
24520 + if (!pagenr)
24521 + return 1;
24522 +#ifdef CONFIG_VM86
24523 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24524 + return 1;
24525 +#endif
24526 +#endif
24527 +
24528 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24529 + return 1;
24530 +#ifdef CONFIG_GRKERNSEC_KMEM
24531 + /* throw out everything else below 1MB */
24532 if (pagenr <= 256)
24533 - return 1;
24534 + return 0;
24535 +#endif
24536 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24537 return 0;
24538 if (!page_is_ram(pagenr))
24539 @@ -371,8 +400,116 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24540 #endif
24541 }
24542
24543 +#ifdef CONFIG_GRKERNSEC_KMEM
24544 +static inline void gr_init_ebda(void)
24545 +{
24546 + unsigned int ebda_addr;
24547 + unsigned int ebda_size = 0;
24548 +
24549 + ebda_addr = get_bios_ebda();
24550 + if (ebda_addr) {
24551 + ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
24552 + ebda_size <<= 10;
24553 + }
24554 + if (ebda_addr && ebda_size) {
24555 + ebda_start = ebda_addr >> PAGE_SHIFT;
24556 + ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
24557 + } else {
24558 + ebda_start = 0x9f000 >> PAGE_SHIFT;
24559 + ebda_end = 0xa0000 >> PAGE_SHIFT;
24560 + }
24561 +}
24562 +#else
24563 +static inline void gr_init_ebda(void) { }
24564 +#endif
24565 +
24566 void free_initmem(void)
24567 {
24568 +#ifdef CONFIG_PAX_KERNEXEC
24569 +#ifdef CONFIG_X86_32
24570 + /* PaX: limit KERNEL_CS to actual size */
24571 + unsigned long addr, limit;
24572 + struct desc_struct d;
24573 + int cpu;
24574 +#else
24575 + pgd_t *pgd;
24576 + pud_t *pud;
24577 + pmd_t *pmd;
24578 + unsigned long addr, end;
24579 +#endif
24580 +#endif
24581 +
24582 + gr_init_ebda();
24583 +
24584 +#ifdef CONFIG_PAX_KERNEXEC
24585 +#ifdef CONFIG_X86_32
24586 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24587 + limit = (limit - 1UL) >> PAGE_SHIFT;
24588 +
24589 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24590 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24591 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24592 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24593 + }
24594 +
24595 + /* PaX: make KERNEL_CS read-only */
24596 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24597 + if (!paravirt_enabled())
24598 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24599 +/*
24600 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24601 + pgd = pgd_offset_k(addr);
24602 + pud = pud_offset(pgd, addr);
24603 + pmd = pmd_offset(pud, addr);
24604 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24605 + }
24606 +*/
24607 +#ifdef CONFIG_X86_PAE
24608 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24609 +/*
24610 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24611 + pgd = pgd_offset_k(addr);
24612 + pud = pud_offset(pgd, addr);
24613 + pmd = pmd_offset(pud, addr);
24614 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24615 + }
24616 +*/
24617 +#endif
24618 +
24619 +#ifdef CONFIG_MODULES
24620 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24621 +#endif
24622 +
24623 +#else
24624 + /* PaX: make kernel code/rodata read-only, rest non-executable */
24625 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24626 + pgd = pgd_offset_k(addr);
24627 + pud = pud_offset(pgd, addr);
24628 + pmd = pmd_offset(pud, addr);
24629 + if (!pmd_present(*pmd))
24630 + continue;
24631 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24632 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24633 + else
24634 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24635 + }
24636 +
24637 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24638 + end = addr + KERNEL_IMAGE_SIZE;
24639 + for (; addr < end; addr += PMD_SIZE) {
24640 + pgd = pgd_offset_k(addr);
24641 + pud = pud_offset(pgd, addr);
24642 + pmd = pmd_offset(pud, addr);
24643 + if (!pmd_present(*pmd))
24644 + continue;
24645 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24646 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24647 + }
24648 +#endif
24649 +
24650 + flush_tlb_all();
24651 +#endif
24652 +
24653 free_init_pages("unused kernel memory",
24654 (unsigned long)(&__init_begin),
24655 (unsigned long)(&__init_end));
24656 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24657 index 575d86f..4987469 100644
24658 --- a/arch/x86/mm/init_32.c
24659 +++ b/arch/x86/mm/init_32.c
24660 @@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
24661 }
24662
24663 /*
24664 - * Creates a middle page table and puts a pointer to it in the
24665 - * given global directory entry. This only returns the gd entry
24666 - * in non-PAE compilation mode, since the middle layer is folded.
24667 - */
24668 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
24669 -{
24670 - pud_t *pud;
24671 - pmd_t *pmd_table;
24672 -
24673 -#ifdef CONFIG_X86_PAE
24674 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24675 - if (after_bootmem)
24676 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24677 - else
24678 - pmd_table = (pmd_t *)alloc_low_page();
24679 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24680 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24681 - pud = pud_offset(pgd, 0);
24682 - BUG_ON(pmd_table != pmd_offset(pud, 0));
24683 -
24684 - return pmd_table;
24685 - }
24686 -#endif
24687 - pud = pud_offset(pgd, 0);
24688 - pmd_table = pmd_offset(pud, 0);
24689 -
24690 - return pmd_table;
24691 -}
24692 -
24693 -/*
24694 * Create a page table and place a pointer to it in a middle page
24695 * directory entry:
24696 */
24697 @@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24698 page_table = (pte_t *)alloc_low_page();
24699
24700 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24701 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24702 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24703 +#else
24704 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24705 +#endif
24706 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24707 }
24708
24709 return pte_offset_kernel(pmd, 0);
24710 }
24711
24712 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
24713 +{
24714 + pud_t *pud;
24715 + pmd_t *pmd_table;
24716 +
24717 + pud = pud_offset(pgd, 0);
24718 + pmd_table = pmd_offset(pud, 0);
24719 +
24720 + return pmd_table;
24721 +}
24722 +
24723 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24724 {
24725 int pgd_idx = pgd_index(vaddr);
24726 @@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24727 int pgd_idx, pmd_idx;
24728 unsigned long vaddr;
24729 pgd_t *pgd;
24730 + pud_t *pud;
24731 pmd_t *pmd;
24732 pte_t *pte = NULL;
24733
24734 @@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24735 pgd = pgd_base + pgd_idx;
24736
24737 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24738 - pmd = one_md_table_init(pgd);
24739 - pmd = pmd + pmd_index(vaddr);
24740 + pud = pud_offset(pgd, vaddr);
24741 + pmd = pmd_offset(pud, vaddr);
24742 +
24743 +#ifdef CONFIG_X86_PAE
24744 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24745 +#endif
24746 +
24747 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24748 pmd++, pmd_idx++) {
24749 pte = page_table_kmap_check(one_page_table_init(pmd),
24750 @@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24751 }
24752 }
24753
24754 -static inline int is_kernel_text(unsigned long addr)
24755 +static inline int is_kernel_text(unsigned long start, unsigned long end)
24756 {
24757 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24758 - return 1;
24759 - return 0;
24760 + if ((start > ktla_ktva((unsigned long)_etext) ||
24761 + end <= ktla_ktva((unsigned long)_stext)) &&
24762 + (start > ktla_ktva((unsigned long)_einittext) ||
24763 + end <= ktla_ktva((unsigned long)_sinittext)) &&
24764 +
24765 +#ifdef CONFIG_ACPI_SLEEP
24766 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24767 +#endif
24768 +
24769 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24770 + return 0;
24771 + return 1;
24772 }
24773
24774 /*
24775 @@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
24776 unsigned long last_map_addr = end;
24777 unsigned long start_pfn, end_pfn;
24778 pgd_t *pgd_base = swapper_pg_dir;
24779 - int pgd_idx, pmd_idx, pte_ofs;
24780 + unsigned int pgd_idx, pmd_idx, pte_ofs;
24781 unsigned long pfn;
24782 pgd_t *pgd;
24783 + pud_t *pud;
24784 pmd_t *pmd;
24785 pte_t *pte;
24786 unsigned pages_2m, pages_4k;
24787 @@ -280,8 +281,13 @@ repeat:
24788 pfn = start_pfn;
24789 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24790 pgd = pgd_base + pgd_idx;
24791 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24792 - pmd = one_md_table_init(pgd);
24793 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24794 + pud = pud_offset(pgd, 0);
24795 + pmd = pmd_offset(pud, 0);
24796 +
24797 +#ifdef CONFIG_X86_PAE
24798 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24799 +#endif
24800
24801 if (pfn >= end_pfn)
24802 continue;
24803 @@ -293,14 +299,13 @@ repeat:
24804 #endif
24805 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24806 pmd++, pmd_idx++) {
24807 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24808 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24809
24810 /*
24811 * Map with big pages if possible, otherwise
24812 * create normal page tables:
24813 */
24814 if (use_pse) {
24815 - unsigned int addr2;
24816 pgprot_t prot = PAGE_KERNEL_LARGE;
24817 /*
24818 * first pass will use the same initial
24819 @@ -310,11 +315,7 @@ repeat:
24820 __pgprot(PTE_IDENT_ATTR |
24821 _PAGE_PSE);
24822
24823 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24824 - PAGE_OFFSET + PAGE_SIZE-1;
24825 -
24826 - if (is_kernel_text(addr) ||
24827 - is_kernel_text(addr2))
24828 + if (is_kernel_text(address, address + PMD_SIZE))
24829 prot = PAGE_KERNEL_LARGE_EXEC;
24830
24831 pages_2m++;
24832 @@ -331,7 +332,7 @@ repeat:
24833 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24834 pte += pte_ofs;
24835 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24836 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24837 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24838 pgprot_t prot = PAGE_KERNEL;
24839 /*
24840 * first pass will use the same initial
24841 @@ -339,7 +340,7 @@ repeat:
24842 */
24843 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24844
24845 - if (is_kernel_text(addr))
24846 + if (is_kernel_text(address, address + PAGE_SIZE))
24847 prot = PAGE_KERNEL_EXEC;
24848
24849 pages_4k++;
24850 @@ -465,7 +466,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24851
24852 pud = pud_offset(pgd, va);
24853 pmd = pmd_offset(pud, va);
24854 - if (!pmd_present(*pmd))
24855 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
24856 break;
24857
24858 pte = pte_offset_kernel(pmd, va);
24859 @@ -517,12 +518,10 @@ void __init early_ioremap_page_table_range_init(void)
24860
24861 static void __init pagetable_init(void)
24862 {
24863 - pgd_t *pgd_base = swapper_pg_dir;
24864 -
24865 - permanent_kmaps_init(pgd_base);
24866 + permanent_kmaps_init(swapper_pg_dir);
24867 }
24868
24869 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24870 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24871 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24872
24873 /* user-defined highmem size */
24874 @@ -734,6 +733,12 @@ void __init mem_init(void)
24875
24876 pci_iommu_alloc();
24877
24878 +#ifdef CONFIG_PAX_PER_CPU_PGD
24879 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24880 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24881 + KERNEL_PGD_PTRS);
24882 +#endif
24883 +
24884 #ifdef CONFIG_FLATMEM
24885 BUG_ON(!mem_map);
24886 #endif
24887 @@ -760,7 +765,7 @@ void __init mem_init(void)
24888 reservedpages++;
24889
24890 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24891 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24892 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24893 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24894
24895 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24896 @@ -801,10 +806,10 @@ void __init mem_init(void)
24897 ((unsigned long)&__init_end -
24898 (unsigned long)&__init_begin) >> 10,
24899
24900 - (unsigned long)&_etext, (unsigned long)&_edata,
24901 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24902 + (unsigned long)&_sdata, (unsigned long)&_edata,
24903 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24904
24905 - (unsigned long)&_text, (unsigned long)&_etext,
24906 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24907 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24908
24909 /*
24910 @@ -882,6 +887,7 @@ void set_kernel_text_rw(void)
24911 if (!kernel_set_to_readonly)
24912 return;
24913
24914 + start = ktla_ktva(start);
24915 pr_debug("Set kernel text: %lx - %lx for read write\n",
24916 start, start+size);
24917
24918 @@ -896,6 +902,7 @@ void set_kernel_text_ro(void)
24919 if (!kernel_set_to_readonly)
24920 return;
24921
24922 + start = ktla_ktva(start);
24923 pr_debug("Set kernel text: %lx - %lx for read only\n",
24924 start, start+size);
24925
24926 @@ -924,6 +931,7 @@ void mark_rodata_ro(void)
24927 unsigned long start = PFN_ALIGN(_text);
24928 unsigned long size = PFN_ALIGN(_etext) - start;
24929
24930 + start = ktla_ktva(start);
24931 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
24932 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
24933 size >> 10);
24934 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
24935 index fc18be0..e539653 100644
24936 --- a/arch/x86/mm/init_64.c
24937 +++ b/arch/x86/mm/init_64.c
24938 @@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
24939 * around without checking the pgd every time.
24940 */
24941
24942 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
24943 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
24944 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24945
24946 int force_personality32;
24947 @@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24948
24949 for (address = start; address <= end; address += PGDIR_SIZE) {
24950 const pgd_t *pgd_ref = pgd_offset_k(address);
24951 +
24952 +#ifdef CONFIG_PAX_PER_CPU_PGD
24953 + unsigned long cpu;
24954 +#else
24955 struct page *page;
24956 +#endif
24957
24958 if (pgd_none(*pgd_ref))
24959 continue;
24960
24961 spin_lock(&pgd_lock);
24962 +
24963 +#ifdef CONFIG_PAX_PER_CPU_PGD
24964 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24965 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
24966 +#else
24967 list_for_each_entry(page, &pgd_list, lru) {
24968 pgd_t *pgd;
24969 spinlock_t *pgt_lock;
24970 @@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24971 /* the pgt_lock only for Xen */
24972 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
24973 spin_lock(pgt_lock);
24974 +#endif
24975
24976 if (pgd_none(*pgd))
24977 set_pgd(pgd, *pgd_ref);
24978 @@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24979 BUG_ON(pgd_page_vaddr(*pgd)
24980 != pgd_page_vaddr(*pgd_ref));
24981
24982 +#ifndef CONFIG_PAX_PER_CPU_PGD
24983 spin_unlock(pgt_lock);
24984 +#endif
24985 +
24986 }
24987 spin_unlock(&pgd_lock);
24988 }
24989 @@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
24990 {
24991 if (pgd_none(*pgd)) {
24992 pud_t *pud = (pud_t *)spp_getpage();
24993 - pgd_populate(&init_mm, pgd, pud);
24994 + pgd_populate_kernel(&init_mm, pgd, pud);
24995 if (pud != pud_offset(pgd, 0))
24996 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
24997 pud, pud_offset(pgd, 0));
24998 @@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
24999 {
25000 if (pud_none(*pud)) {
25001 pmd_t *pmd = (pmd_t *) spp_getpage();
25002 - pud_populate(&init_mm, pud, pmd);
25003 + pud_populate_kernel(&init_mm, pud, pmd);
25004 if (pmd != pmd_offset(pud, 0))
25005 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
25006 pmd, pmd_offset(pud, 0));
25007 @@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25008 pmd = fill_pmd(pud, vaddr);
25009 pte = fill_pte(pmd, vaddr);
25010
25011 + pax_open_kernel();
25012 set_pte(pte, new_pte);
25013 + pax_close_kernel();
25014
25015 /*
25016 * It's enough to flush this one mapping.
25017 @@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25018 pgd = pgd_offset_k((unsigned long)__va(phys));
25019 if (pgd_none(*pgd)) {
25020 pud = (pud_t *) spp_getpage();
25021 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25022 - _PAGE_USER));
25023 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25024 }
25025 pud = pud_offset(pgd, (unsigned long)__va(phys));
25026 if (pud_none(*pud)) {
25027 pmd = (pmd_t *) spp_getpage();
25028 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25029 - _PAGE_USER));
25030 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25031 }
25032 pmd = pmd_offset(pud, phys);
25033 BUG_ON(!pmd_none(*pmd));
25034 @@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
25035 if (pfn >= pgt_buf_top)
25036 panic("alloc_low_page: ran out of memory");
25037
25038 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25039 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25040 clear_page(adr);
25041 *phys = pfn * PAGE_SIZE;
25042 return adr;
25043 @@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
25044
25045 phys = __pa(virt);
25046 left = phys & (PAGE_SIZE - 1);
25047 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25048 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25049 adr = (void *)(((unsigned long)adr) | left);
25050
25051 return adr;
25052 @@ -545,7 +559,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
25053 unmap_low_page(pmd);
25054
25055 spin_lock(&init_mm.page_table_lock);
25056 - pud_populate(&init_mm, pud, __va(pmd_phys));
25057 + pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
25058 spin_unlock(&init_mm.page_table_lock);
25059 }
25060 __flush_tlb_all();
25061 @@ -591,7 +605,7 @@ kernel_physical_mapping_init(unsigned long start,
25062 unmap_low_page(pud);
25063
25064 spin_lock(&init_mm.page_table_lock);
25065 - pgd_populate(&init_mm, pgd, __va(pud_phys));
25066 + pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
25067 spin_unlock(&init_mm.page_table_lock);
25068 pgd_changed = true;
25069 }
25070 @@ -683,6 +697,12 @@ void __init mem_init(void)
25071
25072 pci_iommu_alloc();
25073
25074 +#ifdef CONFIG_PAX_PER_CPU_PGD
25075 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25076 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25077 + KERNEL_PGD_PTRS);
25078 +#endif
25079 +
25080 /* clear_bss() already clear the empty_zero_page */
25081
25082 reservedpages = 0;
25083 @@ -843,8 +863,8 @@ int kern_addr_valid(unsigned long addr)
25084 static struct vm_area_struct gate_vma = {
25085 .vm_start = VSYSCALL_START,
25086 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25087 - .vm_page_prot = PAGE_READONLY_EXEC,
25088 - .vm_flags = VM_READ | VM_EXEC
25089 + .vm_page_prot = PAGE_READONLY,
25090 + .vm_flags = VM_READ
25091 };
25092
25093 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
25094 @@ -878,7 +898,7 @@ int in_gate_area_no_mm(unsigned long addr)
25095
25096 const char *arch_vma_name(struct vm_area_struct *vma)
25097 {
25098 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25099 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25100 return "[vdso]";
25101 if (vma == &gate_vma)
25102 return "[vsyscall]";
25103 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25104 index 7b179b4..6bd1777 100644
25105 --- a/arch/x86/mm/iomap_32.c
25106 +++ b/arch/x86/mm/iomap_32.c
25107 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
25108 type = kmap_atomic_idx_push();
25109 idx = type + KM_TYPE_NR * smp_processor_id();
25110 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25111 +
25112 + pax_open_kernel();
25113 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25114 + pax_close_kernel();
25115 +
25116 arch_flush_lazy_mmu_mode();
25117
25118 return (void *)vaddr;
25119 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25120 index be1ef57..55f0160 100644
25121 --- a/arch/x86/mm/ioremap.c
25122 +++ b/arch/x86/mm/ioremap.c
25123 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25124 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
25125 int is_ram = page_is_ram(pfn);
25126
25127 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25128 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25129 return NULL;
25130 WARN_ON_ONCE(is_ram);
25131 }
25132 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25133
25134 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25135 if (page_is_ram(start >> PAGE_SHIFT))
25136 +#ifdef CONFIG_HIGHMEM
25137 + if ((start >> PAGE_SHIFT) < max_low_pfn)
25138 +#endif
25139 return __va(phys);
25140
25141 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
25142 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
25143 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25144
25145 static __initdata int after_paging_init;
25146 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25147 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25148
25149 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25150 {
25151 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
25152 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25153
25154 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25155 - memset(bm_pte, 0, sizeof(bm_pte));
25156 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
25157 + pmd_populate_user(&init_mm, pmd, bm_pte);
25158
25159 /*
25160 * The boot-ioremap range spans multiple pmds, for which
25161 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25162 index d87dd6d..bf3fa66 100644
25163 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
25164 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25165 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25166 * memory (e.g. tracked pages)? For now, we need this to avoid
25167 * invoking kmemcheck for PnP BIOS calls.
25168 */
25169 - if (regs->flags & X86_VM_MASK)
25170 + if (v8086_mode(regs))
25171 return false;
25172 - if (regs->cs != __KERNEL_CS)
25173 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25174 return false;
25175
25176 pte = kmemcheck_pte_lookup(address);
25177 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25178 index 845df68..1d8d29f 100644
25179 --- a/arch/x86/mm/mmap.c
25180 +++ b/arch/x86/mm/mmap.c
25181 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
25182 * Leave an at least ~128 MB hole with possible stack randomization.
25183 */
25184 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25185 -#define MAX_GAP (TASK_SIZE/6*5)
25186 +#define MAX_GAP (pax_task_size/6*5)
25187
25188 static int mmap_is_legacy(void)
25189 {
25190 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
25191 return rnd << PAGE_SHIFT;
25192 }
25193
25194 -static unsigned long mmap_base(void)
25195 +static unsigned long mmap_base(struct mm_struct *mm)
25196 {
25197 unsigned long gap = rlimit(RLIMIT_STACK);
25198 + unsigned long pax_task_size = TASK_SIZE;
25199 +
25200 +#ifdef CONFIG_PAX_SEGMEXEC
25201 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25202 + pax_task_size = SEGMEXEC_TASK_SIZE;
25203 +#endif
25204
25205 if (gap < MIN_GAP)
25206 gap = MIN_GAP;
25207 else if (gap > MAX_GAP)
25208 gap = MAX_GAP;
25209
25210 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25211 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25212 }
25213
25214 /*
25215 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25216 * does, but not when emulating X86_32
25217 */
25218 -static unsigned long mmap_legacy_base(void)
25219 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
25220 {
25221 - if (mmap_is_ia32())
25222 + if (mmap_is_ia32()) {
25223 +
25224 +#ifdef CONFIG_PAX_SEGMEXEC
25225 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25226 + return SEGMEXEC_TASK_UNMAPPED_BASE;
25227 + else
25228 +#endif
25229 +
25230 return TASK_UNMAPPED_BASE;
25231 - else
25232 + } else
25233 return TASK_UNMAPPED_BASE + mmap_rnd();
25234 }
25235
25236 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
25237 void arch_pick_mmap_layout(struct mm_struct *mm)
25238 {
25239 if (mmap_is_legacy()) {
25240 - mm->mmap_base = mmap_legacy_base();
25241 + mm->mmap_base = mmap_legacy_base(mm);
25242 +
25243 +#ifdef CONFIG_PAX_RANDMMAP
25244 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25245 + mm->mmap_base += mm->delta_mmap;
25246 +#endif
25247 +
25248 mm->get_unmapped_area = arch_get_unmapped_area;
25249 mm->unmap_area = arch_unmap_area;
25250 } else {
25251 - mm->mmap_base = mmap_base();
25252 + mm->mmap_base = mmap_base(mm);
25253 +
25254 +#ifdef CONFIG_PAX_RANDMMAP
25255 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25256 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25257 +#endif
25258 +
25259 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25260 mm->unmap_area = arch_unmap_area_topdown;
25261 }
25262 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25263 index dc0b727..dc9d71a 100644
25264 --- a/arch/x86/mm/mmio-mod.c
25265 +++ b/arch/x86/mm/mmio-mod.c
25266 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25267 break;
25268 default:
25269 {
25270 - unsigned char *ip = (unsigned char *)instptr;
25271 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25272 my_trace->opcode = MMIO_UNKNOWN_OP;
25273 my_trace->width = 0;
25274 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25275 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25276 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25277 void __iomem *addr)
25278 {
25279 - static atomic_t next_id;
25280 + static atomic_unchecked_t next_id;
25281 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25282 /* These are page-unaligned. */
25283 struct mmiotrace_map map = {
25284 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25285 .private = trace
25286 },
25287 .phys = offset,
25288 - .id = atomic_inc_return(&next_id)
25289 + .id = atomic_inc_return_unchecked(&next_id)
25290 };
25291 map.map_id = trace->id;
25292
25293 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25294 index b008656..773eac2 100644
25295 --- a/arch/x86/mm/pageattr-test.c
25296 +++ b/arch/x86/mm/pageattr-test.c
25297 @@ -36,7 +36,7 @@ enum {
25298
25299 static int pte_testbit(pte_t pte)
25300 {
25301 - return pte_flags(pte) & _PAGE_UNUSED1;
25302 + return pte_flags(pte) & _PAGE_CPA_TEST;
25303 }
25304
25305 struct split_state {
25306 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25307 index e1ebde3..b1e1db38 100644
25308 --- a/arch/x86/mm/pageattr.c
25309 +++ b/arch/x86/mm/pageattr.c
25310 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25311 */
25312 #ifdef CONFIG_PCI_BIOS
25313 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25314 - pgprot_val(forbidden) |= _PAGE_NX;
25315 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25316 #endif
25317
25318 /*
25319 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25320 * Does not cover __inittext since that is gone later on. On
25321 * 64bit we do not enforce !NX on the low mapping
25322 */
25323 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
25324 - pgprot_val(forbidden) |= _PAGE_NX;
25325 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25326 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25327
25328 +#ifdef CONFIG_DEBUG_RODATA
25329 /*
25330 * The .rodata section needs to be read-only. Using the pfn
25331 * catches all aliases.
25332 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25333 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25334 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25335 pgprot_val(forbidden) |= _PAGE_RW;
25336 +#endif
25337
25338 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25339 /*
25340 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25341 }
25342 #endif
25343
25344 +#ifdef CONFIG_PAX_KERNEXEC
25345 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25346 + pgprot_val(forbidden) |= _PAGE_RW;
25347 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25348 + }
25349 +#endif
25350 +
25351 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25352
25353 return prot;
25354 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25355 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25356 {
25357 /* change init_mm */
25358 + pax_open_kernel();
25359 set_pte_atomic(kpte, pte);
25360 +
25361 #ifdef CONFIG_X86_32
25362 if (!SHARED_KERNEL_PMD) {
25363 +
25364 +#ifdef CONFIG_PAX_PER_CPU_PGD
25365 + unsigned long cpu;
25366 +#else
25367 struct page *page;
25368 +#endif
25369
25370 +#ifdef CONFIG_PAX_PER_CPU_PGD
25371 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25372 + pgd_t *pgd = get_cpu_pgd(cpu);
25373 +#else
25374 list_for_each_entry(page, &pgd_list, lru) {
25375 - pgd_t *pgd;
25376 + pgd_t *pgd = (pgd_t *)page_address(page);
25377 +#endif
25378 +
25379 pud_t *pud;
25380 pmd_t *pmd;
25381
25382 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
25383 + pgd += pgd_index(address);
25384 pud = pud_offset(pgd, address);
25385 pmd = pmd_offset(pud, address);
25386 set_pte_atomic((pte_t *)pmd, pte);
25387 }
25388 }
25389 #endif
25390 + pax_close_kernel();
25391 }
25392
25393 static int
25394 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25395 index f6ff57b..481690f 100644
25396 --- a/arch/x86/mm/pat.c
25397 +++ b/arch/x86/mm/pat.c
25398 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
25399
25400 if (!entry) {
25401 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25402 - current->comm, current->pid, start, end);
25403 + current->comm, task_pid_nr(current), start, end);
25404 return -EINVAL;
25405 }
25406
25407 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25408 while (cursor < to) {
25409 if (!devmem_is_allowed(pfn)) {
25410 printk(KERN_INFO
25411 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25412 - current->comm, from, to);
25413 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25414 + current->comm, from, to, cursor);
25415 return 0;
25416 }
25417 cursor += PAGE_SIZE;
25418 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25419 printk(KERN_INFO
25420 "%s:%d ioremap_change_attr failed %s "
25421 "for %Lx-%Lx\n",
25422 - current->comm, current->pid,
25423 + current->comm, task_pid_nr(current),
25424 cattr_name(flags),
25425 base, (unsigned long long)(base + size));
25426 return -EINVAL;
25427 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25428 if (want_flags != flags) {
25429 printk(KERN_WARNING
25430 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
25431 - current->comm, current->pid,
25432 + current->comm, task_pid_nr(current),
25433 cattr_name(want_flags),
25434 (unsigned long long)paddr,
25435 (unsigned long long)(paddr + size),
25436 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25437 free_memtype(paddr, paddr + size);
25438 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25439 " for %Lx-%Lx, got %s\n",
25440 - current->comm, current->pid,
25441 + current->comm, task_pid_nr(current),
25442 cattr_name(want_flags),
25443 (unsigned long long)paddr,
25444 (unsigned long long)(paddr + size),
25445 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25446 index 9f0614d..92ae64a 100644
25447 --- a/arch/x86/mm/pf_in.c
25448 +++ b/arch/x86/mm/pf_in.c
25449 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25450 int i;
25451 enum reason_type rv = OTHERS;
25452
25453 - p = (unsigned char *)ins_addr;
25454 + p = (unsigned char *)ktla_ktva(ins_addr);
25455 p += skip_prefix(p, &prf);
25456 p += get_opcode(p, &opcode);
25457
25458 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25459 struct prefix_bits prf;
25460 int i;
25461
25462 - p = (unsigned char *)ins_addr;
25463 + p = (unsigned char *)ktla_ktva(ins_addr);
25464 p += skip_prefix(p, &prf);
25465 p += get_opcode(p, &opcode);
25466
25467 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25468 struct prefix_bits prf;
25469 int i;
25470
25471 - p = (unsigned char *)ins_addr;
25472 + p = (unsigned char *)ktla_ktva(ins_addr);
25473 p += skip_prefix(p, &prf);
25474 p += get_opcode(p, &opcode);
25475
25476 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25477 struct prefix_bits prf;
25478 int i;
25479
25480 - p = (unsigned char *)ins_addr;
25481 + p = (unsigned char *)ktla_ktva(ins_addr);
25482 p += skip_prefix(p, &prf);
25483 p += get_opcode(p, &opcode);
25484 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25485 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25486 struct prefix_bits prf;
25487 int i;
25488
25489 - p = (unsigned char *)ins_addr;
25490 + p = (unsigned char *)ktla_ktva(ins_addr);
25491 p += skip_prefix(p, &prf);
25492 p += get_opcode(p, &opcode);
25493 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25494 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25495 index 8573b83..4f3ed7e 100644
25496 --- a/arch/x86/mm/pgtable.c
25497 +++ b/arch/x86/mm/pgtable.c
25498 @@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
25499 list_del(&page->lru);
25500 }
25501
25502 -#define UNSHARED_PTRS_PER_PGD \
25503 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25504 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25505 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25506
25507 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
25508 +{
25509 + unsigned int count = USER_PGD_PTRS;
25510
25511 + while (count--)
25512 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25513 +}
25514 +#endif
25515 +
25516 +#ifdef CONFIG_PAX_PER_CPU_PGD
25517 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
25518 +{
25519 + unsigned int count = USER_PGD_PTRS;
25520 +
25521 + while (count--) {
25522 + pgd_t pgd;
25523 +
25524 +#ifdef CONFIG_X86_64
25525 + pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
25526 +#else
25527 + pgd = *src++;
25528 +#endif
25529 +
25530 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25531 + pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
25532 +#endif
25533 +
25534 + *dst++ = pgd;
25535 + }
25536 +
25537 +}
25538 +#endif
25539 +
25540 +#ifdef CONFIG_X86_64
25541 +#define pxd_t pud_t
25542 +#define pyd_t pgd_t
25543 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25544 +#define pxd_free(mm, pud) pud_free((mm), (pud))
25545 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25546 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
25547 +#define PYD_SIZE PGDIR_SIZE
25548 +#else
25549 +#define pxd_t pmd_t
25550 +#define pyd_t pud_t
25551 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25552 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
25553 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25554 +#define pyd_offset(mm, address) pud_offset((mm), (address))
25555 +#define PYD_SIZE PUD_SIZE
25556 +#endif
25557 +
25558 +#ifdef CONFIG_PAX_PER_CPU_PGD
25559 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25560 +static inline void pgd_dtor(pgd_t *pgd) {}
25561 +#else
25562 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
25563 {
25564 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
25565 @@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
25566 pgd_list_del(pgd);
25567 spin_unlock(&pgd_lock);
25568 }
25569 +#endif
25570
25571 /*
25572 * List of all pgd's needed for non-PAE so it can invalidate entries
25573 @@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
25574 * -- wli
25575 */
25576
25577 -#ifdef CONFIG_X86_PAE
25578 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25579 /*
25580 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25581 * updating the top-level pagetable entries to guarantee the
25582 @@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
25583 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25584 * and initialize the kernel pmds here.
25585 */
25586 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25587 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25588
25589 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25590 {
25591 @@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25592 */
25593 flush_tlb_mm(mm);
25594 }
25595 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25596 +#define PREALLOCATED_PXDS USER_PGD_PTRS
25597 #else /* !CONFIG_X86_PAE */
25598
25599 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25600 -#define PREALLOCATED_PMDS 0
25601 +#define PREALLOCATED_PXDS 0
25602
25603 #endif /* CONFIG_X86_PAE */
25604
25605 -static void free_pmds(pmd_t *pmds[])
25606 +static void free_pxds(pxd_t *pxds[])
25607 {
25608 int i;
25609
25610 - for(i = 0; i < PREALLOCATED_PMDS; i++)
25611 - if (pmds[i])
25612 - free_page((unsigned long)pmds[i]);
25613 + for(i = 0; i < PREALLOCATED_PXDS; i++)
25614 + if (pxds[i])
25615 + free_page((unsigned long)pxds[i]);
25616 }
25617
25618 -static int preallocate_pmds(pmd_t *pmds[])
25619 +static int preallocate_pxds(pxd_t *pxds[])
25620 {
25621 int i;
25622 bool failed = false;
25623
25624 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25625 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25626 - if (pmd == NULL)
25627 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25628 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25629 + if (pxd == NULL)
25630 failed = true;
25631 - pmds[i] = pmd;
25632 + pxds[i] = pxd;
25633 }
25634
25635 if (failed) {
25636 - free_pmds(pmds);
25637 + free_pxds(pxds);
25638 return -ENOMEM;
25639 }
25640
25641 @@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
25642 * preallocate which never got a corresponding vma will need to be
25643 * freed manually.
25644 */
25645 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25646 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25647 {
25648 int i;
25649
25650 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25651 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25652 pgd_t pgd = pgdp[i];
25653
25654 if (pgd_val(pgd) != 0) {
25655 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25656 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25657
25658 - pgdp[i] = native_make_pgd(0);
25659 + set_pgd(pgdp + i, native_make_pgd(0));
25660
25661 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25662 - pmd_free(mm, pmd);
25663 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25664 + pxd_free(mm, pxd);
25665 }
25666 }
25667 }
25668
25669 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25670 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25671 {
25672 - pud_t *pud;
25673 + pyd_t *pyd;
25674 unsigned long addr;
25675 int i;
25676
25677 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25678 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25679 return;
25680
25681 - pud = pud_offset(pgd, 0);
25682 +#ifdef CONFIG_X86_64
25683 + pyd = pyd_offset(mm, 0L);
25684 +#else
25685 + pyd = pyd_offset(pgd, 0L);
25686 +#endif
25687
25688 - for (addr = i = 0; i < PREALLOCATED_PMDS;
25689 - i++, pud++, addr += PUD_SIZE) {
25690 - pmd_t *pmd = pmds[i];
25691 + for (addr = i = 0; i < PREALLOCATED_PXDS;
25692 + i++, pyd++, addr += PYD_SIZE) {
25693 + pxd_t *pxd = pxds[i];
25694
25695 if (i >= KERNEL_PGD_BOUNDARY)
25696 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25697 - sizeof(pmd_t) * PTRS_PER_PMD);
25698 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25699 + sizeof(pxd_t) * PTRS_PER_PMD);
25700
25701 - pud_populate(mm, pud, pmd);
25702 + pyd_populate(mm, pyd, pxd);
25703 }
25704 }
25705
25706 pgd_t *pgd_alloc(struct mm_struct *mm)
25707 {
25708 pgd_t *pgd;
25709 - pmd_t *pmds[PREALLOCATED_PMDS];
25710 + pxd_t *pxds[PREALLOCATED_PXDS];
25711
25712 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25713
25714 @@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25715
25716 mm->pgd = pgd;
25717
25718 - if (preallocate_pmds(pmds) != 0)
25719 + if (preallocate_pxds(pxds) != 0)
25720 goto out_free_pgd;
25721
25722 if (paravirt_pgd_alloc(mm) != 0)
25723 - goto out_free_pmds;
25724 + goto out_free_pxds;
25725
25726 /*
25727 * Make sure that pre-populating the pmds is atomic with
25728 @@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25729 spin_lock(&pgd_lock);
25730
25731 pgd_ctor(mm, pgd);
25732 - pgd_prepopulate_pmd(mm, pgd, pmds);
25733 + pgd_prepopulate_pxd(mm, pgd, pxds);
25734
25735 spin_unlock(&pgd_lock);
25736
25737 return pgd;
25738
25739 -out_free_pmds:
25740 - free_pmds(pmds);
25741 +out_free_pxds:
25742 + free_pxds(pxds);
25743 out_free_pgd:
25744 free_page((unsigned long)pgd);
25745 out:
25746 @@ -295,7 +356,7 @@ out:
25747
25748 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25749 {
25750 - pgd_mop_up_pmds(mm, pgd);
25751 + pgd_mop_up_pxds(mm, pgd);
25752 pgd_dtor(pgd);
25753 paravirt_pgd_free(mm, pgd);
25754 free_page((unsigned long)pgd);
25755 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25756 index a69bcb8..19068ab 100644
25757 --- a/arch/x86/mm/pgtable_32.c
25758 +++ b/arch/x86/mm/pgtable_32.c
25759 @@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25760 return;
25761 }
25762 pte = pte_offset_kernel(pmd, vaddr);
25763 +
25764 + pax_open_kernel();
25765 if (pte_val(pteval))
25766 set_pte_at(&init_mm, vaddr, pte, pteval);
25767 else
25768 pte_clear(&init_mm, vaddr, pte);
25769 + pax_close_kernel();
25770
25771 /*
25772 * It's enough to flush this one mapping.
25773 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25774 index 410531d..0f16030 100644
25775 --- a/arch/x86/mm/setup_nx.c
25776 +++ b/arch/x86/mm/setup_nx.c
25777 @@ -5,8 +5,10 @@
25778 #include <asm/pgtable.h>
25779 #include <asm/proto.h>
25780
25781 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25782 static int disable_nx __cpuinitdata;
25783
25784 +#ifndef CONFIG_PAX_PAGEEXEC
25785 /*
25786 * noexec = on|off
25787 *
25788 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25789 return 0;
25790 }
25791 early_param("noexec", noexec_setup);
25792 +#endif
25793 +
25794 +#endif
25795
25796 void __cpuinit x86_configure_nx(void)
25797 {
25798 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25799 if (cpu_has_nx && !disable_nx)
25800 __supported_pte_mask |= _PAGE_NX;
25801 else
25802 +#endif
25803 __supported_pte_mask &= ~_PAGE_NX;
25804 }
25805
25806 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25807 index d6c0418..06a0ad5 100644
25808 --- a/arch/x86/mm/tlb.c
25809 +++ b/arch/x86/mm/tlb.c
25810 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
25811 BUG();
25812 cpumask_clear_cpu(cpu,
25813 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25814 +
25815 +#ifndef CONFIG_PAX_PER_CPU_PGD
25816 load_cr3(swapper_pg_dir);
25817 +#endif
25818 +
25819 }
25820 EXPORT_SYMBOL_GPL(leave_mm);
25821
25822 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25823 index 877b9a1..a8ecf42 100644
25824 --- a/arch/x86/net/bpf_jit.S
25825 +++ b/arch/x86/net/bpf_jit.S
25826 @@ -9,6 +9,7 @@
25827 */
25828 #include <linux/linkage.h>
25829 #include <asm/dwarf2.h>
25830 +#include <asm/alternative-asm.h>
25831
25832 /*
25833 * Calling convention :
25834 @@ -35,6 +36,7 @@ sk_load_word_positive_offset:
25835 jle bpf_slow_path_word
25836 mov (SKBDATA,%rsi),%eax
25837 bswap %eax /* ntohl() */
25838 + pax_force_retaddr
25839 ret
25840
25841 sk_load_half:
25842 @@ -52,6 +54,7 @@ sk_load_half_positive_offset:
25843 jle bpf_slow_path_half
25844 movzwl (SKBDATA,%rsi),%eax
25845 rol $8,%ax # ntohs()
25846 + pax_force_retaddr
25847 ret
25848
25849 sk_load_byte:
25850 @@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
25851 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25852 jle bpf_slow_path_byte
25853 movzbl (SKBDATA,%rsi),%eax
25854 + pax_force_retaddr
25855 ret
25856
25857 /**
25858 @@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
25859 movzbl (SKBDATA,%rsi),%ebx
25860 and $15,%bl
25861 shl $2,%bl
25862 + pax_force_retaddr
25863 ret
25864
25865 /* rsi contains offset and can be scratched */
25866 @@ -109,6 +114,7 @@ bpf_slow_path_word:
25867 js bpf_error
25868 mov -12(%rbp),%eax
25869 bswap %eax
25870 + pax_force_retaddr
25871 ret
25872
25873 bpf_slow_path_half:
25874 @@ -117,12 +123,14 @@ bpf_slow_path_half:
25875 mov -12(%rbp),%ax
25876 rol $8,%ax
25877 movzwl %ax,%eax
25878 + pax_force_retaddr
25879 ret
25880
25881 bpf_slow_path_byte:
25882 bpf_slow_path_common(1)
25883 js bpf_error
25884 movzbl -12(%rbp),%eax
25885 + pax_force_retaddr
25886 ret
25887
25888 bpf_slow_path_byte_msh:
25889 @@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
25890 and $15,%al
25891 shl $2,%al
25892 xchg %eax,%ebx
25893 + pax_force_retaddr
25894 ret
25895
25896 #define sk_negative_common(SIZE) \
25897 @@ -157,6 +166,7 @@ sk_load_word_negative_offset:
25898 sk_negative_common(4)
25899 mov (%rax), %eax
25900 bswap %eax
25901 + pax_force_retaddr
25902 ret
25903
25904 bpf_slow_path_half_neg:
25905 @@ -168,6 +178,7 @@ sk_load_half_negative_offset:
25906 mov (%rax),%ax
25907 rol $8,%ax
25908 movzwl %ax,%eax
25909 + pax_force_retaddr
25910 ret
25911
25912 bpf_slow_path_byte_neg:
25913 @@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
25914 .globl sk_load_byte_negative_offset
25915 sk_negative_common(1)
25916 movzbl (%rax), %eax
25917 + pax_force_retaddr
25918 ret
25919
25920 bpf_slow_path_byte_msh_neg:
25921 @@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
25922 and $15,%al
25923 shl $2,%al
25924 xchg %eax,%ebx
25925 + pax_force_retaddr
25926 ret
25927
25928 bpf_error:
25929 @@ -197,4 +210,5 @@ bpf_error:
25930 xor %eax,%eax
25931 mov -8(%rbp),%rbx
25932 leaveq
25933 + pax_force_retaddr
25934 ret
25935 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
25936 index 0597f95..a12c36e 100644
25937 --- a/arch/x86/net/bpf_jit_comp.c
25938 +++ b/arch/x86/net/bpf_jit_comp.c
25939 @@ -120,6 +120,11 @@ static inline void bpf_flush_icache(void *start, void *end)
25940 set_fs(old_fs);
25941 }
25942
25943 +struct bpf_jit_work {
25944 + struct work_struct work;
25945 + void *image;
25946 +};
25947 +
25948 #define CHOOSE_LOAD_FUNC(K, func) \
25949 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
25950
25951 @@ -146,6 +151,10 @@ void bpf_jit_compile(struct sk_filter *fp)
25952 if (addrs == NULL)
25953 return;
25954
25955 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
25956 + if (!fp->work)
25957 + goto out;
25958 +
25959 /* Before first pass, make a rough estimation of addrs[]
25960 * each bpf instruction is translated to less than 64 bytes
25961 */
25962 @@ -589,17 +598,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25963 break;
25964 default:
25965 /* hmm, too complex filter, give up with jit compiler */
25966 - goto out;
25967 + goto error;
25968 }
25969 ilen = prog - temp;
25970 if (image) {
25971 if (unlikely(proglen + ilen > oldproglen)) {
25972 pr_err("bpb_jit_compile fatal error\n");
25973 - kfree(addrs);
25974 - module_free(NULL, image);
25975 - return;
25976 + module_free_exec(NULL, image);
25977 + goto error;
25978 }
25979 + pax_open_kernel();
25980 memcpy(image + proglen, temp, ilen);
25981 + pax_close_kernel();
25982 }
25983 proglen += ilen;
25984 addrs[i] = proglen;
25985 @@ -620,11 +630,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25986 break;
25987 }
25988 if (proglen == oldproglen) {
25989 - image = module_alloc(max_t(unsigned int,
25990 - proglen,
25991 - sizeof(struct work_struct)));
25992 + image = module_alloc_exec(proglen);
25993 if (!image)
25994 - goto out;
25995 + goto error;
25996 }
25997 oldproglen = proglen;
25998 }
25999 @@ -640,7 +648,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26000 bpf_flush_icache(image, image + proglen);
26001
26002 fp->bpf_func = (void *)image;
26003 - }
26004 + } else
26005 +error:
26006 + kfree(fp->work);
26007 +
26008 out:
26009 kfree(addrs);
26010 return;
26011 @@ -648,18 +659,20 @@ out:
26012
26013 static void jit_free_defer(struct work_struct *arg)
26014 {
26015 - module_free(NULL, arg);
26016 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
26017 + kfree(arg);
26018 }
26019
26020 /* run from softirq, we must use a work_struct to call
26021 - * module_free() from process context
26022 + * module_free_exec() from process context
26023 */
26024 void bpf_jit_free(struct sk_filter *fp)
26025 {
26026 if (fp->bpf_func != sk_run_filter) {
26027 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
26028 + struct work_struct *work = &fp->work->work;
26029
26030 INIT_WORK(work, jit_free_defer);
26031 + fp->work->image = fp->bpf_func;
26032 schedule_work(work);
26033 }
26034 }
26035 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26036 index d6aa6e8..266395a 100644
26037 --- a/arch/x86/oprofile/backtrace.c
26038 +++ b/arch/x86/oprofile/backtrace.c
26039 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
26040 struct stack_frame_ia32 *fp;
26041 unsigned long bytes;
26042
26043 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26044 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26045 if (bytes != sizeof(bufhead))
26046 return NULL;
26047
26048 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
26049 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
26050
26051 oprofile_add_trace(bufhead[0].return_address);
26052
26053 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
26054 struct stack_frame bufhead[2];
26055 unsigned long bytes;
26056
26057 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26058 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26059 if (bytes != sizeof(bufhead))
26060 return NULL;
26061
26062 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26063 {
26064 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
26065
26066 - if (!user_mode_vm(regs)) {
26067 + if (!user_mode(regs)) {
26068 unsigned long stack = kernel_stack_pointer(regs);
26069 if (depth)
26070 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26071 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
26072 index 140942f..8a5cc55 100644
26073 --- a/arch/x86/pci/mrst.c
26074 +++ b/arch/x86/pci/mrst.c
26075 @@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
26076 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
26077 pci_mmcfg_late_init();
26078 pcibios_enable_irq = mrst_pci_irq_enable;
26079 - pci_root_ops = pci_mrst_ops;
26080 + pax_open_kernel();
26081 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
26082 + pax_close_kernel();
26083 pci_soc_mode = 1;
26084 /* Continue with standard init */
26085 return 1;
26086 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26087 index da8fe05..7ee6704 100644
26088 --- a/arch/x86/pci/pcbios.c
26089 +++ b/arch/x86/pci/pcbios.c
26090 @@ -79,50 +79,93 @@ union bios32 {
26091 static struct {
26092 unsigned long address;
26093 unsigned short segment;
26094 -} bios32_indirect = { 0, __KERNEL_CS };
26095 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26096
26097 /*
26098 * Returns the entry point for the given service, NULL on error
26099 */
26100
26101 -static unsigned long bios32_service(unsigned long service)
26102 +static unsigned long __devinit bios32_service(unsigned long service)
26103 {
26104 unsigned char return_code; /* %al */
26105 unsigned long address; /* %ebx */
26106 unsigned long length; /* %ecx */
26107 unsigned long entry; /* %edx */
26108 unsigned long flags;
26109 + struct desc_struct d, *gdt;
26110
26111 local_irq_save(flags);
26112 - __asm__("lcall *(%%edi); cld"
26113 +
26114 + gdt = get_cpu_gdt_table(smp_processor_id());
26115 +
26116 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26117 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26118 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26119 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26120 +
26121 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26122 : "=a" (return_code),
26123 "=b" (address),
26124 "=c" (length),
26125 "=d" (entry)
26126 : "0" (service),
26127 "1" (0),
26128 - "D" (&bios32_indirect));
26129 + "D" (&bios32_indirect),
26130 + "r"(__PCIBIOS_DS)
26131 + : "memory");
26132 +
26133 + pax_open_kernel();
26134 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26135 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26136 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26137 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26138 + pax_close_kernel();
26139 +
26140 local_irq_restore(flags);
26141
26142 switch (return_code) {
26143 - case 0:
26144 - return address + entry;
26145 - case 0x80: /* Not present */
26146 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26147 - return 0;
26148 - default: /* Shouldn't happen */
26149 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26150 - service, return_code);
26151 + case 0: {
26152 + int cpu;
26153 + unsigned char flags;
26154 +
26155 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26156 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26157 + printk(KERN_WARNING "bios32_service: not valid\n");
26158 return 0;
26159 + }
26160 + address = address + PAGE_OFFSET;
26161 + length += 16UL; /* some BIOSs underreport this... */
26162 + flags = 4;
26163 + if (length >= 64*1024*1024) {
26164 + length >>= PAGE_SHIFT;
26165 + flags |= 8;
26166 + }
26167 +
26168 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26169 + gdt = get_cpu_gdt_table(cpu);
26170 + pack_descriptor(&d, address, length, 0x9b, flags);
26171 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26172 + pack_descriptor(&d, address, length, 0x93, flags);
26173 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26174 + }
26175 + return entry;
26176 + }
26177 + case 0x80: /* Not present */
26178 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26179 + return 0;
26180 + default: /* Shouldn't happen */
26181 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26182 + service, return_code);
26183 + return 0;
26184 }
26185 }
26186
26187 static struct {
26188 unsigned long address;
26189 unsigned short segment;
26190 -} pci_indirect = { 0, __KERNEL_CS };
26191 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26192
26193 -static int pci_bios_present;
26194 +static int pci_bios_present __read_only;
26195
26196 static int __devinit check_pcibios(void)
26197 {
26198 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
26199 unsigned long flags, pcibios_entry;
26200
26201 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26202 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26203 + pci_indirect.address = pcibios_entry;
26204
26205 local_irq_save(flags);
26206 - __asm__(
26207 - "lcall *(%%edi); cld\n\t"
26208 + __asm__("movw %w6, %%ds\n\t"
26209 + "lcall *%%ss:(%%edi); cld\n\t"
26210 + "push %%ss\n\t"
26211 + "pop %%ds\n\t"
26212 "jc 1f\n\t"
26213 "xor %%ah, %%ah\n"
26214 "1:"
26215 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
26216 "=b" (ebx),
26217 "=c" (ecx)
26218 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26219 - "D" (&pci_indirect)
26220 + "D" (&pci_indirect),
26221 + "r" (__PCIBIOS_DS)
26222 : "memory");
26223 local_irq_restore(flags);
26224
26225 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26226
26227 switch (len) {
26228 case 1:
26229 - __asm__("lcall *(%%esi); cld\n\t"
26230 + __asm__("movw %w6, %%ds\n\t"
26231 + "lcall *%%ss:(%%esi); cld\n\t"
26232 + "push %%ss\n\t"
26233 + "pop %%ds\n\t"
26234 "jc 1f\n\t"
26235 "xor %%ah, %%ah\n"
26236 "1:"
26237 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26238 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26239 "b" (bx),
26240 "D" ((long)reg),
26241 - "S" (&pci_indirect));
26242 + "S" (&pci_indirect),
26243 + "r" (__PCIBIOS_DS));
26244 /*
26245 * Zero-extend the result beyond 8 bits, do not trust the
26246 * BIOS having done it:
26247 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26248 *value &= 0xff;
26249 break;
26250 case 2:
26251 - __asm__("lcall *(%%esi); cld\n\t"
26252 + __asm__("movw %w6, %%ds\n\t"
26253 + "lcall *%%ss:(%%esi); cld\n\t"
26254 + "push %%ss\n\t"
26255 + "pop %%ds\n\t"
26256 "jc 1f\n\t"
26257 "xor %%ah, %%ah\n"
26258 "1:"
26259 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26260 : "1" (PCIBIOS_READ_CONFIG_WORD),
26261 "b" (bx),
26262 "D" ((long)reg),
26263 - "S" (&pci_indirect));
26264 + "S" (&pci_indirect),
26265 + "r" (__PCIBIOS_DS));
26266 /*
26267 * Zero-extend the result beyond 16 bits, do not trust the
26268 * BIOS having done it:
26269 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26270 *value &= 0xffff;
26271 break;
26272 case 4:
26273 - __asm__("lcall *(%%esi); cld\n\t"
26274 + __asm__("movw %w6, %%ds\n\t"
26275 + "lcall *%%ss:(%%esi); cld\n\t"
26276 + "push %%ss\n\t"
26277 + "pop %%ds\n\t"
26278 "jc 1f\n\t"
26279 "xor %%ah, %%ah\n"
26280 "1:"
26281 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26282 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26283 "b" (bx),
26284 "D" ((long)reg),
26285 - "S" (&pci_indirect));
26286 + "S" (&pci_indirect),
26287 + "r" (__PCIBIOS_DS));
26288 break;
26289 }
26290
26291 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26292
26293 switch (len) {
26294 case 1:
26295 - __asm__("lcall *(%%esi); cld\n\t"
26296 + __asm__("movw %w6, %%ds\n\t"
26297 + "lcall *%%ss:(%%esi); cld\n\t"
26298 + "push %%ss\n\t"
26299 + "pop %%ds\n\t"
26300 "jc 1f\n\t"
26301 "xor %%ah, %%ah\n"
26302 "1:"
26303 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26304 "c" (value),
26305 "b" (bx),
26306 "D" ((long)reg),
26307 - "S" (&pci_indirect));
26308 + "S" (&pci_indirect),
26309 + "r" (__PCIBIOS_DS));
26310 break;
26311 case 2:
26312 - __asm__("lcall *(%%esi); cld\n\t"
26313 + __asm__("movw %w6, %%ds\n\t"
26314 + "lcall *%%ss:(%%esi); cld\n\t"
26315 + "push %%ss\n\t"
26316 + "pop %%ds\n\t"
26317 "jc 1f\n\t"
26318 "xor %%ah, %%ah\n"
26319 "1:"
26320 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26321 "c" (value),
26322 "b" (bx),
26323 "D" ((long)reg),
26324 - "S" (&pci_indirect));
26325 + "S" (&pci_indirect),
26326 + "r" (__PCIBIOS_DS));
26327 break;
26328 case 4:
26329 - __asm__("lcall *(%%esi); cld\n\t"
26330 + __asm__("movw %w6, %%ds\n\t"
26331 + "lcall *%%ss:(%%esi); cld\n\t"
26332 + "push %%ss\n\t"
26333 + "pop %%ds\n\t"
26334 "jc 1f\n\t"
26335 "xor %%ah, %%ah\n"
26336 "1:"
26337 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26338 "c" (value),
26339 "b" (bx),
26340 "D" ((long)reg),
26341 - "S" (&pci_indirect));
26342 + "S" (&pci_indirect),
26343 + "r" (__PCIBIOS_DS));
26344 break;
26345 }
26346
26347 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26348
26349 DBG("PCI: Fetching IRQ routing table... ");
26350 __asm__("push %%es\n\t"
26351 + "movw %w8, %%ds\n\t"
26352 "push %%ds\n\t"
26353 "pop %%es\n\t"
26354 - "lcall *(%%esi); cld\n\t"
26355 + "lcall *%%ss:(%%esi); cld\n\t"
26356 "pop %%es\n\t"
26357 + "push %%ss\n\t"
26358 + "pop %%ds\n"
26359 "jc 1f\n\t"
26360 "xor %%ah, %%ah\n"
26361 "1:"
26362 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26363 "1" (0),
26364 "D" ((long) &opt),
26365 "S" (&pci_indirect),
26366 - "m" (opt)
26367 + "m" (opt),
26368 + "r" (__PCIBIOS_DS)
26369 : "memory");
26370 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26371 if (ret & 0xff00)
26372 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26373 {
26374 int ret;
26375
26376 - __asm__("lcall *(%%esi); cld\n\t"
26377 + __asm__("movw %w5, %%ds\n\t"
26378 + "lcall *%%ss:(%%esi); cld\n\t"
26379 + "push %%ss\n\t"
26380 + "pop %%ds\n"
26381 "jc 1f\n\t"
26382 "xor %%ah, %%ah\n"
26383 "1:"
26384 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26385 : "0" (PCIBIOS_SET_PCI_HW_INT),
26386 "b" ((dev->bus->number << 8) | dev->devfn),
26387 "c" ((irq << 8) | (pin + 10)),
26388 - "S" (&pci_indirect));
26389 + "S" (&pci_indirect),
26390 + "r" (__PCIBIOS_DS));
26391 return !(ret & 0xff00);
26392 }
26393 EXPORT_SYMBOL(pcibios_set_irq_routing);
26394 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
26395 index 40e4469..1ab536e 100644
26396 --- a/arch/x86/platform/efi/efi_32.c
26397 +++ b/arch/x86/platform/efi/efi_32.c
26398 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
26399 {
26400 struct desc_ptr gdt_descr;
26401
26402 +#ifdef CONFIG_PAX_KERNEXEC
26403 + struct desc_struct d;
26404 +#endif
26405 +
26406 local_irq_save(efi_rt_eflags);
26407
26408 load_cr3(initial_page_table);
26409 __flush_tlb_all();
26410
26411 +#ifdef CONFIG_PAX_KERNEXEC
26412 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
26413 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26414 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
26415 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26416 +#endif
26417 +
26418 gdt_descr.address = __pa(get_cpu_gdt_table(0));
26419 gdt_descr.size = GDT_SIZE - 1;
26420 load_gdt(&gdt_descr);
26421 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
26422 {
26423 struct desc_ptr gdt_descr;
26424
26425 +#ifdef CONFIG_PAX_KERNEXEC
26426 + struct desc_struct d;
26427 +
26428 + memset(&d, 0, sizeof d);
26429 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26430 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26431 +#endif
26432 +
26433 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
26434 gdt_descr.size = GDT_SIZE - 1;
26435 load_gdt(&gdt_descr);
26436 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26437 index fbe66e6..c5c0dd2 100644
26438 --- a/arch/x86/platform/efi/efi_stub_32.S
26439 +++ b/arch/x86/platform/efi/efi_stub_32.S
26440 @@ -6,7 +6,9 @@
26441 */
26442
26443 #include <linux/linkage.h>
26444 +#include <linux/init.h>
26445 #include <asm/page_types.h>
26446 +#include <asm/segment.h>
26447
26448 /*
26449 * efi_call_phys(void *, ...) is a function with variable parameters.
26450 @@ -20,7 +22,7 @@
26451 * service functions will comply with gcc calling convention, too.
26452 */
26453
26454 -.text
26455 +__INIT
26456 ENTRY(efi_call_phys)
26457 /*
26458 * 0. The function can only be called in Linux kernel. So CS has been
26459 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
26460 * The mapping of lower virtual memory has been created in prelog and
26461 * epilog.
26462 */
26463 - movl $1f, %edx
26464 - subl $__PAGE_OFFSET, %edx
26465 - jmp *%edx
26466 + movl $(__KERNEXEC_EFI_DS), %edx
26467 + mov %edx, %ds
26468 + mov %edx, %es
26469 + mov %edx, %ss
26470 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
26471 1:
26472
26473 /*
26474 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
26475 * parameter 2, ..., param n. To make things easy, we save the return
26476 * address of efi_call_phys in a global variable.
26477 */
26478 - popl %edx
26479 - movl %edx, saved_return_addr
26480 - /* get the function pointer into ECX*/
26481 - popl %ecx
26482 - movl %ecx, efi_rt_function_ptr
26483 - movl $2f, %edx
26484 - subl $__PAGE_OFFSET, %edx
26485 - pushl %edx
26486 + popl (saved_return_addr)
26487 + popl (efi_rt_function_ptr)
26488
26489 /*
26490 * 3. Clear PG bit in %CR0.
26491 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
26492 /*
26493 * 5. Call the physical function.
26494 */
26495 - jmp *%ecx
26496 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
26497
26498 -2:
26499 /*
26500 * 6. After EFI runtime service returns, control will return to
26501 * following instruction. We'd better readjust stack pointer first.
26502 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
26503 movl %cr0, %edx
26504 orl $0x80000000, %edx
26505 movl %edx, %cr0
26506 - jmp 1f
26507 -1:
26508 +
26509 /*
26510 * 8. Now restore the virtual mode from flat mode by
26511 * adding EIP with PAGE_OFFSET.
26512 */
26513 - movl $1f, %edx
26514 - jmp *%edx
26515 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
26516 1:
26517 + movl $(__KERNEL_DS), %edx
26518 + mov %edx, %ds
26519 + mov %edx, %es
26520 + mov %edx, %ss
26521
26522 /*
26523 * 9. Balance the stack. And because EAX contain the return value,
26524 * we'd better not clobber it.
26525 */
26526 - leal efi_rt_function_ptr, %edx
26527 - movl (%edx), %ecx
26528 - pushl %ecx
26529 + pushl (efi_rt_function_ptr)
26530
26531 /*
26532 - * 10. Push the saved return address onto the stack and return.
26533 + * 10. Return to the saved return address.
26534 */
26535 - leal saved_return_addr, %edx
26536 - movl (%edx), %ecx
26537 - pushl %ecx
26538 - ret
26539 + jmpl *(saved_return_addr)
26540 ENDPROC(efi_call_phys)
26541 .previous
26542
26543 -.data
26544 +__INITDATA
26545 saved_return_addr:
26546 .long 0
26547 efi_rt_function_ptr:
26548 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26549 index 4c07cca..2c8427d 100644
26550 --- a/arch/x86/platform/efi/efi_stub_64.S
26551 +++ b/arch/x86/platform/efi/efi_stub_64.S
26552 @@ -7,6 +7,7 @@
26553 */
26554
26555 #include <linux/linkage.h>
26556 +#include <asm/alternative-asm.h>
26557
26558 #define SAVE_XMM \
26559 mov %rsp, %rax; \
26560 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
26561 call *%rdi
26562 addq $32, %rsp
26563 RESTORE_XMM
26564 + pax_force_retaddr 0, 1
26565 ret
26566 ENDPROC(efi_call0)
26567
26568 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
26569 call *%rdi
26570 addq $32, %rsp
26571 RESTORE_XMM
26572 + pax_force_retaddr 0, 1
26573 ret
26574 ENDPROC(efi_call1)
26575
26576 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
26577 call *%rdi
26578 addq $32, %rsp
26579 RESTORE_XMM
26580 + pax_force_retaddr 0, 1
26581 ret
26582 ENDPROC(efi_call2)
26583
26584 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
26585 call *%rdi
26586 addq $32, %rsp
26587 RESTORE_XMM
26588 + pax_force_retaddr 0, 1
26589 ret
26590 ENDPROC(efi_call3)
26591
26592 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
26593 call *%rdi
26594 addq $32, %rsp
26595 RESTORE_XMM
26596 + pax_force_retaddr 0, 1
26597 ret
26598 ENDPROC(efi_call4)
26599
26600 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
26601 call *%rdi
26602 addq $48, %rsp
26603 RESTORE_XMM
26604 + pax_force_retaddr 0, 1
26605 ret
26606 ENDPROC(efi_call5)
26607
26608 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
26609 call *%rdi
26610 addq $48, %rsp
26611 RESTORE_XMM
26612 + pax_force_retaddr 0, 1
26613 ret
26614 ENDPROC(efi_call6)
26615 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
26616 index e31bcd8..f12dc46 100644
26617 --- a/arch/x86/platform/mrst/mrst.c
26618 +++ b/arch/x86/platform/mrst/mrst.c
26619 @@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
26620 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
26621 int sfi_mrtc_num;
26622
26623 -static void mrst_power_off(void)
26624 +static __noreturn void mrst_power_off(void)
26625 {
26626 + BUG();
26627 }
26628
26629 -static void mrst_reboot(void)
26630 +static __noreturn void mrst_reboot(void)
26631 {
26632 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
26633 + BUG();
26634 }
26635
26636 /* parse all the mtimer info to a static mtimer array */
26637 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26638 index 218cdb1..fd55c08 100644
26639 --- a/arch/x86/power/cpu.c
26640 +++ b/arch/x86/power/cpu.c
26641 @@ -132,7 +132,7 @@ static void do_fpu_end(void)
26642 static void fix_processor_context(void)
26643 {
26644 int cpu = smp_processor_id();
26645 - struct tss_struct *t = &per_cpu(init_tss, cpu);
26646 + struct tss_struct *t = init_tss + cpu;
26647
26648 set_tss_desc(cpu, t); /*
26649 * This just modifies memory; should not be
26650 @@ -142,7 +142,9 @@ static void fix_processor_context(void)
26651 */
26652
26653 #ifdef CONFIG_X86_64
26654 + pax_open_kernel();
26655 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26656 + pax_close_kernel();
26657
26658 syscall_init(); /* This sets MSR_*STAR and related */
26659 #endif
26660 diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
26661 index b685296..0180fa9 100644
26662 --- a/arch/x86/tools/relocs.c
26663 +++ b/arch/x86/tools/relocs.c
26664 @@ -12,10 +12,13 @@
26665 #include <regex.h>
26666 #include <tools/le_byteshift.h>
26667
26668 +#include "../../../include/generated/autoconf.h"
26669 +
26670 static void die(char *fmt, ...);
26671
26672 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
26673 static Elf32_Ehdr ehdr;
26674 +static Elf32_Phdr *phdr;
26675 static unsigned long reloc_count, reloc_idx;
26676 static unsigned long *relocs;
26677 static unsigned long reloc16_count, reloc16_idx;
26678 @@ -323,9 +326,39 @@ static void read_ehdr(FILE *fp)
26679 }
26680 }
26681
26682 +static void read_phdrs(FILE *fp)
26683 +{
26684 + unsigned int i;
26685 +
26686 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
26687 + if (!phdr) {
26688 + die("Unable to allocate %d program headers\n",
26689 + ehdr.e_phnum);
26690 + }
26691 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
26692 + die("Seek to %d failed: %s\n",
26693 + ehdr.e_phoff, strerror(errno));
26694 + }
26695 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
26696 + die("Cannot read ELF program headers: %s\n",
26697 + strerror(errno));
26698 + }
26699 + for(i = 0; i < ehdr.e_phnum; i++) {
26700 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
26701 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
26702 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
26703 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
26704 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
26705 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
26706 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
26707 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
26708 + }
26709 +
26710 +}
26711 +
26712 static void read_shdrs(FILE *fp)
26713 {
26714 - int i;
26715 + unsigned int i;
26716 Elf32_Shdr shdr;
26717
26718 secs = calloc(ehdr.e_shnum, sizeof(struct section));
26719 @@ -360,7 +393,7 @@ static void read_shdrs(FILE *fp)
26720
26721 static void read_strtabs(FILE *fp)
26722 {
26723 - int i;
26724 + unsigned int i;
26725 for (i = 0; i < ehdr.e_shnum; i++) {
26726 struct section *sec = &secs[i];
26727 if (sec->shdr.sh_type != SHT_STRTAB) {
26728 @@ -385,7 +418,7 @@ static void read_strtabs(FILE *fp)
26729
26730 static void read_symtabs(FILE *fp)
26731 {
26732 - int i,j;
26733 + unsigned int i,j;
26734 for (i = 0; i < ehdr.e_shnum; i++) {
26735 struct section *sec = &secs[i];
26736 if (sec->shdr.sh_type != SHT_SYMTAB) {
26737 @@ -418,7 +451,9 @@ static void read_symtabs(FILE *fp)
26738
26739 static void read_relocs(FILE *fp)
26740 {
26741 - int i,j;
26742 + unsigned int i,j;
26743 + uint32_t base;
26744 +
26745 for (i = 0; i < ehdr.e_shnum; i++) {
26746 struct section *sec = &secs[i];
26747 if (sec->shdr.sh_type != SHT_REL) {
26748 @@ -438,9 +473,22 @@ static void read_relocs(FILE *fp)
26749 die("Cannot read symbol table: %s\n",
26750 strerror(errno));
26751 }
26752 + base = 0;
26753 +
26754 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26755 + for (j = 0; j < ehdr.e_phnum; j++) {
26756 + if (phdr[j].p_type != PT_LOAD )
26757 + continue;
26758 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
26759 + continue;
26760 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
26761 + break;
26762 + }
26763 +#endif
26764 +
26765 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
26766 Elf32_Rel *rel = &sec->reltab[j];
26767 - rel->r_offset = elf32_to_cpu(rel->r_offset);
26768 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
26769 rel->r_info = elf32_to_cpu(rel->r_info);
26770 }
26771 }
26772 @@ -449,13 +497,13 @@ static void read_relocs(FILE *fp)
26773
26774 static void print_absolute_symbols(void)
26775 {
26776 - int i;
26777 + unsigned int i;
26778 printf("Absolute symbols\n");
26779 printf(" Num: Value Size Type Bind Visibility Name\n");
26780 for (i = 0; i < ehdr.e_shnum; i++) {
26781 struct section *sec = &secs[i];
26782 char *sym_strtab;
26783 - int j;
26784 + unsigned int j;
26785
26786 if (sec->shdr.sh_type != SHT_SYMTAB) {
26787 continue;
26788 @@ -482,7 +530,7 @@ static void print_absolute_symbols(void)
26789
26790 static void print_absolute_relocs(void)
26791 {
26792 - int i, printed = 0;
26793 + unsigned int i, printed = 0;
26794
26795 for (i = 0; i < ehdr.e_shnum; i++) {
26796 struct section *sec = &secs[i];
26797 @@ -551,7 +599,7 @@ static void print_absolute_relocs(void)
26798 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26799 int use_real_mode)
26800 {
26801 - int i;
26802 + unsigned int i;
26803 /* Walk through the relocations */
26804 for (i = 0; i < ehdr.e_shnum; i++) {
26805 char *sym_strtab;
26806 @@ -581,6 +629,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26807 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
26808 r_type = ELF32_R_TYPE(rel->r_info);
26809
26810 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
26811 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
26812 + continue;
26813 +
26814 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26815 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
26816 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
26817 + continue;
26818 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
26819 + continue;
26820 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
26821 + continue;
26822 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
26823 + continue;
26824 +#endif
26825 +
26826 shn_abs = sym->st_shndx == SHN_ABS;
26827
26828 switch (r_type) {
26829 @@ -674,7 +738,7 @@ static int write32(unsigned int v, FILE *f)
26830
26831 static void emit_relocs(int as_text, int use_real_mode)
26832 {
26833 - int i;
26834 + unsigned int i;
26835 /* Count how many relocations I have and allocate space for them. */
26836 reloc_count = 0;
26837 walk_relocs(count_reloc, use_real_mode);
26838 @@ -801,6 +865,7 @@ int main(int argc, char **argv)
26839 fname, strerror(errno));
26840 }
26841 read_ehdr(fp);
26842 + read_phdrs(fp);
26843 read_shdrs(fp);
26844 read_strtabs(fp);
26845 read_symtabs(fp);
26846 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26847 index fd14be1..e3c79c0 100644
26848 --- a/arch/x86/vdso/Makefile
26849 +++ b/arch/x86/vdso/Makefile
26850 @@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
26851 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
26852 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
26853
26854 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26855 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26856 GCOV_PROFILE := n
26857
26858 #
26859 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26860 index 66e6d93..587f435 100644
26861 --- a/arch/x86/vdso/vdso32-setup.c
26862 +++ b/arch/x86/vdso/vdso32-setup.c
26863 @@ -25,6 +25,7 @@
26864 #include <asm/tlbflush.h>
26865 #include <asm/vdso.h>
26866 #include <asm/proto.h>
26867 +#include <asm/mman.h>
26868
26869 enum {
26870 VDSO_DISABLED = 0,
26871 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26872 void enable_sep_cpu(void)
26873 {
26874 int cpu = get_cpu();
26875 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26876 + struct tss_struct *tss = init_tss + cpu;
26877
26878 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26879 put_cpu();
26880 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26881 gate_vma.vm_start = FIXADDR_USER_START;
26882 gate_vma.vm_end = FIXADDR_USER_END;
26883 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26884 - gate_vma.vm_page_prot = __P101;
26885 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26886
26887 return 0;
26888 }
26889 @@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26890 if (compat)
26891 addr = VDSO_HIGH_BASE;
26892 else {
26893 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26894 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26895 if (IS_ERR_VALUE(addr)) {
26896 ret = addr;
26897 goto up_fail;
26898 }
26899 }
26900
26901 - current->mm->context.vdso = (void *)addr;
26902 + current->mm->context.vdso = addr;
26903
26904 if (compat_uses_vma || !compat) {
26905 /*
26906 @@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26907 }
26908
26909 current_thread_info()->sysenter_return =
26910 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26911 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26912
26913 up_fail:
26914 if (ret)
26915 - current->mm->context.vdso = NULL;
26916 + current->mm->context.vdso = 0;
26917
26918 up_write(&mm->mmap_sem);
26919
26920 @@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
26921
26922 const char *arch_vma_name(struct vm_area_struct *vma)
26923 {
26924 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26925 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26926 return "[vdso]";
26927 +
26928 +#ifdef CONFIG_PAX_SEGMEXEC
26929 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26930 + return "[vdso]";
26931 +#endif
26932 +
26933 return NULL;
26934 }
26935
26936 @@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
26937 * Check to see if the corresponding task was created in compat vdso
26938 * mode.
26939 */
26940 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26941 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26942 return &gate_vma;
26943 return NULL;
26944 }
26945 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26946 index 00aaf04..4a26505 100644
26947 --- a/arch/x86/vdso/vma.c
26948 +++ b/arch/x86/vdso/vma.c
26949 @@ -16,8 +16,6 @@
26950 #include <asm/vdso.h>
26951 #include <asm/page.h>
26952
26953 -unsigned int __read_mostly vdso_enabled = 1;
26954 -
26955 extern char vdso_start[], vdso_end[];
26956 extern unsigned short vdso_sync_cpuid;
26957
26958 @@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26959 * unaligned here as a result of stack start randomization.
26960 */
26961 addr = PAGE_ALIGN(addr);
26962 - addr = align_addr(addr, NULL, ALIGN_VDSO);
26963
26964 return addr;
26965 }
26966 @@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
26967 unsigned size)
26968 {
26969 struct mm_struct *mm = current->mm;
26970 - unsigned long addr;
26971 + unsigned long addr = 0;
26972 int ret;
26973
26974 - if (!vdso_enabled)
26975 - return 0;
26976 -
26977 down_write(&mm->mmap_sem);
26978 +
26979 +#ifdef CONFIG_PAX_RANDMMAP
26980 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26981 +#endif
26982 +
26983 addr = vdso_addr(mm->start_stack, size);
26984 + addr = align_addr(addr, NULL, ALIGN_VDSO);
26985 addr = get_unmapped_area(NULL, addr, size, 0, 0);
26986 if (IS_ERR_VALUE(addr)) {
26987 ret = addr;
26988 goto up_fail;
26989 }
26990
26991 - current->mm->context.vdso = (void *)addr;
26992 + mm->context.vdso = addr;
26993
26994 ret = install_special_mapping(mm, addr, size,
26995 VM_READ|VM_EXEC|
26996 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
26997 pages);
26998 - if (ret) {
26999 - current->mm->context.vdso = NULL;
27000 - goto up_fail;
27001 - }
27002 + if (ret)
27003 + mm->context.vdso = 0;
27004
27005 up_fail:
27006 up_write(&mm->mmap_sem);
27007 @@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27008 vdsox32_size);
27009 }
27010 #endif
27011 -
27012 -static __init int vdso_setup(char *s)
27013 -{
27014 - vdso_enabled = simple_strtoul(s, NULL, 0);
27015 - return 0;
27016 -}
27017 -__setup("vdso=", vdso_setup);
27018 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27019 index 6c7f1e8..de96944 100644
27020 --- a/arch/x86/xen/enlighten.c
27021 +++ b/arch/x86/xen/enlighten.c
27022 @@ -95,8 +95,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27023
27024 struct shared_info xen_dummy_shared_info;
27025
27026 -void *xen_initial_gdt;
27027 -
27028 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
27029 __read_mostly int xen_have_vector_callback;
27030 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
27031 @@ -1157,30 +1155,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
27032 #endif
27033 };
27034
27035 -static void xen_reboot(int reason)
27036 +static __noreturn void xen_reboot(int reason)
27037 {
27038 struct sched_shutdown r = { .reason = reason };
27039
27040 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
27041 - BUG();
27042 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
27043 + BUG();
27044 }
27045
27046 -static void xen_restart(char *msg)
27047 +static __noreturn void xen_restart(char *msg)
27048 {
27049 xen_reboot(SHUTDOWN_reboot);
27050 }
27051
27052 -static void xen_emergency_restart(void)
27053 +static __noreturn void xen_emergency_restart(void)
27054 {
27055 xen_reboot(SHUTDOWN_reboot);
27056 }
27057
27058 -static void xen_machine_halt(void)
27059 +static __noreturn void xen_machine_halt(void)
27060 {
27061 xen_reboot(SHUTDOWN_poweroff);
27062 }
27063
27064 -static void xen_machine_power_off(void)
27065 +static __noreturn void xen_machine_power_off(void)
27066 {
27067 if (pm_power_off)
27068 pm_power_off();
27069 @@ -1283,7 +1281,17 @@ asmlinkage void __init xen_start_kernel(void)
27070 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27071
27072 /* Work out if we support NX */
27073 - x86_configure_nx();
27074 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27075 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27076 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27077 + unsigned l, h;
27078 +
27079 + __supported_pte_mask |= _PAGE_NX;
27080 + rdmsr(MSR_EFER, l, h);
27081 + l |= EFER_NX;
27082 + wrmsr(MSR_EFER, l, h);
27083 + }
27084 +#endif
27085
27086 xen_setup_features();
27087
27088 @@ -1314,13 +1322,6 @@ asmlinkage void __init xen_start_kernel(void)
27089
27090 machine_ops = xen_machine_ops;
27091
27092 - /*
27093 - * The only reliable way to retain the initial address of the
27094 - * percpu gdt_page is to remember it here, so we can go and
27095 - * mark it RW later, when the initial percpu area is freed.
27096 - */
27097 - xen_initial_gdt = &per_cpu(gdt_page, 0);
27098 -
27099 xen_smp_init();
27100
27101 #ifdef CONFIG_ACPI_NUMA
27102 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27103 index 69f5857..0699dc5 100644
27104 --- a/arch/x86/xen/mmu.c
27105 +++ b/arch/x86/xen/mmu.c
27106 @@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27107 convert_pfn_mfn(init_level4_pgt);
27108 convert_pfn_mfn(level3_ident_pgt);
27109 convert_pfn_mfn(level3_kernel_pgt);
27110 + convert_pfn_mfn(level3_vmalloc_start_pgt);
27111 + convert_pfn_mfn(level3_vmalloc_end_pgt);
27112 + convert_pfn_mfn(level3_vmemmap_pgt);
27113
27114 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27115 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27116 @@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27117 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27118 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27119 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27120 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27121 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27122 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27123 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27124 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27125 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27126 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27127
27128 @@ -1964,6 +1971,7 @@ static void __init xen_post_allocator_init(void)
27129 pv_mmu_ops.set_pud = xen_set_pud;
27130 #if PAGETABLE_LEVELS == 4
27131 pv_mmu_ops.set_pgd = xen_set_pgd;
27132 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27133 #endif
27134
27135 /* This will work as long as patching hasn't happened yet
27136 @@ -2045,6 +2053,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
27137 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27138 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27139 .set_pgd = xen_set_pgd_hyper,
27140 + .set_pgd_batched = xen_set_pgd_hyper,
27141
27142 .alloc_pud = xen_alloc_pmd_init,
27143 .release_pud = xen_release_pmd_init,
27144 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27145 index 0503c0c..ceb2d16 100644
27146 --- a/arch/x86/xen/smp.c
27147 +++ b/arch/x86/xen/smp.c
27148 @@ -215,11 +215,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27149 {
27150 BUG_ON(smp_processor_id() != 0);
27151 native_smp_prepare_boot_cpu();
27152 -
27153 - /* We've switched to the "real" per-cpu gdt, so make sure the
27154 - old memory can be recycled */
27155 - make_lowmem_page_readwrite(xen_initial_gdt);
27156 -
27157 xen_filter_cpu_maps();
27158 xen_setup_vcpu_info_placement();
27159 }
27160 @@ -296,12 +291,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27161 gdt = get_cpu_gdt_table(cpu);
27162
27163 ctxt->flags = VGCF_IN_KERNEL;
27164 - ctxt->user_regs.ds = __USER_DS;
27165 - ctxt->user_regs.es = __USER_DS;
27166 + ctxt->user_regs.ds = __KERNEL_DS;
27167 + ctxt->user_regs.es = __KERNEL_DS;
27168 ctxt->user_regs.ss = __KERNEL_DS;
27169 #ifdef CONFIG_X86_32
27170 ctxt->user_regs.fs = __KERNEL_PERCPU;
27171 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27172 + savesegment(gs, ctxt->user_regs.gs);
27173 #else
27174 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27175 #endif
27176 @@ -352,13 +347,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27177 int rc;
27178
27179 per_cpu(current_task, cpu) = idle;
27180 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
27181 #ifdef CONFIG_X86_32
27182 irq_ctx_init(cpu);
27183 #else
27184 clear_tsk_thread_flag(idle, TIF_FORK);
27185 - per_cpu(kernel_stack, cpu) =
27186 - (unsigned long)task_stack_page(idle) -
27187 - KERNEL_STACK_OFFSET + THREAD_SIZE;
27188 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27189 #endif
27190 xen_setup_runstate_info(cpu);
27191 xen_setup_timer(cpu);
27192 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27193 index b040b0e..8cc4fe0 100644
27194 --- a/arch/x86/xen/xen-asm_32.S
27195 +++ b/arch/x86/xen/xen-asm_32.S
27196 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
27197 ESP_OFFSET=4 # bytes pushed onto stack
27198
27199 /*
27200 - * Store vcpu_info pointer for easy access. Do it this way to
27201 - * avoid having to reload %fs
27202 + * Store vcpu_info pointer for easy access.
27203 */
27204 #ifdef CONFIG_SMP
27205 - GET_THREAD_INFO(%eax)
27206 - movl TI_cpu(%eax), %eax
27207 - movl __per_cpu_offset(,%eax,4), %eax
27208 - mov xen_vcpu(%eax), %eax
27209 + push %fs
27210 + mov $(__KERNEL_PERCPU), %eax
27211 + mov %eax, %fs
27212 + mov PER_CPU_VAR(xen_vcpu), %eax
27213 + pop %fs
27214 #else
27215 movl xen_vcpu, %eax
27216 #endif
27217 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27218 index aaa7291..3f77960 100644
27219 --- a/arch/x86/xen/xen-head.S
27220 +++ b/arch/x86/xen/xen-head.S
27221 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
27222 #ifdef CONFIG_X86_32
27223 mov %esi,xen_start_info
27224 mov $init_thread_union+THREAD_SIZE,%esp
27225 +#ifdef CONFIG_SMP
27226 + movl $cpu_gdt_table,%edi
27227 + movl $__per_cpu_load,%eax
27228 + movw %ax,__KERNEL_PERCPU + 2(%edi)
27229 + rorl $16,%eax
27230 + movb %al,__KERNEL_PERCPU + 4(%edi)
27231 + movb %ah,__KERNEL_PERCPU + 7(%edi)
27232 + movl $__per_cpu_end - 1,%eax
27233 + subl $__per_cpu_start,%eax
27234 + movw %ax,__KERNEL_PERCPU + 0(%edi)
27235 +#endif
27236 #else
27237 mov %rsi,xen_start_info
27238 mov $init_thread_union+THREAD_SIZE,%rsp
27239 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27240 index b095739..8c17bcd 100644
27241 --- a/arch/x86/xen/xen-ops.h
27242 +++ b/arch/x86/xen/xen-ops.h
27243 @@ -10,8 +10,6 @@
27244 extern const char xen_hypervisor_callback[];
27245 extern const char xen_failsafe_callback[];
27246
27247 -extern void *xen_initial_gdt;
27248 -
27249 struct trap_info;
27250 void xen_copy_trap_info(struct trap_info *traps);
27251
27252 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
27253 index 525bd3d..ef888b1 100644
27254 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
27255 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
27256 @@ -119,9 +119,9 @@
27257 ----------------------------------------------------------------------*/
27258
27259 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
27260 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
27261 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
27262 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
27263 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27264
27265 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
27266 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
27267 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
27268 index 2f33760..835e50a 100644
27269 --- a/arch/xtensa/variants/fsf/include/variant/core.h
27270 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
27271 @@ -11,6 +11,7 @@
27272 #ifndef _XTENSA_CORE_H
27273 #define _XTENSA_CORE_H
27274
27275 +#include <linux/const.h>
27276
27277 /****************************************************************************
27278 Parameters Useful for Any Code, USER or PRIVILEGED
27279 @@ -112,9 +113,9 @@
27280 ----------------------------------------------------------------------*/
27281
27282 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27283 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27284 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27285 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27286 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27287
27288 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
27289 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
27290 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
27291 index af00795..2bb8105 100644
27292 --- a/arch/xtensa/variants/s6000/include/variant/core.h
27293 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
27294 @@ -11,6 +11,7 @@
27295 #ifndef _XTENSA_CORE_CONFIGURATION_H
27296 #define _XTENSA_CORE_CONFIGURATION_H
27297
27298 +#include <linux/const.h>
27299
27300 /****************************************************************************
27301 Parameters Useful for Any Code, USER or PRIVILEGED
27302 @@ -118,9 +119,9 @@
27303 ----------------------------------------------------------------------*/
27304
27305 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27306 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27307 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27308 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27309 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27310
27311 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
27312 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
27313 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27314 index 58916af..9cb880b 100644
27315 --- a/block/blk-iopoll.c
27316 +++ b/block/blk-iopoll.c
27317 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27318 }
27319 EXPORT_SYMBOL(blk_iopoll_complete);
27320
27321 -static void blk_iopoll_softirq(struct softirq_action *h)
27322 +static void blk_iopoll_softirq(void)
27323 {
27324 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27325 int rearm = 0, budget = blk_iopoll_budget;
27326 diff --git a/block/blk-map.c b/block/blk-map.c
27327 index 623e1cd..ca1e109 100644
27328 --- a/block/blk-map.c
27329 +++ b/block/blk-map.c
27330 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27331 if (!len || !kbuf)
27332 return -EINVAL;
27333
27334 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
27335 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
27336 if (do_copy)
27337 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27338 else
27339 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27340 index 467c8de..4bddc6d 100644
27341 --- a/block/blk-softirq.c
27342 +++ b/block/blk-softirq.c
27343 @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27344 * Softirq action handler - move entries to local list and loop over them
27345 * while passing them to the queue registered handler.
27346 */
27347 -static void blk_done_softirq(struct softirq_action *h)
27348 +static void blk_done_softirq(void)
27349 {
27350 struct list_head *cpu_list, local_list;
27351
27352 diff --git a/block/bsg.c b/block/bsg.c
27353 index ff64ae3..593560c 100644
27354 --- a/block/bsg.c
27355 +++ b/block/bsg.c
27356 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27357 struct sg_io_v4 *hdr, struct bsg_device *bd,
27358 fmode_t has_write_perm)
27359 {
27360 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27361 + unsigned char *cmdptr;
27362 +
27363 if (hdr->request_len > BLK_MAX_CDB) {
27364 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27365 if (!rq->cmd)
27366 return -ENOMEM;
27367 - }
27368 + cmdptr = rq->cmd;
27369 + } else
27370 + cmdptr = tmpcmd;
27371
27372 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
27373 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27374 hdr->request_len))
27375 return -EFAULT;
27376
27377 + if (cmdptr != rq->cmd)
27378 + memcpy(rq->cmd, cmdptr, hdr->request_len);
27379 +
27380 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27381 if (blk_verify_command(rq->cmd, has_write_perm))
27382 return -EPERM;
27383 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27384 index 7c668c8..db3521c 100644
27385 --- a/block/compat_ioctl.c
27386 +++ b/block/compat_ioctl.c
27387 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27388 err |= __get_user(f->spec1, &uf->spec1);
27389 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27390 err |= __get_user(name, &uf->name);
27391 - f->name = compat_ptr(name);
27392 + f->name = (void __force_kernel *)compat_ptr(name);
27393 if (err) {
27394 err = -EFAULT;
27395 goto out;
27396 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
27397 index 6296b40..417c00f 100644
27398 --- a/block/partitions/efi.c
27399 +++ b/block/partitions/efi.c
27400 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
27401 if (!gpt)
27402 return NULL;
27403
27404 + if (!le32_to_cpu(gpt->num_partition_entries))
27405 + return NULL;
27406 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
27407 + if (!pte)
27408 + return NULL;
27409 +
27410 count = le32_to_cpu(gpt->num_partition_entries) *
27411 le32_to_cpu(gpt->sizeof_partition_entry);
27412 - if (!count)
27413 - return NULL;
27414 - pte = kzalloc(count, GFP_KERNEL);
27415 - if (!pte)
27416 - return NULL;
27417 -
27418 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
27419 (u8 *) pte,
27420 count) < count) {
27421 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27422 index 260fa80..e8f3caf 100644
27423 --- a/block/scsi_ioctl.c
27424 +++ b/block/scsi_ioctl.c
27425 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
27426 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27427 struct sg_io_hdr *hdr, fmode_t mode)
27428 {
27429 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27430 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27431 + unsigned char *cmdptr;
27432 +
27433 + if (rq->cmd != rq->__cmd)
27434 + cmdptr = rq->cmd;
27435 + else
27436 + cmdptr = tmpcmd;
27437 +
27438 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27439 return -EFAULT;
27440 +
27441 + if (cmdptr != rq->cmd)
27442 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27443 +
27444 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27445 return -EPERM;
27446
27447 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27448 int err;
27449 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27450 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27451 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27452 + unsigned char *cmdptr;
27453
27454 if (!sic)
27455 return -EINVAL;
27456 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27457 */
27458 err = -EFAULT;
27459 rq->cmd_len = cmdlen;
27460 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
27461 +
27462 + if (rq->cmd != rq->__cmd)
27463 + cmdptr = rq->cmd;
27464 + else
27465 + cmdptr = tmpcmd;
27466 +
27467 + if (copy_from_user(cmdptr, sic->data, cmdlen))
27468 goto error;
27469
27470 + if (rq->cmd != cmdptr)
27471 + memcpy(rq->cmd, cmdptr, cmdlen);
27472 +
27473 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27474 goto error;
27475
27476 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27477 index 671d4d6..5f24030 100644
27478 --- a/crypto/cryptd.c
27479 +++ b/crypto/cryptd.c
27480 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
27481
27482 struct cryptd_blkcipher_request_ctx {
27483 crypto_completion_t complete;
27484 -};
27485 +} __no_const;
27486
27487 struct cryptd_hash_ctx {
27488 struct crypto_shash *child;
27489 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
27490
27491 struct cryptd_aead_request_ctx {
27492 crypto_completion_t complete;
27493 -};
27494 +} __no_const;
27495
27496 static void cryptd_queue_worker(struct work_struct *work);
27497
27498 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
27499 index e6defd8..c26a225 100644
27500 --- a/drivers/acpi/apei/cper.c
27501 +++ b/drivers/acpi/apei/cper.c
27502 @@ -38,12 +38,12 @@
27503 */
27504 u64 cper_next_record_id(void)
27505 {
27506 - static atomic64_t seq;
27507 + static atomic64_unchecked_t seq;
27508
27509 - if (!atomic64_read(&seq))
27510 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
27511 + if (!atomic64_read_unchecked(&seq))
27512 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
27513
27514 - return atomic64_inc_return(&seq);
27515 + return atomic64_inc_return_unchecked(&seq);
27516 }
27517 EXPORT_SYMBOL_GPL(cper_next_record_id);
27518
27519 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
27520 index 7586544..636a2f0 100644
27521 --- a/drivers/acpi/ec_sys.c
27522 +++ b/drivers/acpi/ec_sys.c
27523 @@ -12,6 +12,7 @@
27524 #include <linux/acpi.h>
27525 #include <linux/debugfs.h>
27526 #include <linux/module.h>
27527 +#include <linux/uaccess.h>
27528 #include "internal.h"
27529
27530 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
27531 @@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27532 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
27533 */
27534 unsigned int size = EC_SPACE_SIZE;
27535 - u8 *data = (u8 *) buf;
27536 + u8 data;
27537 loff_t init_off = *off;
27538 int err = 0;
27539
27540 @@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27541 size = count;
27542
27543 while (size) {
27544 - err = ec_read(*off, &data[*off - init_off]);
27545 + err = ec_read(*off, &data);
27546 if (err)
27547 return err;
27548 + if (put_user(data, &buf[*off - init_off]))
27549 + return -EFAULT;
27550 *off += 1;
27551 size--;
27552 }
27553 @@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27554
27555 unsigned int size = count;
27556 loff_t init_off = *off;
27557 - u8 *data = (u8 *) buf;
27558 int err = 0;
27559
27560 if (*off >= EC_SPACE_SIZE)
27561 @@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27562 }
27563
27564 while (size) {
27565 - u8 byte_write = data[*off - init_off];
27566 + u8 byte_write;
27567 + if (get_user(byte_write, &buf[*off - init_off]))
27568 + return -EFAULT;
27569 err = ec_write(*off, byte_write);
27570 if (err)
27571 return err;
27572 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27573 index 251c7b62..000462d 100644
27574 --- a/drivers/acpi/proc.c
27575 +++ b/drivers/acpi/proc.c
27576 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
27577 size_t count, loff_t * ppos)
27578 {
27579 struct list_head *node, *next;
27580 - char strbuf[5];
27581 - char str[5] = "";
27582 - unsigned int len = count;
27583 + char strbuf[5] = {0};
27584
27585 - if (len > 4)
27586 - len = 4;
27587 - if (len < 0)
27588 + if (count > 4)
27589 + count = 4;
27590 + if (copy_from_user(strbuf, buffer, count))
27591 return -EFAULT;
27592 -
27593 - if (copy_from_user(strbuf, buffer, len))
27594 - return -EFAULT;
27595 - strbuf[len] = '\0';
27596 - sscanf(strbuf, "%s", str);
27597 + strbuf[count] = '\0';
27598
27599 mutex_lock(&acpi_device_lock);
27600 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27601 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
27602 if (!dev->wakeup.flags.valid)
27603 continue;
27604
27605 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
27606 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27607 if (device_can_wakeup(&dev->dev)) {
27608 bool enable = !device_may_wakeup(&dev->dev);
27609 device_set_wakeup_enable(&dev->dev, enable);
27610 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
27611 index 0734086..3ad3e4c 100644
27612 --- a/drivers/acpi/processor_driver.c
27613 +++ b/drivers/acpi/processor_driver.c
27614 @@ -556,7 +556,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27615 return 0;
27616 #endif
27617
27618 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27619 + BUG_ON(pr->id >= nr_cpu_ids);
27620
27621 /*
27622 * Buggy BIOS check
27623 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27624 index 23763a1..6375e67 100644
27625 --- a/drivers/ata/libata-core.c
27626 +++ b/drivers/ata/libata-core.c
27627 @@ -4736,7 +4736,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27628 struct ata_port *ap;
27629 unsigned int tag;
27630
27631 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27632 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27633 ap = qc->ap;
27634
27635 qc->flags = 0;
27636 @@ -4752,7 +4752,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27637 struct ata_port *ap;
27638 struct ata_link *link;
27639
27640 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27641 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27642 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27643 ap = qc->ap;
27644 link = qc->dev->link;
27645 @@ -5816,6 +5816,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27646 return;
27647
27648 spin_lock(&lock);
27649 + pax_open_kernel();
27650
27651 for (cur = ops->inherits; cur; cur = cur->inherits) {
27652 void **inherit = (void **)cur;
27653 @@ -5829,8 +5830,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27654 if (IS_ERR(*pp))
27655 *pp = NULL;
27656
27657 - ops->inherits = NULL;
27658 + *(struct ata_port_operations **)&ops->inherits = NULL;
27659
27660 + pax_close_kernel();
27661 spin_unlock(&lock);
27662 }
27663
27664 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
27665 index 3239517..343b5f6 100644
27666 --- a/drivers/ata/pata_arasan_cf.c
27667 +++ b/drivers/ata/pata_arasan_cf.c
27668 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
27669 /* Handle platform specific quirks */
27670 if (pdata->quirk) {
27671 if (pdata->quirk & CF_BROKEN_PIO) {
27672 - ap->ops->set_piomode = NULL;
27673 + pax_open_kernel();
27674 + *(void **)&ap->ops->set_piomode = NULL;
27675 + pax_close_kernel();
27676 ap->pio_mask = 0;
27677 }
27678 if (pdata->quirk & CF_BROKEN_MWDMA)
27679 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
27680 index f9b983a..887b9d8 100644
27681 --- a/drivers/atm/adummy.c
27682 +++ b/drivers/atm/adummy.c
27683 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
27684 vcc->pop(vcc, skb);
27685 else
27686 dev_kfree_skb_any(skb);
27687 - atomic_inc(&vcc->stats->tx);
27688 + atomic_inc_unchecked(&vcc->stats->tx);
27689
27690 return 0;
27691 }
27692 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
27693 index f8f41e0..1f987dd 100644
27694 --- a/drivers/atm/ambassador.c
27695 +++ b/drivers/atm/ambassador.c
27696 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
27697 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
27698
27699 // VC layer stats
27700 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27701 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27702
27703 // free the descriptor
27704 kfree (tx_descr);
27705 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27706 dump_skb ("<<<", vc, skb);
27707
27708 // VC layer stats
27709 - atomic_inc(&atm_vcc->stats->rx);
27710 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27711 __net_timestamp(skb);
27712 // end of our responsibility
27713 atm_vcc->push (atm_vcc, skb);
27714 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27715 } else {
27716 PRINTK (KERN_INFO, "dropped over-size frame");
27717 // should we count this?
27718 - atomic_inc(&atm_vcc->stats->rx_drop);
27719 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27720 }
27721
27722 } else {
27723 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
27724 }
27725
27726 if (check_area (skb->data, skb->len)) {
27727 - atomic_inc(&atm_vcc->stats->tx_err);
27728 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
27729 return -ENOMEM; // ?
27730 }
27731
27732 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
27733 index b22d71c..d6e1049 100644
27734 --- a/drivers/atm/atmtcp.c
27735 +++ b/drivers/atm/atmtcp.c
27736 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27737 if (vcc->pop) vcc->pop(vcc,skb);
27738 else dev_kfree_skb(skb);
27739 if (dev_data) return 0;
27740 - atomic_inc(&vcc->stats->tx_err);
27741 + atomic_inc_unchecked(&vcc->stats->tx_err);
27742 return -ENOLINK;
27743 }
27744 size = skb->len+sizeof(struct atmtcp_hdr);
27745 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27746 if (!new_skb) {
27747 if (vcc->pop) vcc->pop(vcc,skb);
27748 else dev_kfree_skb(skb);
27749 - atomic_inc(&vcc->stats->tx_err);
27750 + atomic_inc_unchecked(&vcc->stats->tx_err);
27751 return -ENOBUFS;
27752 }
27753 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
27754 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27755 if (vcc->pop) vcc->pop(vcc,skb);
27756 else dev_kfree_skb(skb);
27757 out_vcc->push(out_vcc,new_skb);
27758 - atomic_inc(&vcc->stats->tx);
27759 - atomic_inc(&out_vcc->stats->rx);
27760 + atomic_inc_unchecked(&vcc->stats->tx);
27761 + atomic_inc_unchecked(&out_vcc->stats->rx);
27762 return 0;
27763 }
27764
27765 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27766 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
27767 read_unlock(&vcc_sklist_lock);
27768 if (!out_vcc) {
27769 - atomic_inc(&vcc->stats->tx_err);
27770 + atomic_inc_unchecked(&vcc->stats->tx_err);
27771 goto done;
27772 }
27773 skb_pull(skb,sizeof(struct atmtcp_hdr));
27774 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27775 __net_timestamp(new_skb);
27776 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
27777 out_vcc->push(out_vcc,new_skb);
27778 - atomic_inc(&vcc->stats->tx);
27779 - atomic_inc(&out_vcc->stats->rx);
27780 + atomic_inc_unchecked(&vcc->stats->tx);
27781 + atomic_inc_unchecked(&out_vcc->stats->rx);
27782 done:
27783 if (vcc->pop) vcc->pop(vcc,skb);
27784 else dev_kfree_skb(skb);
27785 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
27786 index 2059ee4..faf51c7 100644
27787 --- a/drivers/atm/eni.c
27788 +++ b/drivers/atm/eni.c
27789 @@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
27790 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
27791 vcc->dev->number);
27792 length = 0;
27793 - atomic_inc(&vcc->stats->rx_err);
27794 + atomic_inc_unchecked(&vcc->stats->rx_err);
27795 }
27796 else {
27797 length = ATM_CELL_SIZE-1; /* no HEC */
27798 @@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27799 size);
27800 }
27801 eff = length = 0;
27802 - atomic_inc(&vcc->stats->rx_err);
27803 + atomic_inc_unchecked(&vcc->stats->rx_err);
27804 }
27805 else {
27806 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
27807 @@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27808 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
27809 vcc->dev->number,vcc->vci,length,size << 2,descr);
27810 length = eff = 0;
27811 - atomic_inc(&vcc->stats->rx_err);
27812 + atomic_inc_unchecked(&vcc->stats->rx_err);
27813 }
27814 }
27815 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
27816 @@ -767,7 +767,7 @@ rx_dequeued++;
27817 vcc->push(vcc,skb);
27818 pushed++;
27819 }
27820 - atomic_inc(&vcc->stats->rx);
27821 + atomic_inc_unchecked(&vcc->stats->rx);
27822 }
27823 wake_up(&eni_dev->rx_wait);
27824 }
27825 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
27826 PCI_DMA_TODEVICE);
27827 if (vcc->pop) vcc->pop(vcc,skb);
27828 else dev_kfree_skb_irq(skb);
27829 - atomic_inc(&vcc->stats->tx);
27830 + atomic_inc_unchecked(&vcc->stats->tx);
27831 wake_up(&eni_dev->tx_wait);
27832 dma_complete++;
27833 }
27834 @@ -1567,7 +1567,7 @@ tx_complete++;
27835 /*--------------------------------- entries ---------------------------------*/
27836
27837
27838 -static const char *media_name[] __devinitdata = {
27839 +static const char *media_name[] __devinitconst = {
27840 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
27841 "UTP", "05?", "06?", "07?", /* 4- 7 */
27842 "TAXI","09?", "10?", "11?", /* 8-11 */
27843 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
27844 index 86fed1b..6dc4721 100644
27845 --- a/drivers/atm/firestream.c
27846 +++ b/drivers/atm/firestream.c
27847 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
27848 }
27849 }
27850
27851 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27852 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27853
27854 fs_dprintk (FS_DEBUG_TXMEM, "i");
27855 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
27856 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27857 #endif
27858 skb_put (skb, qe->p1 & 0xffff);
27859 ATM_SKB(skb)->vcc = atm_vcc;
27860 - atomic_inc(&atm_vcc->stats->rx);
27861 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27862 __net_timestamp(skb);
27863 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
27864 atm_vcc->push (atm_vcc, skb);
27865 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27866 kfree (pe);
27867 }
27868 if (atm_vcc)
27869 - atomic_inc(&atm_vcc->stats->rx_drop);
27870 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27871 break;
27872 case 0x1f: /* Reassembly abort: no buffers. */
27873 /* Silently increment error counter. */
27874 if (atm_vcc)
27875 - atomic_inc(&atm_vcc->stats->rx_drop);
27876 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27877 break;
27878 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
27879 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
27880 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
27881 index 361f5ae..7fc552d 100644
27882 --- a/drivers/atm/fore200e.c
27883 +++ b/drivers/atm/fore200e.c
27884 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
27885 #endif
27886 /* check error condition */
27887 if (*entry->status & STATUS_ERROR)
27888 - atomic_inc(&vcc->stats->tx_err);
27889 + atomic_inc_unchecked(&vcc->stats->tx_err);
27890 else
27891 - atomic_inc(&vcc->stats->tx);
27892 + atomic_inc_unchecked(&vcc->stats->tx);
27893 }
27894 }
27895
27896 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27897 if (skb == NULL) {
27898 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
27899
27900 - atomic_inc(&vcc->stats->rx_drop);
27901 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27902 return -ENOMEM;
27903 }
27904
27905 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27906
27907 dev_kfree_skb_any(skb);
27908
27909 - atomic_inc(&vcc->stats->rx_drop);
27910 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27911 return -ENOMEM;
27912 }
27913
27914 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27915
27916 vcc->push(vcc, skb);
27917 - atomic_inc(&vcc->stats->rx);
27918 + atomic_inc_unchecked(&vcc->stats->rx);
27919
27920 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27921
27922 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
27923 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
27924 fore200e->atm_dev->number,
27925 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
27926 - atomic_inc(&vcc->stats->rx_err);
27927 + atomic_inc_unchecked(&vcc->stats->rx_err);
27928 }
27929 }
27930
27931 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
27932 goto retry_here;
27933 }
27934
27935 - atomic_inc(&vcc->stats->tx_err);
27936 + atomic_inc_unchecked(&vcc->stats->tx_err);
27937
27938 fore200e->tx_sat++;
27939 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
27940 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
27941 index b182c2f..1c6fa8a 100644
27942 --- a/drivers/atm/he.c
27943 +++ b/drivers/atm/he.c
27944 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27945
27946 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
27947 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
27948 - atomic_inc(&vcc->stats->rx_drop);
27949 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27950 goto return_host_buffers;
27951 }
27952
27953 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27954 RBRQ_LEN_ERR(he_dev->rbrq_head)
27955 ? "LEN_ERR" : "",
27956 vcc->vpi, vcc->vci);
27957 - atomic_inc(&vcc->stats->rx_err);
27958 + atomic_inc_unchecked(&vcc->stats->rx_err);
27959 goto return_host_buffers;
27960 }
27961
27962 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27963 vcc->push(vcc, skb);
27964 spin_lock(&he_dev->global_lock);
27965
27966 - atomic_inc(&vcc->stats->rx);
27967 + atomic_inc_unchecked(&vcc->stats->rx);
27968
27969 return_host_buffers:
27970 ++pdus_assembled;
27971 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
27972 tpd->vcc->pop(tpd->vcc, tpd->skb);
27973 else
27974 dev_kfree_skb_any(tpd->skb);
27975 - atomic_inc(&tpd->vcc->stats->tx_err);
27976 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
27977 }
27978 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
27979 return;
27980 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27981 vcc->pop(vcc, skb);
27982 else
27983 dev_kfree_skb_any(skb);
27984 - atomic_inc(&vcc->stats->tx_err);
27985 + atomic_inc_unchecked(&vcc->stats->tx_err);
27986 return -EINVAL;
27987 }
27988
27989 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27990 vcc->pop(vcc, skb);
27991 else
27992 dev_kfree_skb_any(skb);
27993 - atomic_inc(&vcc->stats->tx_err);
27994 + atomic_inc_unchecked(&vcc->stats->tx_err);
27995 return -EINVAL;
27996 }
27997 #endif
27998 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27999 vcc->pop(vcc, skb);
28000 else
28001 dev_kfree_skb_any(skb);
28002 - atomic_inc(&vcc->stats->tx_err);
28003 + atomic_inc_unchecked(&vcc->stats->tx_err);
28004 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28005 return -ENOMEM;
28006 }
28007 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28008 vcc->pop(vcc, skb);
28009 else
28010 dev_kfree_skb_any(skb);
28011 - atomic_inc(&vcc->stats->tx_err);
28012 + atomic_inc_unchecked(&vcc->stats->tx_err);
28013 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28014 return -ENOMEM;
28015 }
28016 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28017 __enqueue_tpd(he_dev, tpd, cid);
28018 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28019
28020 - atomic_inc(&vcc->stats->tx);
28021 + atomic_inc_unchecked(&vcc->stats->tx);
28022
28023 return 0;
28024 }
28025 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
28026 index 75fd691..2d20b14 100644
28027 --- a/drivers/atm/horizon.c
28028 +++ b/drivers/atm/horizon.c
28029 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
28030 {
28031 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
28032 // VC layer stats
28033 - atomic_inc(&vcc->stats->rx);
28034 + atomic_inc_unchecked(&vcc->stats->rx);
28035 __net_timestamp(skb);
28036 // end of our responsibility
28037 vcc->push (vcc, skb);
28038 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
28039 dev->tx_iovec = NULL;
28040
28041 // VC layer stats
28042 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28043 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28044
28045 // free the skb
28046 hrz_kfree_skb (skb);
28047 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
28048 index 1c05212..c28e200 100644
28049 --- a/drivers/atm/idt77252.c
28050 +++ b/drivers/atm/idt77252.c
28051 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
28052 else
28053 dev_kfree_skb(skb);
28054
28055 - atomic_inc(&vcc->stats->tx);
28056 + atomic_inc_unchecked(&vcc->stats->tx);
28057 }
28058
28059 atomic_dec(&scq->used);
28060 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28061 if ((sb = dev_alloc_skb(64)) == NULL) {
28062 printk("%s: Can't allocate buffers for aal0.\n",
28063 card->name);
28064 - atomic_add(i, &vcc->stats->rx_drop);
28065 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28066 break;
28067 }
28068 if (!atm_charge(vcc, sb->truesize)) {
28069 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
28070 card->name);
28071 - atomic_add(i - 1, &vcc->stats->rx_drop);
28072 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
28073 dev_kfree_skb(sb);
28074 break;
28075 }
28076 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28077 ATM_SKB(sb)->vcc = vcc;
28078 __net_timestamp(sb);
28079 vcc->push(vcc, sb);
28080 - atomic_inc(&vcc->stats->rx);
28081 + atomic_inc_unchecked(&vcc->stats->rx);
28082
28083 cell += ATM_CELL_PAYLOAD;
28084 }
28085 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28086 "(CDC: %08x)\n",
28087 card->name, len, rpp->len, readl(SAR_REG_CDC));
28088 recycle_rx_pool_skb(card, rpp);
28089 - atomic_inc(&vcc->stats->rx_err);
28090 + atomic_inc_unchecked(&vcc->stats->rx_err);
28091 return;
28092 }
28093 if (stat & SAR_RSQE_CRC) {
28094 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
28095 recycle_rx_pool_skb(card, rpp);
28096 - atomic_inc(&vcc->stats->rx_err);
28097 + atomic_inc_unchecked(&vcc->stats->rx_err);
28098 return;
28099 }
28100 if (skb_queue_len(&rpp->queue) > 1) {
28101 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28102 RXPRINTK("%s: Can't alloc RX skb.\n",
28103 card->name);
28104 recycle_rx_pool_skb(card, rpp);
28105 - atomic_inc(&vcc->stats->rx_err);
28106 + atomic_inc_unchecked(&vcc->stats->rx_err);
28107 return;
28108 }
28109 if (!atm_charge(vcc, skb->truesize)) {
28110 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28111 __net_timestamp(skb);
28112
28113 vcc->push(vcc, skb);
28114 - atomic_inc(&vcc->stats->rx);
28115 + atomic_inc_unchecked(&vcc->stats->rx);
28116
28117 return;
28118 }
28119 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28120 __net_timestamp(skb);
28121
28122 vcc->push(vcc, skb);
28123 - atomic_inc(&vcc->stats->rx);
28124 + atomic_inc_unchecked(&vcc->stats->rx);
28125
28126 if (skb->truesize > SAR_FB_SIZE_3)
28127 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
28128 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
28129 if (vcc->qos.aal != ATM_AAL0) {
28130 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
28131 card->name, vpi, vci);
28132 - atomic_inc(&vcc->stats->rx_drop);
28133 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28134 goto drop;
28135 }
28136
28137 if ((sb = dev_alloc_skb(64)) == NULL) {
28138 printk("%s: Can't allocate buffers for AAL0.\n",
28139 card->name);
28140 - atomic_inc(&vcc->stats->rx_err);
28141 + atomic_inc_unchecked(&vcc->stats->rx_err);
28142 goto drop;
28143 }
28144
28145 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
28146 ATM_SKB(sb)->vcc = vcc;
28147 __net_timestamp(sb);
28148 vcc->push(vcc, sb);
28149 - atomic_inc(&vcc->stats->rx);
28150 + atomic_inc_unchecked(&vcc->stats->rx);
28151
28152 drop:
28153 skb_pull(queue, 64);
28154 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28155
28156 if (vc == NULL) {
28157 printk("%s: NULL connection in send().\n", card->name);
28158 - atomic_inc(&vcc->stats->tx_err);
28159 + atomic_inc_unchecked(&vcc->stats->tx_err);
28160 dev_kfree_skb(skb);
28161 return -EINVAL;
28162 }
28163 if (!test_bit(VCF_TX, &vc->flags)) {
28164 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
28165 - atomic_inc(&vcc->stats->tx_err);
28166 + atomic_inc_unchecked(&vcc->stats->tx_err);
28167 dev_kfree_skb(skb);
28168 return -EINVAL;
28169 }
28170 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28171 break;
28172 default:
28173 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
28174 - atomic_inc(&vcc->stats->tx_err);
28175 + atomic_inc_unchecked(&vcc->stats->tx_err);
28176 dev_kfree_skb(skb);
28177 return -EINVAL;
28178 }
28179
28180 if (skb_shinfo(skb)->nr_frags != 0) {
28181 printk("%s: No scatter-gather yet.\n", card->name);
28182 - atomic_inc(&vcc->stats->tx_err);
28183 + atomic_inc_unchecked(&vcc->stats->tx_err);
28184 dev_kfree_skb(skb);
28185 return -EINVAL;
28186 }
28187 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28188
28189 err = queue_skb(card, vc, skb, oam);
28190 if (err) {
28191 - atomic_inc(&vcc->stats->tx_err);
28192 + atomic_inc_unchecked(&vcc->stats->tx_err);
28193 dev_kfree_skb(skb);
28194 return err;
28195 }
28196 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
28197 skb = dev_alloc_skb(64);
28198 if (!skb) {
28199 printk("%s: Out of memory in send_oam().\n", card->name);
28200 - atomic_inc(&vcc->stats->tx_err);
28201 + atomic_inc_unchecked(&vcc->stats->tx_err);
28202 return -ENOMEM;
28203 }
28204 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
28205 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
28206 index d438601..8b98495 100644
28207 --- a/drivers/atm/iphase.c
28208 +++ b/drivers/atm/iphase.c
28209 @@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
28210 status = (u_short) (buf_desc_ptr->desc_mode);
28211 if (status & (RX_CER | RX_PTE | RX_OFL))
28212 {
28213 - atomic_inc(&vcc->stats->rx_err);
28214 + atomic_inc_unchecked(&vcc->stats->rx_err);
28215 IF_ERR(printk("IA: bad packet, dropping it");)
28216 if (status & RX_CER) {
28217 IF_ERR(printk(" cause: packet CRC error\n");)
28218 @@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
28219 len = dma_addr - buf_addr;
28220 if (len > iadev->rx_buf_sz) {
28221 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
28222 - atomic_inc(&vcc->stats->rx_err);
28223 + atomic_inc_unchecked(&vcc->stats->rx_err);
28224 goto out_free_desc;
28225 }
28226
28227 @@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28228 ia_vcc = INPH_IA_VCC(vcc);
28229 if (ia_vcc == NULL)
28230 {
28231 - atomic_inc(&vcc->stats->rx_err);
28232 + atomic_inc_unchecked(&vcc->stats->rx_err);
28233 atm_return(vcc, skb->truesize);
28234 dev_kfree_skb_any(skb);
28235 goto INCR_DLE;
28236 @@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28237 if ((length > iadev->rx_buf_sz) || (length >
28238 (skb->len - sizeof(struct cpcs_trailer))))
28239 {
28240 - atomic_inc(&vcc->stats->rx_err);
28241 + atomic_inc_unchecked(&vcc->stats->rx_err);
28242 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
28243 length, skb->len);)
28244 atm_return(vcc, skb->truesize);
28245 @@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28246
28247 IF_RX(printk("rx_dle_intr: skb push");)
28248 vcc->push(vcc,skb);
28249 - atomic_inc(&vcc->stats->rx);
28250 + atomic_inc_unchecked(&vcc->stats->rx);
28251 iadev->rx_pkt_cnt++;
28252 }
28253 INCR_DLE:
28254 @@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
28255 {
28256 struct k_sonet_stats *stats;
28257 stats = &PRIV(_ia_dev[board])->sonet_stats;
28258 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
28259 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
28260 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
28261 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
28262 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
28263 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
28264 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
28265 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
28266 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
28267 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
28268 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
28269 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
28270 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
28271 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
28272 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
28273 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
28274 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
28275 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
28276 }
28277 ia_cmds.status = 0;
28278 break;
28279 @@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28280 if ((desc == 0) || (desc > iadev->num_tx_desc))
28281 {
28282 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
28283 - atomic_inc(&vcc->stats->tx);
28284 + atomic_inc_unchecked(&vcc->stats->tx);
28285 if (vcc->pop)
28286 vcc->pop(vcc, skb);
28287 else
28288 @@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28289 ATM_DESC(skb) = vcc->vci;
28290 skb_queue_tail(&iadev->tx_dma_q, skb);
28291
28292 - atomic_inc(&vcc->stats->tx);
28293 + atomic_inc_unchecked(&vcc->stats->tx);
28294 iadev->tx_pkt_cnt++;
28295 /* Increment transaction counter */
28296 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
28297
28298 #if 0
28299 /* add flow control logic */
28300 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
28301 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
28302 if (iavcc->vc_desc_cnt > 10) {
28303 vcc->tx_quota = vcc->tx_quota * 3 / 4;
28304 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
28305 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
28306 index 68c7588..7036683 100644
28307 --- a/drivers/atm/lanai.c
28308 +++ b/drivers/atm/lanai.c
28309 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
28310 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
28311 lanai_endtx(lanai, lvcc);
28312 lanai_free_skb(lvcc->tx.atmvcc, skb);
28313 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
28314 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
28315 }
28316
28317 /* Try to fill the buffer - don't call unless there is backlog */
28318 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
28319 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28320 __net_timestamp(skb);
28321 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28322 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28323 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28324 out:
28325 lvcc->rx.buf.ptr = end;
28326 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
28327 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28328 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28329 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28330 lanai->stats.service_rxnotaal5++;
28331 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28332 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28333 return 0;
28334 }
28335 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
28336 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28337 int bytes;
28338 read_unlock(&vcc_sklist_lock);
28339 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28340 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28341 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28342 lvcc->stats.x.aal5.service_trash++;
28343 bytes = (SERVICE_GET_END(s) * 16) -
28344 (((unsigned long) lvcc->rx.buf.ptr) -
28345 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28346 }
28347 if (s & SERVICE_STREAM) {
28348 read_unlock(&vcc_sklist_lock);
28349 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28350 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28351 lvcc->stats.x.aal5.service_stream++;
28352 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28353 "PDU on VCI %d!\n", lanai->number, vci);
28354 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28355 return 0;
28356 }
28357 DPRINTK("got rx crc error on vci %d\n", vci);
28358 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28359 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28360 lvcc->stats.x.aal5.service_rxcrc++;
28361 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28362 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
28363 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28364 index 1c70c45..300718d 100644
28365 --- a/drivers/atm/nicstar.c
28366 +++ b/drivers/atm/nicstar.c
28367 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28368 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28369 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28370 card->index);
28371 - atomic_inc(&vcc->stats->tx_err);
28372 + atomic_inc_unchecked(&vcc->stats->tx_err);
28373 dev_kfree_skb_any(skb);
28374 return -EINVAL;
28375 }
28376 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28377 if (!vc->tx) {
28378 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28379 card->index);
28380 - atomic_inc(&vcc->stats->tx_err);
28381 + atomic_inc_unchecked(&vcc->stats->tx_err);
28382 dev_kfree_skb_any(skb);
28383 return -EINVAL;
28384 }
28385 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28386 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28387 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28388 card->index);
28389 - atomic_inc(&vcc->stats->tx_err);
28390 + atomic_inc_unchecked(&vcc->stats->tx_err);
28391 dev_kfree_skb_any(skb);
28392 return -EINVAL;
28393 }
28394
28395 if (skb_shinfo(skb)->nr_frags != 0) {
28396 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28397 - atomic_inc(&vcc->stats->tx_err);
28398 + atomic_inc_unchecked(&vcc->stats->tx_err);
28399 dev_kfree_skb_any(skb);
28400 return -EINVAL;
28401 }
28402 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28403 }
28404
28405 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28406 - atomic_inc(&vcc->stats->tx_err);
28407 + atomic_inc_unchecked(&vcc->stats->tx_err);
28408 dev_kfree_skb_any(skb);
28409 return -EIO;
28410 }
28411 - atomic_inc(&vcc->stats->tx);
28412 + atomic_inc_unchecked(&vcc->stats->tx);
28413
28414 return 0;
28415 }
28416 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28417 printk
28418 ("nicstar%d: Can't allocate buffers for aal0.\n",
28419 card->index);
28420 - atomic_add(i, &vcc->stats->rx_drop);
28421 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28422 break;
28423 }
28424 if (!atm_charge(vcc, sb->truesize)) {
28425 RXPRINTK
28426 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28427 card->index);
28428 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28429 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28430 dev_kfree_skb_any(sb);
28431 break;
28432 }
28433 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28434 ATM_SKB(sb)->vcc = vcc;
28435 __net_timestamp(sb);
28436 vcc->push(vcc, sb);
28437 - atomic_inc(&vcc->stats->rx);
28438 + atomic_inc_unchecked(&vcc->stats->rx);
28439 cell += ATM_CELL_PAYLOAD;
28440 }
28441
28442 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28443 if (iovb == NULL) {
28444 printk("nicstar%d: Out of iovec buffers.\n",
28445 card->index);
28446 - atomic_inc(&vcc->stats->rx_drop);
28447 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28448 recycle_rx_buf(card, skb);
28449 return;
28450 }
28451 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28452 small or large buffer itself. */
28453 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28454 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28455 - atomic_inc(&vcc->stats->rx_err);
28456 + atomic_inc_unchecked(&vcc->stats->rx_err);
28457 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28458 NS_MAX_IOVECS);
28459 NS_PRV_IOVCNT(iovb) = 0;
28460 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28461 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28462 card->index);
28463 which_list(card, skb);
28464 - atomic_inc(&vcc->stats->rx_err);
28465 + atomic_inc_unchecked(&vcc->stats->rx_err);
28466 recycle_rx_buf(card, skb);
28467 vc->rx_iov = NULL;
28468 recycle_iov_buf(card, iovb);
28469 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28470 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28471 card->index);
28472 which_list(card, skb);
28473 - atomic_inc(&vcc->stats->rx_err);
28474 + atomic_inc_unchecked(&vcc->stats->rx_err);
28475 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28476 NS_PRV_IOVCNT(iovb));
28477 vc->rx_iov = NULL;
28478 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28479 printk(" - PDU size mismatch.\n");
28480 else
28481 printk(".\n");
28482 - atomic_inc(&vcc->stats->rx_err);
28483 + atomic_inc_unchecked(&vcc->stats->rx_err);
28484 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28485 NS_PRV_IOVCNT(iovb));
28486 vc->rx_iov = NULL;
28487 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28488 /* skb points to a small buffer */
28489 if (!atm_charge(vcc, skb->truesize)) {
28490 push_rxbufs(card, skb);
28491 - atomic_inc(&vcc->stats->rx_drop);
28492 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28493 } else {
28494 skb_put(skb, len);
28495 dequeue_sm_buf(card, skb);
28496 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28497 ATM_SKB(skb)->vcc = vcc;
28498 __net_timestamp(skb);
28499 vcc->push(vcc, skb);
28500 - atomic_inc(&vcc->stats->rx);
28501 + atomic_inc_unchecked(&vcc->stats->rx);
28502 }
28503 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28504 struct sk_buff *sb;
28505 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28506 if (len <= NS_SMBUFSIZE) {
28507 if (!atm_charge(vcc, sb->truesize)) {
28508 push_rxbufs(card, sb);
28509 - atomic_inc(&vcc->stats->rx_drop);
28510 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28511 } else {
28512 skb_put(sb, len);
28513 dequeue_sm_buf(card, sb);
28514 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28515 ATM_SKB(sb)->vcc = vcc;
28516 __net_timestamp(sb);
28517 vcc->push(vcc, sb);
28518 - atomic_inc(&vcc->stats->rx);
28519 + atomic_inc_unchecked(&vcc->stats->rx);
28520 }
28521
28522 push_rxbufs(card, skb);
28523 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28524
28525 if (!atm_charge(vcc, skb->truesize)) {
28526 push_rxbufs(card, skb);
28527 - atomic_inc(&vcc->stats->rx_drop);
28528 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28529 } else {
28530 dequeue_lg_buf(card, skb);
28531 #ifdef NS_USE_DESTRUCTORS
28532 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28533 ATM_SKB(skb)->vcc = vcc;
28534 __net_timestamp(skb);
28535 vcc->push(vcc, skb);
28536 - atomic_inc(&vcc->stats->rx);
28537 + atomic_inc_unchecked(&vcc->stats->rx);
28538 }
28539
28540 push_rxbufs(card, sb);
28541 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28542 printk
28543 ("nicstar%d: Out of huge buffers.\n",
28544 card->index);
28545 - atomic_inc(&vcc->stats->rx_drop);
28546 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28547 recycle_iovec_rx_bufs(card,
28548 (struct iovec *)
28549 iovb->data,
28550 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28551 card->hbpool.count++;
28552 } else
28553 dev_kfree_skb_any(hb);
28554 - atomic_inc(&vcc->stats->rx_drop);
28555 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28556 } else {
28557 /* Copy the small buffer to the huge buffer */
28558 sb = (struct sk_buff *)iov->iov_base;
28559 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28560 #endif /* NS_USE_DESTRUCTORS */
28561 __net_timestamp(hb);
28562 vcc->push(vcc, hb);
28563 - atomic_inc(&vcc->stats->rx);
28564 + atomic_inc_unchecked(&vcc->stats->rx);
28565 }
28566 }
28567
28568 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
28569 index 9851093..adb2b1e 100644
28570 --- a/drivers/atm/solos-pci.c
28571 +++ b/drivers/atm/solos-pci.c
28572 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
28573 }
28574 atm_charge(vcc, skb->truesize);
28575 vcc->push(vcc, skb);
28576 - atomic_inc(&vcc->stats->rx);
28577 + atomic_inc_unchecked(&vcc->stats->rx);
28578 break;
28579
28580 case PKT_STATUS:
28581 @@ -1009,7 +1009,7 @@ static uint32_t fpga_tx(struct solos_card *card)
28582 vcc = SKB_CB(oldskb)->vcc;
28583
28584 if (vcc) {
28585 - atomic_inc(&vcc->stats->tx);
28586 + atomic_inc_unchecked(&vcc->stats->tx);
28587 solos_pop(vcc, oldskb);
28588 } else
28589 dev_kfree_skb_irq(oldskb);
28590 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
28591 index 0215934..ce9f5b1 100644
28592 --- a/drivers/atm/suni.c
28593 +++ b/drivers/atm/suni.c
28594 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
28595
28596
28597 #define ADD_LIMITED(s,v) \
28598 - atomic_add((v),&stats->s); \
28599 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28600 + atomic_add_unchecked((v),&stats->s); \
28601 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28602
28603
28604 static void suni_hz(unsigned long from_timer)
28605 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28606 index 5120a96..e2572bd 100644
28607 --- a/drivers/atm/uPD98402.c
28608 +++ b/drivers/atm/uPD98402.c
28609 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
28610 struct sonet_stats tmp;
28611 int error = 0;
28612
28613 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28614 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28615 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28616 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28617 if (zero && !error) {
28618 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
28619
28620
28621 #define ADD_LIMITED(s,v) \
28622 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28623 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28624 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28625 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28626 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28627 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28628
28629
28630 static void stat_event(struct atm_dev *dev)
28631 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
28632 if (reason & uPD98402_INT_PFM) stat_event(dev);
28633 if (reason & uPD98402_INT_PCO) {
28634 (void) GET(PCOCR); /* clear interrupt cause */
28635 - atomic_add(GET(HECCT),
28636 + atomic_add_unchecked(GET(HECCT),
28637 &PRIV(dev)->sonet_stats.uncorr_hcs);
28638 }
28639 if ((reason & uPD98402_INT_RFO) &&
28640 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
28641 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
28642 uPD98402_INT_LOS),PIMR); /* enable them */
28643 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
28644 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28645 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
28646 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
28647 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28648 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
28649 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
28650 return 0;
28651 }
28652
28653 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
28654 index abe4e20..83c4727 100644
28655 --- a/drivers/atm/zatm.c
28656 +++ b/drivers/atm/zatm.c
28657 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28658 }
28659 if (!size) {
28660 dev_kfree_skb_irq(skb);
28661 - if (vcc) atomic_inc(&vcc->stats->rx_err);
28662 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
28663 continue;
28664 }
28665 if (!atm_charge(vcc,skb->truesize)) {
28666 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28667 skb->len = size;
28668 ATM_SKB(skb)->vcc = vcc;
28669 vcc->push(vcc,skb);
28670 - atomic_inc(&vcc->stats->rx);
28671 + atomic_inc_unchecked(&vcc->stats->rx);
28672 }
28673 zout(pos & 0xffff,MTA(mbx));
28674 #if 0 /* probably a stupid idea */
28675 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
28676 skb_queue_head(&zatm_vcc->backlog,skb);
28677 break;
28678 }
28679 - atomic_inc(&vcc->stats->tx);
28680 + atomic_inc_unchecked(&vcc->stats->tx);
28681 wake_up(&zatm_vcc->tx_wait);
28682 }
28683
28684 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
28685 index 8493536..31adee0 100644
28686 --- a/drivers/base/devtmpfs.c
28687 +++ b/drivers/base/devtmpfs.c
28688 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
28689 if (!thread)
28690 return 0;
28691
28692 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
28693 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
28694 if (err)
28695 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
28696 else
28697 diff --git a/drivers/base/node.c b/drivers/base/node.c
28698 index 90aa2a1..af1a177 100644
28699 --- a/drivers/base/node.c
28700 +++ b/drivers/base/node.c
28701 @@ -592,11 +592,9 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
28702 {
28703 int n;
28704
28705 - n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
28706 - if (n > 0 && PAGE_SIZE > n + 1) {
28707 - *(buf + n++) = '\n';
28708 - *(buf + n++) = '\0';
28709 - }
28710 + n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
28711 + buf[n++] = '\n';
28712 + buf[n] = '\0';
28713 return n;
28714 }
28715
28716 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
28717 index 2a3e581..3d6a73f 100644
28718 --- a/drivers/base/power/wakeup.c
28719 +++ b/drivers/base/power/wakeup.c
28720 @@ -30,14 +30,14 @@ bool events_check_enabled;
28721 * They need to be modified together atomically, so it's better to use one
28722 * atomic variable to hold them both.
28723 */
28724 -static atomic_t combined_event_count = ATOMIC_INIT(0);
28725 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
28726
28727 #define IN_PROGRESS_BITS (sizeof(int) * 4)
28728 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
28729
28730 static void split_counters(unsigned int *cnt, unsigned int *inpr)
28731 {
28732 - unsigned int comb = atomic_read(&combined_event_count);
28733 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
28734
28735 *cnt = (comb >> IN_PROGRESS_BITS);
28736 *inpr = comb & MAX_IN_PROGRESS;
28737 @@ -379,7 +379,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
28738 ws->last_time = ktime_get();
28739
28740 /* Increment the counter of events in progress. */
28741 - atomic_inc(&combined_event_count);
28742 + atomic_inc_unchecked(&combined_event_count);
28743 }
28744
28745 /**
28746 @@ -475,7 +475,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
28747 * Increment the counter of registered wakeup events and decrement the
28748 * couter of wakeup events in progress simultaneously.
28749 */
28750 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
28751 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
28752 }
28753
28754 /**
28755 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
28756 index b0f553b..77b928b 100644
28757 --- a/drivers/block/cciss.c
28758 +++ b/drivers/block/cciss.c
28759 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
28760 int err;
28761 u32 cp;
28762
28763 + memset(&arg64, 0, sizeof(arg64));
28764 +
28765 err = 0;
28766 err |=
28767 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
28768 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
28769 while (!list_empty(&h->reqQ)) {
28770 c = list_entry(h->reqQ.next, CommandList_struct, list);
28771 /* can't do anything if fifo is full */
28772 - if ((h->access.fifo_full(h))) {
28773 + if ((h->access->fifo_full(h))) {
28774 dev_warn(&h->pdev->dev, "fifo full\n");
28775 break;
28776 }
28777 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
28778 h->Qdepth--;
28779
28780 /* Tell the controller execute command */
28781 - h->access.submit_command(h, c);
28782 + h->access->submit_command(h, c);
28783
28784 /* Put job onto the completed Q */
28785 addQ(&h->cmpQ, c);
28786 @@ -3443,17 +3445,17 @@ startio:
28787
28788 static inline unsigned long get_next_completion(ctlr_info_t *h)
28789 {
28790 - return h->access.command_completed(h);
28791 + return h->access->command_completed(h);
28792 }
28793
28794 static inline int interrupt_pending(ctlr_info_t *h)
28795 {
28796 - return h->access.intr_pending(h);
28797 + return h->access->intr_pending(h);
28798 }
28799
28800 static inline long interrupt_not_for_us(ctlr_info_t *h)
28801 {
28802 - return ((h->access.intr_pending(h) == 0) ||
28803 + return ((h->access->intr_pending(h) == 0) ||
28804 (h->interrupts_enabled == 0));
28805 }
28806
28807 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
28808 u32 a;
28809
28810 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
28811 - return h->access.command_completed(h);
28812 + return h->access->command_completed(h);
28813
28814 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
28815 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
28816 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
28817 trans_support & CFGTBL_Trans_use_short_tags);
28818
28819 /* Change the access methods to the performant access methods */
28820 - h->access = SA5_performant_access;
28821 + h->access = &SA5_performant_access;
28822 h->transMethod = CFGTBL_Trans_Performant;
28823
28824 return;
28825 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
28826 if (prod_index < 0)
28827 return -ENODEV;
28828 h->product_name = products[prod_index].product_name;
28829 - h->access = *(products[prod_index].access);
28830 + h->access = products[prod_index].access;
28831
28832 if (cciss_board_disabled(h)) {
28833 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
28834 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
28835 }
28836
28837 /* make sure the board interrupts are off */
28838 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28839 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28840 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
28841 if (rc)
28842 goto clean2;
28843 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
28844 * fake ones to scoop up any residual completions.
28845 */
28846 spin_lock_irqsave(&h->lock, flags);
28847 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28848 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28849 spin_unlock_irqrestore(&h->lock, flags);
28850 free_irq(h->intr[h->intr_mode], h);
28851 rc = cciss_request_irq(h, cciss_msix_discard_completions,
28852 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
28853 dev_info(&h->pdev->dev, "Board READY.\n");
28854 dev_info(&h->pdev->dev,
28855 "Waiting for stale completions to drain.\n");
28856 - h->access.set_intr_mask(h, CCISS_INTR_ON);
28857 + h->access->set_intr_mask(h, CCISS_INTR_ON);
28858 msleep(10000);
28859 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28860 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28861
28862 rc = controller_reset_failed(h->cfgtable);
28863 if (rc)
28864 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
28865 cciss_scsi_setup(h);
28866
28867 /* Turn the interrupts on so we can service requests */
28868 - h->access.set_intr_mask(h, CCISS_INTR_ON);
28869 + h->access->set_intr_mask(h, CCISS_INTR_ON);
28870
28871 /* Get the firmware version */
28872 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
28873 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
28874 kfree(flush_buf);
28875 if (return_code != IO_OK)
28876 dev_warn(&h->pdev->dev, "Error flushing cache\n");
28877 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28878 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28879 free_irq(h->intr[h->intr_mode], h);
28880 }
28881
28882 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
28883 index 7fda30e..eb5dfe0 100644
28884 --- a/drivers/block/cciss.h
28885 +++ b/drivers/block/cciss.h
28886 @@ -101,7 +101,7 @@ struct ctlr_info
28887 /* information about each logical volume */
28888 drive_info_struct *drv[CISS_MAX_LUN];
28889
28890 - struct access_method access;
28891 + struct access_method *access;
28892
28893 /* queue and queue Info */
28894 struct list_head reqQ;
28895 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
28896 index 9125bbe..eede5c8 100644
28897 --- a/drivers/block/cpqarray.c
28898 +++ b/drivers/block/cpqarray.c
28899 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28900 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
28901 goto Enomem4;
28902 }
28903 - hba[i]->access.set_intr_mask(hba[i], 0);
28904 + hba[i]->access->set_intr_mask(hba[i], 0);
28905 if (request_irq(hba[i]->intr, do_ida_intr,
28906 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
28907 {
28908 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28909 add_timer(&hba[i]->timer);
28910
28911 /* Enable IRQ now that spinlock and rate limit timer are set up */
28912 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28913 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28914
28915 for(j=0; j<NWD; j++) {
28916 struct gendisk *disk = ida_gendisk[i][j];
28917 @@ -694,7 +694,7 @@ DBGINFO(
28918 for(i=0; i<NR_PRODUCTS; i++) {
28919 if (board_id == products[i].board_id) {
28920 c->product_name = products[i].product_name;
28921 - c->access = *(products[i].access);
28922 + c->access = products[i].access;
28923 break;
28924 }
28925 }
28926 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
28927 hba[ctlr]->intr = intr;
28928 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
28929 hba[ctlr]->product_name = products[j].product_name;
28930 - hba[ctlr]->access = *(products[j].access);
28931 + hba[ctlr]->access = products[j].access;
28932 hba[ctlr]->ctlr = ctlr;
28933 hba[ctlr]->board_id = board_id;
28934 hba[ctlr]->pci_dev = NULL; /* not PCI */
28935 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
28936
28937 while((c = h->reqQ) != NULL) {
28938 /* Can't do anything if we're busy */
28939 - if (h->access.fifo_full(h) == 0)
28940 + if (h->access->fifo_full(h) == 0)
28941 return;
28942
28943 /* Get the first entry from the request Q */
28944 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
28945 h->Qdepth--;
28946
28947 /* Tell the controller to do our bidding */
28948 - h->access.submit_command(h, c);
28949 + h->access->submit_command(h, c);
28950
28951 /* Get onto the completion Q */
28952 addQ(&h->cmpQ, c);
28953 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28954 unsigned long flags;
28955 __u32 a,a1;
28956
28957 - istat = h->access.intr_pending(h);
28958 + istat = h->access->intr_pending(h);
28959 /* Is this interrupt for us? */
28960 if (istat == 0)
28961 return IRQ_NONE;
28962 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28963 */
28964 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
28965 if (istat & FIFO_NOT_EMPTY) {
28966 - while((a = h->access.command_completed(h))) {
28967 + while((a = h->access->command_completed(h))) {
28968 a1 = a; a &= ~3;
28969 if ((c = h->cmpQ) == NULL)
28970 {
28971 @@ -1449,11 +1449,11 @@ static int sendcmd(
28972 /*
28973 * Disable interrupt
28974 */
28975 - info_p->access.set_intr_mask(info_p, 0);
28976 + info_p->access->set_intr_mask(info_p, 0);
28977 /* Make sure there is room in the command FIFO */
28978 /* Actually it should be completely empty at this time. */
28979 for (i = 200000; i > 0; i--) {
28980 - temp = info_p->access.fifo_full(info_p);
28981 + temp = info_p->access->fifo_full(info_p);
28982 if (temp != 0) {
28983 break;
28984 }
28985 @@ -1466,7 +1466,7 @@ DBG(
28986 /*
28987 * Send the cmd
28988 */
28989 - info_p->access.submit_command(info_p, c);
28990 + info_p->access->submit_command(info_p, c);
28991 complete = pollcomplete(ctlr);
28992
28993 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
28994 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
28995 * we check the new geometry. Then turn interrupts back on when
28996 * we're done.
28997 */
28998 - host->access.set_intr_mask(host, 0);
28999 + host->access->set_intr_mask(host, 0);
29000 getgeometry(ctlr);
29001 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
29002 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
29003
29004 for(i=0; i<NWD; i++) {
29005 struct gendisk *disk = ida_gendisk[ctlr][i];
29006 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
29007 /* Wait (up to 2 seconds) for a command to complete */
29008
29009 for (i = 200000; i > 0; i--) {
29010 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
29011 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
29012 if (done == 0) {
29013 udelay(10); /* a short fixed delay */
29014 } else
29015 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
29016 index be73e9d..7fbf140 100644
29017 --- a/drivers/block/cpqarray.h
29018 +++ b/drivers/block/cpqarray.h
29019 @@ -99,7 +99,7 @@ struct ctlr_info {
29020 drv_info_t drv[NWD];
29021 struct proc_dir_entry *proc;
29022
29023 - struct access_method access;
29024 + struct access_method *access;
29025
29026 cmdlist_t *reqQ;
29027 cmdlist_t *cmpQ;
29028 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
29029 index 8d68056..e67050f 100644
29030 --- a/drivers/block/drbd/drbd_int.h
29031 +++ b/drivers/block/drbd/drbd_int.h
29032 @@ -736,7 +736,7 @@ struct drbd_request;
29033 struct drbd_epoch {
29034 struct list_head list;
29035 unsigned int barrier_nr;
29036 - atomic_t epoch_size; /* increased on every request added. */
29037 + atomic_unchecked_t epoch_size; /* increased on every request added. */
29038 atomic_t active; /* increased on every req. added, and dec on every finished. */
29039 unsigned long flags;
29040 };
29041 @@ -1108,7 +1108,7 @@ struct drbd_conf {
29042 void *int_dig_in;
29043 void *int_dig_vv;
29044 wait_queue_head_t seq_wait;
29045 - atomic_t packet_seq;
29046 + atomic_unchecked_t packet_seq;
29047 unsigned int peer_seq;
29048 spinlock_t peer_seq_lock;
29049 unsigned int minor;
29050 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
29051
29052 static inline void drbd_tcp_cork(struct socket *sock)
29053 {
29054 - int __user val = 1;
29055 + int val = 1;
29056 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29057 - (char __user *)&val, sizeof(val));
29058 + (char __force_user *)&val, sizeof(val));
29059 }
29060
29061 static inline void drbd_tcp_uncork(struct socket *sock)
29062 {
29063 - int __user val = 0;
29064 + int val = 0;
29065 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29066 - (char __user *)&val, sizeof(val));
29067 + (char __force_user *)&val, sizeof(val));
29068 }
29069
29070 static inline void drbd_tcp_nodelay(struct socket *sock)
29071 {
29072 - int __user val = 1;
29073 + int val = 1;
29074 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
29075 - (char __user *)&val, sizeof(val));
29076 + (char __force_user *)&val, sizeof(val));
29077 }
29078
29079 static inline void drbd_tcp_quickack(struct socket *sock)
29080 {
29081 - int __user val = 2;
29082 + int val = 2;
29083 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
29084 - (char __user *)&val, sizeof(val));
29085 + (char __force_user *)&val, sizeof(val));
29086 }
29087
29088 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
29089 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
29090 index 211fc44..c5116f1 100644
29091 --- a/drivers/block/drbd/drbd_main.c
29092 +++ b/drivers/block/drbd/drbd_main.c
29093 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
29094 p.sector = sector;
29095 p.block_id = block_id;
29096 p.blksize = blksize;
29097 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29098 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29099
29100 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
29101 return false;
29102 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
29103 p.sector = cpu_to_be64(req->sector);
29104 p.block_id = (unsigned long)req;
29105 p.seq_num = cpu_to_be32(req->seq_num =
29106 - atomic_add_return(1, &mdev->packet_seq));
29107 + atomic_add_return_unchecked(1, &mdev->packet_seq));
29108
29109 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
29110
29111 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
29112 atomic_set(&mdev->unacked_cnt, 0);
29113 atomic_set(&mdev->local_cnt, 0);
29114 atomic_set(&mdev->net_cnt, 0);
29115 - atomic_set(&mdev->packet_seq, 0);
29116 + atomic_set_unchecked(&mdev->packet_seq, 0);
29117 atomic_set(&mdev->pp_in_use, 0);
29118 atomic_set(&mdev->pp_in_use_by_net, 0);
29119 atomic_set(&mdev->rs_sect_in, 0);
29120 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
29121 mdev->receiver.t_state);
29122
29123 /* no need to lock it, I'm the only thread alive */
29124 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
29125 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
29126 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
29127 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
29128 mdev->al_writ_cnt =
29129 mdev->bm_writ_cnt =
29130 mdev->read_cnt =
29131 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
29132 index 946166e..356b39a 100644
29133 --- a/drivers/block/drbd/drbd_nl.c
29134 +++ b/drivers/block/drbd/drbd_nl.c
29135 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
29136 module_put(THIS_MODULE);
29137 }
29138
29139 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29140 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29141
29142 static unsigned short *
29143 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
29144 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
29145 cn_reply->id.idx = CN_IDX_DRBD;
29146 cn_reply->id.val = CN_VAL_DRBD;
29147
29148 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29149 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29150 cn_reply->ack = 0; /* not used here. */
29151 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29152 (int)((char *)tl - (char *)reply->tag_list);
29153 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
29154 cn_reply->id.idx = CN_IDX_DRBD;
29155 cn_reply->id.val = CN_VAL_DRBD;
29156
29157 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29158 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29159 cn_reply->ack = 0; /* not used here. */
29160 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29161 (int)((char *)tl - (char *)reply->tag_list);
29162 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
29163 cn_reply->id.idx = CN_IDX_DRBD;
29164 cn_reply->id.val = CN_VAL_DRBD;
29165
29166 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
29167 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
29168 cn_reply->ack = 0; // not used here.
29169 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29170 (int)((char*)tl - (char*)reply->tag_list);
29171 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
29172 cn_reply->id.idx = CN_IDX_DRBD;
29173 cn_reply->id.val = CN_VAL_DRBD;
29174
29175 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29176 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29177 cn_reply->ack = 0; /* not used here. */
29178 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29179 (int)((char *)tl - (char *)reply->tag_list);
29180 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
29181 index 43beaca..4a5b1dd 100644
29182 --- a/drivers/block/drbd/drbd_receiver.c
29183 +++ b/drivers/block/drbd/drbd_receiver.c
29184 @@ -894,7 +894,7 @@ retry:
29185 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
29186 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
29187
29188 - atomic_set(&mdev->packet_seq, 0);
29189 + atomic_set_unchecked(&mdev->packet_seq, 0);
29190 mdev->peer_seq = 0;
29191
29192 drbd_thread_start(&mdev->asender);
29193 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29194 do {
29195 next_epoch = NULL;
29196
29197 - epoch_size = atomic_read(&epoch->epoch_size);
29198 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
29199
29200 switch (ev & ~EV_CLEANUP) {
29201 case EV_PUT:
29202 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29203 rv = FE_DESTROYED;
29204 } else {
29205 epoch->flags = 0;
29206 - atomic_set(&epoch->epoch_size, 0);
29207 + atomic_set_unchecked(&epoch->epoch_size, 0);
29208 /* atomic_set(&epoch->active, 0); is already zero */
29209 if (rv == FE_STILL_LIVE)
29210 rv = FE_RECYCLED;
29211 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29212 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
29213 drbd_flush(mdev);
29214
29215 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29216 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29217 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
29218 if (epoch)
29219 break;
29220 }
29221
29222 epoch = mdev->current_epoch;
29223 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
29224 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
29225
29226 D_ASSERT(atomic_read(&epoch->active) == 0);
29227 D_ASSERT(epoch->flags == 0);
29228 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29229 }
29230
29231 epoch->flags = 0;
29232 - atomic_set(&epoch->epoch_size, 0);
29233 + atomic_set_unchecked(&epoch->epoch_size, 0);
29234 atomic_set(&epoch->active, 0);
29235
29236 spin_lock(&mdev->epoch_lock);
29237 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29238 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29239 list_add(&epoch->list, &mdev->current_epoch->list);
29240 mdev->current_epoch = epoch;
29241 mdev->epochs++;
29242 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29243 spin_unlock(&mdev->peer_seq_lock);
29244
29245 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
29246 - atomic_inc(&mdev->current_epoch->epoch_size);
29247 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
29248 return drbd_drain_block(mdev, data_size);
29249 }
29250
29251 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29252
29253 spin_lock(&mdev->epoch_lock);
29254 e->epoch = mdev->current_epoch;
29255 - atomic_inc(&e->epoch->epoch_size);
29256 + atomic_inc_unchecked(&e->epoch->epoch_size);
29257 atomic_inc(&e->epoch->active);
29258 spin_unlock(&mdev->epoch_lock);
29259
29260 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
29261 D_ASSERT(list_empty(&mdev->done_ee));
29262
29263 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
29264 - atomic_set(&mdev->current_epoch->epoch_size, 0);
29265 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
29266 D_ASSERT(list_empty(&mdev->current_epoch->list));
29267 }
29268
29269 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
29270 index bbca966..65e37dd 100644
29271 --- a/drivers/block/loop.c
29272 +++ b/drivers/block/loop.c
29273 @@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
29274 mm_segment_t old_fs = get_fs();
29275
29276 set_fs(get_ds());
29277 - bw = file->f_op->write(file, buf, len, &pos);
29278 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
29279 set_fs(old_fs);
29280 if (likely(bw == len))
29281 return 0;
29282 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
29283 index ee94686..3e09ad3 100644
29284 --- a/drivers/char/Kconfig
29285 +++ b/drivers/char/Kconfig
29286 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
29287
29288 config DEVKMEM
29289 bool "/dev/kmem virtual device support"
29290 - default y
29291 + default n
29292 + depends on !GRKERNSEC_KMEM
29293 help
29294 Say Y here if you want to support the /dev/kmem device. The
29295 /dev/kmem device is rarely used, but can be used for certain
29296 @@ -581,6 +582,7 @@ config DEVPORT
29297 bool
29298 depends on !M68K
29299 depends on ISA || PCI
29300 + depends on !GRKERNSEC_KMEM
29301 default y
29302
29303 source "drivers/s390/char/Kconfig"
29304 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
29305 index 2e04433..22afc64 100644
29306 --- a/drivers/char/agp/frontend.c
29307 +++ b/drivers/char/agp/frontend.c
29308 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
29309 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
29310 return -EFAULT;
29311
29312 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
29313 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
29314 return -EFAULT;
29315
29316 client = agp_find_client_by_pid(reserve.pid);
29317 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
29318 index 21cb980..f15107c 100644
29319 --- a/drivers/char/genrtc.c
29320 +++ b/drivers/char/genrtc.c
29321 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
29322 switch (cmd) {
29323
29324 case RTC_PLL_GET:
29325 + memset(&pll, 0, sizeof(pll));
29326 if (get_rtc_pll(&pll))
29327 return -EINVAL;
29328 else
29329 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
29330 index dfd7876..c0b0885 100644
29331 --- a/drivers/char/hpet.c
29332 +++ b/drivers/char/hpet.c
29333 @@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
29334 }
29335
29336 static int
29337 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29338 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29339 struct hpet_info *info)
29340 {
29341 struct hpet_timer __iomem *timer;
29342 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
29343 index 2c29942..604c5ba 100644
29344 --- a/drivers/char/ipmi/ipmi_msghandler.c
29345 +++ b/drivers/char/ipmi/ipmi_msghandler.c
29346 @@ -420,7 +420,7 @@ struct ipmi_smi {
29347 struct proc_dir_entry *proc_dir;
29348 char proc_dir_name[10];
29349
29350 - atomic_t stats[IPMI_NUM_STATS];
29351 + atomic_unchecked_t stats[IPMI_NUM_STATS];
29352
29353 /*
29354 * run_to_completion duplicate of smb_info, smi_info
29355 @@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
29356
29357
29358 #define ipmi_inc_stat(intf, stat) \
29359 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29360 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29361 #define ipmi_get_stat(intf, stat) \
29362 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29363 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29364
29365 static int is_lan_addr(struct ipmi_addr *addr)
29366 {
29367 @@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29368 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29369 init_waitqueue_head(&intf->waitq);
29370 for (i = 0; i < IPMI_NUM_STATS; i++)
29371 - atomic_set(&intf->stats[i], 0);
29372 + atomic_set_unchecked(&intf->stats[i], 0);
29373
29374 intf->proc_dir = NULL;
29375
29376 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
29377 index 1e638ff..a869ef5 100644
29378 --- a/drivers/char/ipmi/ipmi_si_intf.c
29379 +++ b/drivers/char/ipmi/ipmi_si_intf.c
29380 @@ -275,7 +275,7 @@ struct smi_info {
29381 unsigned char slave_addr;
29382
29383 /* Counters and things for the proc filesystem. */
29384 - atomic_t stats[SI_NUM_STATS];
29385 + atomic_unchecked_t stats[SI_NUM_STATS];
29386
29387 struct task_struct *thread;
29388
29389 @@ -284,9 +284,9 @@ struct smi_info {
29390 };
29391
29392 #define smi_inc_stat(smi, stat) \
29393 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29394 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29395 #define smi_get_stat(smi, stat) \
29396 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29397 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29398
29399 #define SI_MAX_PARMS 4
29400
29401 @@ -3209,7 +3209,7 @@ static int try_smi_init(struct smi_info *new_smi)
29402 atomic_set(&new_smi->req_events, 0);
29403 new_smi->run_to_completion = 0;
29404 for (i = 0; i < SI_NUM_STATS; i++)
29405 - atomic_set(&new_smi->stats[i], 0);
29406 + atomic_set_unchecked(&new_smi->stats[i], 0);
29407
29408 new_smi->interrupt_disabled = 1;
29409 atomic_set(&new_smi->stop_operation, 0);
29410 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
29411 index 47ff7e4..0c7d340 100644
29412 --- a/drivers/char/mbcs.c
29413 +++ b/drivers/char/mbcs.c
29414 @@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
29415 return 0;
29416 }
29417
29418 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29419 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29420 {
29421 .part_num = MBCS_PART_NUM,
29422 .mfg_num = MBCS_MFG_NUM,
29423 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
29424 index d6e9d08..4493e89 100644
29425 --- a/drivers/char/mem.c
29426 +++ b/drivers/char/mem.c
29427 @@ -18,6 +18,7 @@
29428 #include <linux/raw.h>
29429 #include <linux/tty.h>
29430 #include <linux/capability.h>
29431 +#include <linux/security.h>
29432 #include <linux/ptrace.h>
29433 #include <linux/device.h>
29434 #include <linux/highmem.h>
29435 @@ -35,6 +36,10 @@
29436 # include <linux/efi.h>
29437 #endif
29438
29439 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29440 +extern const struct file_operations grsec_fops;
29441 +#endif
29442 +
29443 static inline unsigned long size_inside_page(unsigned long start,
29444 unsigned long size)
29445 {
29446 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29447
29448 while (cursor < to) {
29449 if (!devmem_is_allowed(pfn)) {
29450 +#ifdef CONFIG_GRKERNSEC_KMEM
29451 + gr_handle_mem_readwrite(from, to);
29452 +#else
29453 printk(KERN_INFO
29454 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29455 current->comm, from, to);
29456 +#endif
29457 return 0;
29458 }
29459 cursor += PAGE_SIZE;
29460 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29461 }
29462 return 1;
29463 }
29464 +#elif defined(CONFIG_GRKERNSEC_KMEM)
29465 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29466 +{
29467 + return 0;
29468 +}
29469 #else
29470 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29471 {
29472 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29473
29474 while (count > 0) {
29475 unsigned long remaining;
29476 + char *temp;
29477
29478 sz = size_inside_page(p, count);
29479
29480 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29481 if (!ptr)
29482 return -EFAULT;
29483
29484 - remaining = copy_to_user(buf, ptr, sz);
29485 +#ifdef CONFIG_PAX_USERCOPY
29486 + temp = kmalloc(sz, GFP_KERNEL);
29487 + if (!temp) {
29488 + unxlate_dev_mem_ptr(p, ptr);
29489 + return -ENOMEM;
29490 + }
29491 + memcpy(temp, ptr, sz);
29492 +#else
29493 + temp = ptr;
29494 +#endif
29495 +
29496 + remaining = copy_to_user(buf, temp, sz);
29497 +
29498 +#ifdef CONFIG_PAX_USERCOPY
29499 + kfree(temp);
29500 +#endif
29501 +
29502 unxlate_dev_mem_ptr(p, ptr);
29503 if (remaining)
29504 return -EFAULT;
29505 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29506 size_t count, loff_t *ppos)
29507 {
29508 unsigned long p = *ppos;
29509 - ssize_t low_count, read, sz;
29510 + ssize_t low_count, read, sz, err = 0;
29511 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29512 - int err = 0;
29513
29514 read = 0;
29515 if (p < (unsigned long) high_memory) {
29516 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29517 }
29518 #endif
29519 while (low_count > 0) {
29520 + char *temp;
29521 +
29522 sz = size_inside_page(p, low_count);
29523
29524 /*
29525 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29526 */
29527 kbuf = xlate_dev_kmem_ptr((char *)p);
29528
29529 - if (copy_to_user(buf, kbuf, sz))
29530 +#ifdef CONFIG_PAX_USERCOPY
29531 + temp = kmalloc(sz, GFP_KERNEL);
29532 + if (!temp)
29533 + return -ENOMEM;
29534 + memcpy(temp, kbuf, sz);
29535 +#else
29536 + temp = kbuf;
29537 +#endif
29538 +
29539 + err = copy_to_user(buf, temp, sz);
29540 +
29541 +#ifdef CONFIG_PAX_USERCOPY
29542 + kfree(temp);
29543 +#endif
29544 +
29545 + if (err)
29546 return -EFAULT;
29547 buf += sz;
29548 p += sz;
29549 @@ -867,6 +914,9 @@ static const struct memdev {
29550 #ifdef CONFIG_CRASH_DUMP
29551 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29552 #endif
29553 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29554 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29555 +#endif
29556 };
29557
29558 static int memory_open(struct inode *inode, struct file *filp)
29559 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
29560 index 9df78e2..01ba9ae 100644
29561 --- a/drivers/char/nvram.c
29562 +++ b/drivers/char/nvram.c
29563 @@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
29564
29565 spin_unlock_irq(&rtc_lock);
29566
29567 - if (copy_to_user(buf, contents, tmp - contents))
29568 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29569 return -EFAULT;
29570
29571 *ppos = i;
29572 diff --git a/drivers/char/random.c b/drivers/char/random.c
29573 index 4ec04a7..4a092ed 100644
29574 --- a/drivers/char/random.c
29575 +++ b/drivers/char/random.c
29576 @@ -261,8 +261,13 @@
29577 /*
29578 * Configuration information
29579 */
29580 +#ifdef CONFIG_GRKERNSEC_RANDNET
29581 +#define INPUT_POOL_WORDS 512
29582 +#define OUTPUT_POOL_WORDS 128
29583 +#else
29584 #define INPUT_POOL_WORDS 128
29585 #define OUTPUT_POOL_WORDS 32
29586 +#endif
29587 #define SEC_XFER_SIZE 512
29588 #define EXTRACT_SIZE 10
29589
29590 @@ -300,10 +305,17 @@ static struct poolinfo {
29591 int poolwords;
29592 int tap1, tap2, tap3, tap4, tap5;
29593 } poolinfo_table[] = {
29594 +#ifdef CONFIG_GRKERNSEC_RANDNET
29595 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29596 + { 512, 411, 308, 208, 104, 1 },
29597 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29598 + { 128, 103, 76, 51, 25, 1 },
29599 +#else
29600 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29601 { 128, 103, 76, 51, 25, 1 },
29602 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29603 { 32, 26, 20, 14, 7, 1 },
29604 +#endif
29605 #if 0
29606 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
29607 { 2048, 1638, 1231, 819, 411, 1 },
29608 @@ -913,7 +925,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
29609
29610 extract_buf(r, tmp);
29611 i = min_t(int, nbytes, EXTRACT_SIZE);
29612 - if (copy_to_user(buf, tmp, i)) {
29613 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
29614 ret = -EFAULT;
29615 break;
29616 }
29617 @@ -1238,7 +1250,7 @@ EXPORT_SYMBOL(generate_random_uuid);
29618 #include <linux/sysctl.h>
29619
29620 static int min_read_thresh = 8, min_write_thresh;
29621 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
29622 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
29623 static int max_write_thresh = INPUT_POOL_WORDS * 32;
29624 static char sysctl_bootid[16];
29625
29626 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
29627 index 45713f0..8286d21 100644
29628 --- a/drivers/char/sonypi.c
29629 +++ b/drivers/char/sonypi.c
29630 @@ -54,6 +54,7 @@
29631
29632 #include <asm/uaccess.h>
29633 #include <asm/io.h>
29634 +#include <asm/local.h>
29635
29636 #include <linux/sonypi.h>
29637
29638 @@ -490,7 +491,7 @@ static struct sonypi_device {
29639 spinlock_t fifo_lock;
29640 wait_queue_head_t fifo_proc_list;
29641 struct fasync_struct *fifo_async;
29642 - int open_count;
29643 + local_t open_count;
29644 int model;
29645 struct input_dev *input_jog_dev;
29646 struct input_dev *input_key_dev;
29647 @@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
29648 static int sonypi_misc_release(struct inode *inode, struct file *file)
29649 {
29650 mutex_lock(&sonypi_device.lock);
29651 - sonypi_device.open_count--;
29652 + local_dec(&sonypi_device.open_count);
29653 mutex_unlock(&sonypi_device.lock);
29654 return 0;
29655 }
29656 @@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
29657 {
29658 mutex_lock(&sonypi_device.lock);
29659 /* Flush input queue on first open */
29660 - if (!sonypi_device.open_count)
29661 + if (!local_read(&sonypi_device.open_count))
29662 kfifo_reset(&sonypi_device.fifo);
29663 - sonypi_device.open_count++;
29664 + local_inc(&sonypi_device.open_count);
29665 mutex_unlock(&sonypi_device.lock);
29666
29667 return 0;
29668 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
29669 index ad7c732..5aa8054 100644
29670 --- a/drivers/char/tpm/tpm.c
29671 +++ b/drivers/char/tpm/tpm.c
29672 @@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
29673 chip->vendor.req_complete_val)
29674 goto out_recv;
29675
29676 - if ((status == chip->vendor.req_canceled)) {
29677 + if (status == chip->vendor.req_canceled) {
29678 dev_err(chip->dev, "Operation Canceled\n");
29679 rc = -ECANCELED;
29680 goto out;
29681 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
29682 index 0636520..169c1d0 100644
29683 --- a/drivers/char/tpm/tpm_bios.c
29684 +++ b/drivers/char/tpm/tpm_bios.c
29685 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
29686 event = addr;
29687
29688 if ((event->event_type == 0 && event->event_size == 0) ||
29689 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
29690 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
29691 return NULL;
29692
29693 return addr;
29694 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
29695 return NULL;
29696
29697 if ((event->event_type == 0 && event->event_size == 0) ||
29698 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
29699 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
29700 return NULL;
29701
29702 (*pos)++;
29703 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
29704 int i;
29705
29706 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
29707 - seq_putc(m, data[i]);
29708 + if (!seq_putc(m, data[i]))
29709 + return -EFAULT;
29710
29711 return 0;
29712 }
29713 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
29714 log->bios_event_log_end = log->bios_event_log + len;
29715
29716 virt = acpi_os_map_memory(start, len);
29717 + if (!virt) {
29718 + kfree(log->bios_event_log);
29719 + log->bios_event_log = NULL;
29720 + return -EFAULT;
29721 + }
29722
29723 - memcpy(log->bios_event_log, virt, len);
29724 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
29725
29726 acpi_os_unmap_memory(virt, len);
29727 return 0;
29728 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
29729 index cdf2f54..e55c197 100644
29730 --- a/drivers/char/virtio_console.c
29731 +++ b/drivers/char/virtio_console.c
29732 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
29733 if (to_user) {
29734 ssize_t ret;
29735
29736 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
29737 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
29738 if (ret)
29739 return -EFAULT;
29740 } else {
29741 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
29742 if (!port_has_data(port) && !port->host_connected)
29743 return 0;
29744
29745 - return fill_readbuf(port, ubuf, count, true);
29746 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
29747 }
29748
29749 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
29750 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
29751 index 97f5064..202b6e6 100644
29752 --- a/drivers/edac/edac_pci_sysfs.c
29753 +++ b/drivers/edac/edac_pci_sysfs.c
29754 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
29755 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
29756 static int edac_pci_poll_msec = 1000; /* one second workq period */
29757
29758 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
29759 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
29760 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
29761 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
29762
29763 static struct kobject *edac_pci_top_main_kobj;
29764 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
29765 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29766 edac_printk(KERN_CRIT, EDAC_PCI,
29767 "Signaled System Error on %s\n",
29768 pci_name(dev));
29769 - atomic_inc(&pci_nonparity_count);
29770 + atomic_inc_unchecked(&pci_nonparity_count);
29771 }
29772
29773 if (status & (PCI_STATUS_PARITY)) {
29774 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29775 "Master Data Parity Error on %s\n",
29776 pci_name(dev));
29777
29778 - atomic_inc(&pci_parity_count);
29779 + atomic_inc_unchecked(&pci_parity_count);
29780 }
29781
29782 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29783 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29784 "Detected Parity Error on %s\n",
29785 pci_name(dev));
29786
29787 - atomic_inc(&pci_parity_count);
29788 + atomic_inc_unchecked(&pci_parity_count);
29789 }
29790 }
29791
29792 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29793 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
29794 "Signaled System Error on %s\n",
29795 pci_name(dev));
29796 - atomic_inc(&pci_nonparity_count);
29797 + atomic_inc_unchecked(&pci_nonparity_count);
29798 }
29799
29800 if (status & (PCI_STATUS_PARITY)) {
29801 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29802 "Master Data Parity Error on "
29803 "%s\n", pci_name(dev));
29804
29805 - atomic_inc(&pci_parity_count);
29806 + atomic_inc_unchecked(&pci_parity_count);
29807 }
29808
29809 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29810 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29811 "Detected Parity Error on %s\n",
29812 pci_name(dev));
29813
29814 - atomic_inc(&pci_parity_count);
29815 + atomic_inc_unchecked(&pci_parity_count);
29816 }
29817 }
29818 }
29819 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
29820 if (!check_pci_errors)
29821 return;
29822
29823 - before_count = atomic_read(&pci_parity_count);
29824 + before_count = atomic_read_unchecked(&pci_parity_count);
29825
29826 /* scan all PCI devices looking for a Parity Error on devices and
29827 * bridges.
29828 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
29829 /* Only if operator has selected panic on PCI Error */
29830 if (edac_pci_get_panic_on_pe()) {
29831 /* If the count is different 'after' from 'before' */
29832 - if (before_count != atomic_read(&pci_parity_count))
29833 + if (before_count != atomic_read_unchecked(&pci_parity_count))
29834 panic("EDAC: PCI Parity Error");
29835 }
29836 }
29837 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
29838 index c6074c5..88a9e2e 100644
29839 --- a/drivers/edac/mce_amd.h
29840 +++ b/drivers/edac/mce_amd.h
29841 @@ -82,7 +82,7 @@ extern const char * const ii_msgs[];
29842 struct amd_decoder_ops {
29843 bool (*dc_mce)(u16, u8);
29844 bool (*ic_mce)(u16, u8);
29845 -};
29846 +} __no_const;
29847
29848 void amd_report_gart_errors(bool);
29849 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
29850 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
29851 index cc595eb..4ec702a 100644
29852 --- a/drivers/firewire/core-card.c
29853 +++ b/drivers/firewire/core-card.c
29854 @@ -679,7 +679,7 @@ void fw_card_release(struct kref *kref)
29855
29856 void fw_core_remove_card(struct fw_card *card)
29857 {
29858 - struct fw_card_driver dummy_driver = dummy_driver_template;
29859 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
29860
29861 card->driver->update_phy_reg(card, 4,
29862 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29863 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
29864 index 2e6b245..c3857d9 100644
29865 --- a/drivers/firewire/core-cdev.c
29866 +++ b/drivers/firewire/core-cdev.c
29867 @@ -1341,8 +1341,7 @@ static int init_iso_resource(struct client *client,
29868 int ret;
29869
29870 if ((request->channels == 0 && request->bandwidth == 0) ||
29871 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29872 - request->bandwidth < 0)
29873 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29874 return -EINVAL;
29875
29876 r = kmalloc(sizeof(*r), GFP_KERNEL);
29877 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
29878 index dea2dcc..a4fb978 100644
29879 --- a/drivers/firewire/core-transaction.c
29880 +++ b/drivers/firewire/core-transaction.c
29881 @@ -37,6 +37,7 @@
29882 #include <linux/timer.h>
29883 #include <linux/types.h>
29884 #include <linux/workqueue.h>
29885 +#include <linux/sched.h>
29886
29887 #include <asm/byteorder.h>
29888
29889 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
29890 index 9047f55..e47c7ff 100644
29891 --- a/drivers/firewire/core.h
29892 +++ b/drivers/firewire/core.h
29893 @@ -110,6 +110,7 @@ struct fw_card_driver {
29894
29895 int (*stop_iso)(struct fw_iso_context *ctx);
29896 };
29897 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29898
29899 void fw_card_initialize(struct fw_card *card,
29900 const struct fw_card_driver *driver, struct device *device);
29901 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
29902 index 153980b..4b4d046 100644
29903 --- a/drivers/firmware/dmi_scan.c
29904 +++ b/drivers/firmware/dmi_scan.c
29905 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
29906 }
29907 }
29908 else {
29909 - /*
29910 - * no iounmap() for that ioremap(); it would be a no-op, but
29911 - * it's so early in setup that sucker gets confused into doing
29912 - * what it shouldn't if we actually call it.
29913 - */
29914 p = dmi_ioremap(0xF0000, 0x10000);
29915 if (p == NULL)
29916 goto error;
29917 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
29918 if (buf == NULL)
29919 return -1;
29920
29921 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
29922 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
29923
29924 iounmap(buf);
29925 return 0;
29926 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
29927 index 82d5c20..44a7177 100644
29928 --- a/drivers/gpio/gpio-vr41xx.c
29929 +++ b/drivers/gpio/gpio-vr41xx.c
29930 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29931 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29932 maskl, pendl, maskh, pendh);
29933
29934 - atomic_inc(&irq_err_count);
29935 + atomic_inc_unchecked(&irq_err_count);
29936
29937 return -EINVAL;
29938 }
29939 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
29940 index 8111889..367b253 100644
29941 --- a/drivers/gpu/drm/drm_crtc_helper.c
29942 +++ b/drivers/gpu/drm/drm_crtc_helper.c
29943 @@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
29944 struct drm_crtc *tmp;
29945 int crtc_mask = 1;
29946
29947 - WARN(!crtc, "checking null crtc?\n");
29948 + BUG_ON(!crtc);
29949
29950 dev = crtc->dev;
29951
29952 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
29953 index 6116e3b..c29dd16 100644
29954 --- a/drivers/gpu/drm/drm_drv.c
29955 +++ b/drivers/gpu/drm/drm_drv.c
29956 @@ -316,7 +316,7 @@ module_exit(drm_core_exit);
29957 /**
29958 * Copy and IOCTL return string to user space
29959 */
29960 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
29961 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
29962 {
29963 int len;
29964
29965 @@ -399,7 +399,7 @@ long drm_ioctl(struct file *filp,
29966 return -ENODEV;
29967
29968 atomic_inc(&dev->ioctl_count);
29969 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29970 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29971 ++file_priv->ioctl_count;
29972
29973 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29974 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
29975 index 123de28..43a0897 100644
29976 --- a/drivers/gpu/drm/drm_fops.c
29977 +++ b/drivers/gpu/drm/drm_fops.c
29978 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
29979 }
29980
29981 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29982 - atomic_set(&dev->counts[i], 0);
29983 + atomic_set_unchecked(&dev->counts[i], 0);
29984
29985 dev->sigdata.lock = NULL;
29986
29987 @@ -138,8 +138,8 @@ int drm_open(struct inode *inode, struct file *filp)
29988
29989 retcode = drm_open_helper(inode, filp, dev);
29990 if (!retcode) {
29991 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29992 - if (!dev->open_count++)
29993 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29994 + if (local_inc_return(&dev->open_count) == 1)
29995 retcode = drm_setup(dev);
29996 }
29997 if (!retcode) {
29998 @@ -482,7 +482,7 @@ int drm_release(struct inode *inode, struct file *filp)
29999
30000 mutex_lock(&drm_global_mutex);
30001
30002 - DRM_DEBUG("open_count = %d\n", dev->open_count);
30003 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
30004
30005 if (dev->driver->preclose)
30006 dev->driver->preclose(dev, file_priv);
30007 @@ -491,10 +491,10 @@ int drm_release(struct inode *inode, struct file *filp)
30008 * Begin inline drm_release
30009 */
30010
30011 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
30012 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
30013 task_pid_nr(current),
30014 (long)old_encode_dev(file_priv->minor->device),
30015 - dev->open_count);
30016 + local_read(&dev->open_count));
30017
30018 /* Release any auth tokens that might point to this file_priv,
30019 (do that under the drm_global_mutex) */
30020 @@ -584,8 +584,8 @@ int drm_release(struct inode *inode, struct file *filp)
30021 * End inline drm_release
30022 */
30023
30024 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
30025 - if (!--dev->open_count) {
30026 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
30027 + if (local_dec_and_test(&dev->open_count)) {
30028 if (atomic_read(&dev->ioctl_count)) {
30029 DRM_ERROR("Device busy: %d\n",
30030 atomic_read(&dev->ioctl_count));
30031 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
30032 index c87dc96..326055d 100644
30033 --- a/drivers/gpu/drm/drm_global.c
30034 +++ b/drivers/gpu/drm/drm_global.c
30035 @@ -36,7 +36,7 @@
30036 struct drm_global_item {
30037 struct mutex mutex;
30038 void *object;
30039 - int refcount;
30040 + atomic_t refcount;
30041 };
30042
30043 static struct drm_global_item glob[DRM_GLOBAL_NUM];
30044 @@ -49,7 +49,7 @@ void drm_global_init(void)
30045 struct drm_global_item *item = &glob[i];
30046 mutex_init(&item->mutex);
30047 item->object = NULL;
30048 - item->refcount = 0;
30049 + atomic_set(&item->refcount, 0);
30050 }
30051 }
30052
30053 @@ -59,7 +59,7 @@ void drm_global_release(void)
30054 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30055 struct drm_global_item *item = &glob[i];
30056 BUG_ON(item->object != NULL);
30057 - BUG_ON(item->refcount != 0);
30058 + BUG_ON(atomic_read(&item->refcount) != 0);
30059 }
30060 }
30061
30062 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30063 void *object;
30064
30065 mutex_lock(&item->mutex);
30066 - if (item->refcount == 0) {
30067 + if (atomic_read(&item->refcount) == 0) {
30068 item->object = kzalloc(ref->size, GFP_KERNEL);
30069 if (unlikely(item->object == NULL)) {
30070 ret = -ENOMEM;
30071 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30072 goto out_err;
30073
30074 }
30075 - ++item->refcount;
30076 + atomic_inc(&item->refcount);
30077 ref->object = item->object;
30078 object = item->object;
30079 mutex_unlock(&item->mutex);
30080 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
30081 struct drm_global_item *item = &glob[ref->global_type];
30082
30083 mutex_lock(&item->mutex);
30084 - BUG_ON(item->refcount == 0);
30085 + BUG_ON(atomic_read(&item->refcount) == 0);
30086 BUG_ON(ref->object != item->object);
30087 - if (--item->refcount == 0) {
30088 + if (atomic_dec_and_test(&item->refcount)) {
30089 ref->release(ref);
30090 item->object = NULL;
30091 }
30092 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30093 index ab1162d..42587b2 100644
30094 --- a/drivers/gpu/drm/drm_info.c
30095 +++ b/drivers/gpu/drm/drm_info.c
30096 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
30097 struct drm_local_map *map;
30098 struct drm_map_list *r_list;
30099
30100 - /* Hardcoded from _DRM_FRAME_BUFFER,
30101 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30102 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30103 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30104 + static const char * const types[] = {
30105 + [_DRM_FRAME_BUFFER] = "FB",
30106 + [_DRM_REGISTERS] = "REG",
30107 + [_DRM_SHM] = "SHM",
30108 + [_DRM_AGP] = "AGP",
30109 + [_DRM_SCATTER_GATHER] = "SG",
30110 + [_DRM_CONSISTENT] = "PCI",
30111 + [_DRM_GEM] = "GEM" };
30112 const char *type;
30113 int i;
30114
30115 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
30116 map = r_list->map;
30117 if (!map)
30118 continue;
30119 - if (map->type < 0 || map->type > 5)
30120 + if (map->type >= ARRAY_SIZE(types))
30121 type = "??";
30122 else
30123 type = types[map->type];
30124 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
30125 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30126 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30127 vma->vm_flags & VM_IO ? 'i' : '-',
30128 +#ifdef CONFIG_GRKERNSEC_HIDESYM
30129 + 0);
30130 +#else
30131 vma->vm_pgoff);
30132 +#endif
30133
30134 #if defined(__i386__)
30135 pgprot = pgprot_val(vma->vm_page_prot);
30136 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
30137 index 637fcc3..e890b33 100644
30138 --- a/drivers/gpu/drm/drm_ioc32.c
30139 +++ b/drivers/gpu/drm/drm_ioc32.c
30140 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
30141 request = compat_alloc_user_space(nbytes);
30142 if (!access_ok(VERIFY_WRITE, request, nbytes))
30143 return -EFAULT;
30144 - list = (struct drm_buf_desc *) (request + 1);
30145 + list = (struct drm_buf_desc __user *) (request + 1);
30146
30147 if (__put_user(count, &request->count)
30148 || __put_user(list, &request->list))
30149 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
30150 request = compat_alloc_user_space(nbytes);
30151 if (!access_ok(VERIFY_WRITE, request, nbytes))
30152 return -EFAULT;
30153 - list = (struct drm_buf_pub *) (request + 1);
30154 + list = (struct drm_buf_pub __user *) (request + 1);
30155
30156 if (__put_user(count, &request->count)
30157 || __put_user(list, &request->list))
30158 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
30159 index cf85155..f2665cb 100644
30160 --- a/drivers/gpu/drm/drm_ioctl.c
30161 +++ b/drivers/gpu/drm/drm_ioctl.c
30162 @@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
30163 stats->data[i].value =
30164 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30165 else
30166 - stats->data[i].value = atomic_read(&dev->counts[i]);
30167 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30168 stats->data[i].type = dev->types[i];
30169 }
30170
30171 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
30172 index c79c713..2048588 100644
30173 --- a/drivers/gpu/drm/drm_lock.c
30174 +++ b/drivers/gpu/drm/drm_lock.c
30175 @@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30176 if (drm_lock_take(&master->lock, lock->context)) {
30177 master->lock.file_priv = file_priv;
30178 master->lock.lock_time = jiffies;
30179 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30180 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30181 break; /* Got lock */
30182 }
30183
30184 @@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30185 return -EINVAL;
30186 }
30187
30188 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30189 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30190
30191 if (drm_lock_free(&master->lock, lock->context)) {
30192 /* FIXME: Should really bail out here. */
30193 diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
30194 index aa454f8..6d38580 100644
30195 --- a/drivers/gpu/drm/drm_stub.c
30196 +++ b/drivers/gpu/drm/drm_stub.c
30197 @@ -512,7 +512,7 @@ void drm_unplug_dev(struct drm_device *dev)
30198
30199 drm_device_set_unplugged(dev);
30200
30201 - if (dev->open_count == 0) {
30202 + if (local_read(&dev->open_count) == 0) {
30203 drm_put_dev(dev);
30204 }
30205 mutex_unlock(&drm_global_mutex);
30206 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
30207 index f920fb5..001c52d 100644
30208 --- a/drivers/gpu/drm/i810/i810_dma.c
30209 +++ b/drivers/gpu/drm/i810/i810_dma.c
30210 @@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
30211 dma->buflist[vertex->idx],
30212 vertex->discard, vertex->used);
30213
30214 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30215 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30216 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30217 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30218 sarea_priv->last_enqueue = dev_priv->counter - 1;
30219 sarea_priv->last_dispatch = (int)hw_status[5];
30220
30221 @@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
30222 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30223 mc->last_render);
30224
30225 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30226 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30227 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30228 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30229 sarea_priv->last_enqueue = dev_priv->counter - 1;
30230 sarea_priv->last_dispatch = (int)hw_status[5];
30231
30232 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30233 index c9339f4..f5e1b9d 100644
30234 --- a/drivers/gpu/drm/i810/i810_drv.h
30235 +++ b/drivers/gpu/drm/i810/i810_drv.h
30236 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30237 int page_flipping;
30238
30239 wait_queue_head_t irq_queue;
30240 - atomic_t irq_received;
30241 - atomic_t irq_emitted;
30242 + atomic_unchecked_t irq_received;
30243 + atomic_unchecked_t irq_emitted;
30244
30245 int front_offset;
30246 } drm_i810_private_t;
30247 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
30248 index e6162a1..b2ff486 100644
30249 --- a/drivers/gpu/drm/i915/i915_debugfs.c
30250 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
30251 @@ -500,7 +500,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
30252 I915_READ(GTIMR));
30253 }
30254 seq_printf(m, "Interrupts received: %d\n",
30255 - atomic_read(&dev_priv->irq_received));
30256 + atomic_read_unchecked(&dev_priv->irq_received));
30257 for (i = 0; i < I915_NUM_RINGS; i++) {
30258 if (IS_GEN6(dev) || IS_GEN7(dev)) {
30259 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
30260 @@ -1313,7 +1313,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
30261 return ret;
30262
30263 if (opregion->header)
30264 - seq_write(m, opregion->header, OPREGION_SIZE);
30265 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
30266
30267 mutex_unlock(&dev->struct_mutex);
30268
30269 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
30270 index ba60f3c..e2dff7f 100644
30271 --- a/drivers/gpu/drm/i915/i915_dma.c
30272 +++ b/drivers/gpu/drm/i915/i915_dma.c
30273 @@ -1178,7 +1178,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
30274 bool can_switch;
30275
30276 spin_lock(&dev->count_lock);
30277 - can_switch = (dev->open_count == 0);
30278 + can_switch = (local_read(&dev->open_count) == 0);
30279 spin_unlock(&dev->count_lock);
30280 return can_switch;
30281 }
30282 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
30283 index 5fabc6c..0b08aa1 100644
30284 --- a/drivers/gpu/drm/i915/i915_drv.h
30285 +++ b/drivers/gpu/drm/i915/i915_drv.h
30286 @@ -240,7 +240,7 @@ struct drm_i915_display_funcs {
30287 /* render clock increase/decrease */
30288 /* display clock increase/decrease */
30289 /* pll clock increase/decrease */
30290 -};
30291 +} __no_const;
30292
30293 struct intel_device_info {
30294 u8 gen;
30295 @@ -350,7 +350,7 @@ typedef struct drm_i915_private {
30296 int current_page;
30297 int page_flipping;
30298
30299 - atomic_t irq_received;
30300 + atomic_unchecked_t irq_received;
30301
30302 /* protects the irq masks */
30303 spinlock_t irq_lock;
30304 @@ -937,7 +937,7 @@ struct drm_i915_gem_object {
30305 * will be page flipped away on the next vblank. When it
30306 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30307 */
30308 - atomic_t pending_flip;
30309 + atomic_unchecked_t pending_flip;
30310 };
30311
30312 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
30313 @@ -1359,7 +1359,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
30314 extern void intel_teardown_gmbus(struct drm_device *dev);
30315 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30316 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30317 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30318 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30319 {
30320 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30321 }
30322 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30323 index de43194..a14c4cc 100644
30324 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30325 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30326 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
30327 i915_gem_clflush_object(obj);
30328
30329 if (obj->base.pending_write_domain)
30330 - cd->flips |= atomic_read(&obj->pending_flip);
30331 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30332
30333 /* The actual obj->write_domain will be updated with
30334 * pending_write_domain after we emit the accumulated flush for all
30335 @@ -933,9 +933,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
30336
30337 static int
30338 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30339 - int count)
30340 + unsigned int count)
30341 {
30342 - int i;
30343 + unsigned int i;
30344
30345 for (i = 0; i < count; i++) {
30346 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30347 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
30348 index f57e5cf..c82f79d 100644
30349 --- a/drivers/gpu/drm/i915/i915_irq.c
30350 +++ b/drivers/gpu/drm/i915/i915_irq.c
30351 @@ -472,7 +472,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
30352 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30353 struct drm_i915_master_private *master_priv;
30354
30355 - atomic_inc(&dev_priv->irq_received);
30356 + atomic_inc_unchecked(&dev_priv->irq_received);
30357
30358 /* disable master interrupt before clearing iir */
30359 de_ier = I915_READ(DEIER);
30360 @@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
30361 struct drm_i915_master_private *master_priv;
30362 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
30363
30364 - atomic_inc(&dev_priv->irq_received);
30365 + atomic_inc_unchecked(&dev_priv->irq_received);
30366
30367 if (IS_GEN6(dev))
30368 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
30369 @@ -1292,7 +1292,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
30370 int ret = IRQ_NONE, pipe;
30371 bool blc_event = false;
30372
30373 - atomic_inc(&dev_priv->irq_received);
30374 + atomic_inc_unchecked(&dev_priv->irq_received);
30375
30376 iir = I915_READ(IIR);
30377
30378 @@ -1803,7 +1803,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
30379 {
30380 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30381
30382 - atomic_set(&dev_priv->irq_received, 0);
30383 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30384
30385 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30386 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30387 @@ -1980,7 +1980,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
30388 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30389 int pipe;
30390
30391 - atomic_set(&dev_priv->irq_received, 0);
30392 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30393
30394 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30395 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30396 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
30397 index d4d162f..e80037c 100644
30398 --- a/drivers/gpu/drm/i915/intel_display.c
30399 +++ b/drivers/gpu/drm/i915/intel_display.c
30400 @@ -2254,7 +2254,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
30401
30402 wait_event(dev_priv->pending_flip_queue,
30403 atomic_read(&dev_priv->mm.wedged) ||
30404 - atomic_read(&obj->pending_flip) == 0);
30405 + atomic_read_unchecked(&obj->pending_flip) == 0);
30406
30407 /* Big Hammer, we also need to ensure that any pending
30408 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
30409 @@ -2919,7 +2919,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
30410 obj = to_intel_framebuffer(crtc->fb)->obj;
30411 dev_priv = crtc->dev->dev_private;
30412 wait_event(dev_priv->pending_flip_queue,
30413 - atomic_read(&obj->pending_flip) == 0);
30414 + atomic_read_unchecked(&obj->pending_flip) == 0);
30415 }
30416
30417 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
30418 @@ -7286,7 +7286,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
30419
30420 atomic_clear_mask(1 << intel_crtc->plane,
30421 &obj->pending_flip.counter);
30422 - if (atomic_read(&obj->pending_flip) == 0)
30423 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
30424 wake_up(&dev_priv->pending_flip_queue);
30425
30426 schedule_work(&work->work);
30427 @@ -7582,7 +7582,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30428 /* Block clients from rendering to the new back buffer until
30429 * the flip occurs and the object is no longer visible.
30430 */
30431 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30432 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30433
30434 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
30435 if (ret)
30436 @@ -7596,7 +7596,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30437 return 0;
30438
30439 cleanup_pending:
30440 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30441 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30442 drm_gem_object_unreference(&work->old_fb_obj->base);
30443 drm_gem_object_unreference(&obj->base);
30444 mutex_unlock(&dev->struct_mutex);
30445 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
30446 index 54558a0..2d97005 100644
30447 --- a/drivers/gpu/drm/mga/mga_drv.h
30448 +++ b/drivers/gpu/drm/mga/mga_drv.h
30449 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
30450 u32 clear_cmd;
30451 u32 maccess;
30452
30453 - atomic_t vbl_received; /**< Number of vblanks received. */
30454 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
30455 wait_queue_head_t fence_queue;
30456 - atomic_t last_fence_retired;
30457 + atomic_unchecked_t last_fence_retired;
30458 u32 next_fence_to_post;
30459
30460 unsigned int fb_cpp;
30461 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
30462 index 2581202..f230a8d9 100644
30463 --- a/drivers/gpu/drm/mga/mga_irq.c
30464 +++ b/drivers/gpu/drm/mga/mga_irq.c
30465 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
30466 if (crtc != 0)
30467 return 0;
30468
30469 - return atomic_read(&dev_priv->vbl_received);
30470 + return atomic_read_unchecked(&dev_priv->vbl_received);
30471 }
30472
30473
30474 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30475 /* VBLANK interrupt */
30476 if (status & MGA_VLINEPEN) {
30477 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
30478 - atomic_inc(&dev_priv->vbl_received);
30479 + atomic_inc_unchecked(&dev_priv->vbl_received);
30480 drm_handle_vblank(dev, 0);
30481 handled = 1;
30482 }
30483 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30484 if ((prim_start & ~0x03) != (prim_end & ~0x03))
30485 MGA_WRITE(MGA_PRIMEND, prim_end);
30486
30487 - atomic_inc(&dev_priv->last_fence_retired);
30488 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
30489 DRM_WAKEUP(&dev_priv->fence_queue);
30490 handled = 1;
30491 }
30492 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
30493 * using fences.
30494 */
30495 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
30496 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
30497 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
30498 - *sequence) <= (1 << 23)));
30499
30500 *sequence = cur_fence;
30501 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
30502 index 0be4a81..7464804 100644
30503 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
30504 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
30505 @@ -5329,7 +5329,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
30506 struct bit_table {
30507 const char id;
30508 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
30509 -};
30510 +} __no_const;
30511
30512 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
30513
30514 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
30515 index 3aef353..0ad1322 100644
30516 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
30517 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
30518 @@ -240,7 +240,7 @@ struct nouveau_channel {
30519 struct list_head pending;
30520 uint32_t sequence;
30521 uint32_t sequence_ack;
30522 - atomic_t last_sequence_irq;
30523 + atomic_unchecked_t last_sequence_irq;
30524 struct nouveau_vma vma;
30525 } fence;
30526
30527 @@ -321,7 +321,7 @@ struct nouveau_exec_engine {
30528 u32 handle, u16 class);
30529 void (*set_tile_region)(struct drm_device *dev, int i);
30530 void (*tlb_flush)(struct drm_device *, int engine);
30531 -};
30532 +} __no_const;
30533
30534 struct nouveau_instmem_engine {
30535 void *priv;
30536 @@ -343,13 +343,13 @@ struct nouveau_instmem_engine {
30537 struct nouveau_mc_engine {
30538 int (*init)(struct drm_device *dev);
30539 void (*takedown)(struct drm_device *dev);
30540 -};
30541 +} __no_const;
30542
30543 struct nouveau_timer_engine {
30544 int (*init)(struct drm_device *dev);
30545 void (*takedown)(struct drm_device *dev);
30546 uint64_t (*read)(struct drm_device *dev);
30547 -};
30548 +} __no_const;
30549
30550 struct nouveau_fb_engine {
30551 int num_tiles;
30552 @@ -590,7 +590,7 @@ struct nouveau_vram_engine {
30553 void (*put)(struct drm_device *, struct nouveau_mem **);
30554
30555 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
30556 -};
30557 +} __no_const;
30558
30559 struct nouveau_engine {
30560 struct nouveau_instmem_engine instmem;
30561 @@ -739,7 +739,7 @@ struct drm_nouveau_private {
30562 struct drm_global_reference mem_global_ref;
30563 struct ttm_bo_global_ref bo_global_ref;
30564 struct ttm_bo_device bdev;
30565 - atomic_t validate_sequence;
30566 + atomic_unchecked_t validate_sequence;
30567 } ttm;
30568
30569 struct {
30570 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
30571 index c1dc20f..4df673c 100644
30572 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
30573 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
30574 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
30575 if (USE_REFCNT(dev))
30576 sequence = nvchan_rd32(chan, 0x48);
30577 else
30578 - sequence = atomic_read(&chan->fence.last_sequence_irq);
30579 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
30580
30581 if (chan->fence.sequence_ack == sequence)
30582 goto out;
30583 @@ -538,7 +538,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
30584 return ret;
30585 }
30586
30587 - atomic_set(&chan->fence.last_sequence_irq, 0);
30588 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
30589 return 0;
30590 }
30591
30592 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
30593 index ed52a6f..484acdc 100644
30594 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30595 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30596 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
30597 int trycnt = 0;
30598 int ret, i;
30599
30600 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30601 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30602 retry:
30603 if (++trycnt > 100000) {
30604 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
30605 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
30606 index c2a8511..4b996f9 100644
30607 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
30608 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
30609 @@ -588,7 +588,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
30610 bool can_switch;
30611
30612 spin_lock(&dev->count_lock);
30613 - can_switch = (dev->open_count == 0);
30614 + can_switch = (local_read(&dev->open_count) == 0);
30615 spin_unlock(&dev->count_lock);
30616 return can_switch;
30617 }
30618 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
30619 index dbdea8e..cd6eeeb 100644
30620 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
30621 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
30622 @@ -554,7 +554,7 @@ static int
30623 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
30624 u32 class, u32 mthd, u32 data)
30625 {
30626 - atomic_set(&chan->fence.last_sequence_irq, data);
30627 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
30628 return 0;
30629 }
30630
30631 diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
30632 index 2746402..c8dc4a4 100644
30633 --- a/drivers/gpu/drm/nouveau/nv50_sor.c
30634 +++ b/drivers/gpu/drm/nouveau/nv50_sor.c
30635 @@ -304,7 +304,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
30636 }
30637
30638 if (nv_encoder->dcb->type == OUTPUT_DP) {
30639 - struct dp_train_func func = {
30640 + static struct dp_train_func func = {
30641 .link_set = nv50_sor_dp_link_set,
30642 .train_set = nv50_sor_dp_train_set,
30643 .train_adj = nv50_sor_dp_train_adj
30644 diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
30645 index 0247250..d2f6aaf 100644
30646 --- a/drivers/gpu/drm/nouveau/nvd0_display.c
30647 +++ b/drivers/gpu/drm/nouveau/nvd0_display.c
30648 @@ -1366,7 +1366,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
30649 nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
30650
30651 if (nv_encoder->dcb->type == OUTPUT_DP) {
30652 - struct dp_train_func func = {
30653 + static struct dp_train_func func = {
30654 .link_set = nvd0_sor_dp_link_set,
30655 .train_set = nvd0_sor_dp_train_set,
30656 .train_adj = nvd0_sor_dp_train_adj
30657 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30658 index bcac90b..53bfc76 100644
30659 --- a/drivers/gpu/drm/r128/r128_cce.c
30660 +++ b/drivers/gpu/drm/r128/r128_cce.c
30661 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30662
30663 /* GH: Simple idle check.
30664 */
30665 - atomic_set(&dev_priv->idle_count, 0);
30666 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30667
30668 /* We don't support anything other than bus-mastering ring mode,
30669 * but the ring can be in either AGP or PCI space for the ring
30670 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30671 index 930c71b..499aded 100644
30672 --- a/drivers/gpu/drm/r128/r128_drv.h
30673 +++ b/drivers/gpu/drm/r128/r128_drv.h
30674 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30675 int is_pci;
30676 unsigned long cce_buffers_offset;
30677
30678 - atomic_t idle_count;
30679 + atomic_unchecked_t idle_count;
30680
30681 int page_flipping;
30682 int current_page;
30683 u32 crtc_offset;
30684 u32 crtc_offset_cntl;
30685
30686 - atomic_t vbl_received;
30687 + atomic_unchecked_t vbl_received;
30688
30689 u32 color_fmt;
30690 unsigned int front_offset;
30691 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30692 index 429d5a0..7e899ed 100644
30693 --- a/drivers/gpu/drm/r128/r128_irq.c
30694 +++ b/drivers/gpu/drm/r128/r128_irq.c
30695 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30696 if (crtc != 0)
30697 return 0;
30698
30699 - return atomic_read(&dev_priv->vbl_received);
30700 + return atomic_read_unchecked(&dev_priv->vbl_received);
30701 }
30702
30703 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30704 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30705 /* VBLANK interrupt */
30706 if (status & R128_CRTC_VBLANK_INT) {
30707 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30708 - atomic_inc(&dev_priv->vbl_received);
30709 + atomic_inc_unchecked(&dev_priv->vbl_received);
30710 drm_handle_vblank(dev, 0);
30711 return IRQ_HANDLED;
30712 }
30713 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30714 index a9e33ce..09edd4b 100644
30715 --- a/drivers/gpu/drm/r128/r128_state.c
30716 +++ b/drivers/gpu/drm/r128/r128_state.c
30717 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
30718
30719 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30720 {
30721 - if (atomic_read(&dev_priv->idle_count) == 0)
30722 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30723 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30724 else
30725 - atomic_set(&dev_priv->idle_count, 0);
30726 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30727 }
30728
30729 #endif
30730 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
30731 index 5a82b6b..9e69c73 100644
30732 --- a/drivers/gpu/drm/radeon/mkregtable.c
30733 +++ b/drivers/gpu/drm/radeon/mkregtable.c
30734 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
30735 regex_t mask_rex;
30736 regmatch_t match[4];
30737 char buf[1024];
30738 - size_t end;
30739 + long end;
30740 int len;
30741 int done = 0;
30742 int r;
30743 unsigned o;
30744 struct offset *offset;
30745 char last_reg_s[10];
30746 - int last_reg;
30747 + unsigned long last_reg;
30748
30749 if (regcomp
30750 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30751 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
30752 index 138b952..d74f9cb 100644
30753 --- a/drivers/gpu/drm/radeon/radeon.h
30754 +++ b/drivers/gpu/drm/radeon/radeon.h
30755 @@ -253,7 +253,7 @@ struct radeon_fence_driver {
30756 uint32_t scratch_reg;
30757 uint64_t gpu_addr;
30758 volatile uint32_t *cpu_addr;
30759 - atomic_t seq;
30760 + atomic_unchecked_t seq;
30761 uint32_t last_seq;
30762 unsigned long last_jiffies;
30763 unsigned long last_timeout;
30764 @@ -753,7 +753,7 @@ struct r600_blit_cp_primitives {
30765 int x2, int y2);
30766 void (*draw_auto)(struct radeon_device *rdev);
30767 void (*set_default_state)(struct radeon_device *rdev);
30768 -};
30769 +} __no_const;
30770
30771 struct r600_blit {
30772 struct mutex mutex;
30773 @@ -1246,7 +1246,7 @@ struct radeon_asic {
30774 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
30775 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
30776 } pflip;
30777 -};
30778 +} __no_const;
30779
30780 /*
30781 * Asic structures
30782 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
30783 index 5992502..c19c633 100644
30784 --- a/drivers/gpu/drm/radeon/radeon_device.c
30785 +++ b/drivers/gpu/drm/radeon/radeon_device.c
30786 @@ -691,7 +691,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
30787 bool can_switch;
30788
30789 spin_lock(&dev->count_lock);
30790 - can_switch = (dev->open_count == 0);
30791 + can_switch = (local_read(&dev->open_count) == 0);
30792 spin_unlock(&dev->count_lock);
30793 return can_switch;
30794 }
30795 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
30796 index a1b59ca..86f2d44 100644
30797 --- a/drivers/gpu/drm/radeon/radeon_drv.h
30798 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
30799 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
30800
30801 /* SW interrupt */
30802 wait_queue_head_t swi_queue;
30803 - atomic_t swi_emitted;
30804 + atomic_unchecked_t swi_emitted;
30805 int vblank_crtc;
30806 uint32_t irq_enable_reg;
30807 uint32_t r500_disp_irq_reg;
30808 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
30809 index 4bd36a3..e66fe9c 100644
30810 --- a/drivers/gpu/drm/radeon/radeon_fence.c
30811 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
30812 @@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
30813 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
30814 return 0;
30815 }
30816 - fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
30817 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
30818 if (!rdev->ring[fence->ring].ready)
30819 /* FIXME: cp is not running assume everythings is done right
30820 * away
30821 @@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
30822 }
30823 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
30824 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
30825 - radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
30826 + radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
30827 rdev->fence_drv[ring].initialized = true;
30828 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
30829 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
30830 @@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
30831 rdev->fence_drv[ring].scratch_reg = -1;
30832 rdev->fence_drv[ring].cpu_addr = NULL;
30833 rdev->fence_drv[ring].gpu_addr = 0;
30834 - atomic_set(&rdev->fence_drv[ring].seq, 0);
30835 + atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
30836 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
30837 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
30838 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
30839 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
30840 index 48b7cea..342236f 100644
30841 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
30842 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
30843 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
30844 request = compat_alloc_user_space(sizeof(*request));
30845 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30846 || __put_user(req32.param, &request->param)
30847 - || __put_user((void __user *)(unsigned long)req32.value,
30848 + || __put_user((unsigned long)req32.value,
30849 &request->value))
30850 return -EFAULT;
30851
30852 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
30853 index 00da384..32f972d 100644
30854 --- a/drivers/gpu/drm/radeon/radeon_irq.c
30855 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
30856 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
30857 unsigned int ret;
30858 RING_LOCALS;
30859
30860 - atomic_inc(&dev_priv->swi_emitted);
30861 - ret = atomic_read(&dev_priv->swi_emitted);
30862 + atomic_inc_unchecked(&dev_priv->swi_emitted);
30863 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30864
30865 BEGIN_RING(4);
30866 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30867 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
30868 drm_radeon_private_t *dev_priv =
30869 (drm_radeon_private_t *) dev->dev_private;
30870
30871 - atomic_set(&dev_priv->swi_emitted, 0);
30872 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30873 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30874
30875 dev->max_vblank_count = 0x001fffff;
30876 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
30877 index e8422ae..d22d4a8 100644
30878 --- a/drivers/gpu/drm/radeon/radeon_state.c
30879 +++ b/drivers/gpu/drm/radeon/radeon_state.c
30880 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
30881 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
30882 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
30883
30884 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30885 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30886 sarea_priv->nbox * sizeof(depth_boxes[0])))
30887 return -EFAULT;
30888
30889 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
30890 {
30891 drm_radeon_private_t *dev_priv = dev->dev_private;
30892 drm_radeon_getparam_t *param = data;
30893 - int value;
30894 + int value = 0;
30895
30896 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30897
30898 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
30899 index f493c64..524ab6b 100644
30900 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
30901 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
30902 @@ -843,8 +843,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30903 }
30904 if (unlikely(ttm_vm_ops == NULL)) {
30905 ttm_vm_ops = vma->vm_ops;
30906 - radeon_ttm_vm_ops = *ttm_vm_ops;
30907 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30908 + pax_open_kernel();
30909 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
30910 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30911 + pax_close_kernel();
30912 }
30913 vma->vm_ops = &radeon_ttm_vm_ops;
30914 return 0;
30915 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
30916 index f2c3b9d..d5a376b 100644
30917 --- a/drivers/gpu/drm/radeon/rs690.c
30918 +++ b/drivers/gpu/drm/radeon/rs690.c
30919 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
30920 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30921 rdev->pm.sideport_bandwidth.full)
30922 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30923 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
30924 + read_delay_latency.full = dfixed_const(800 * 1000);
30925 read_delay_latency.full = dfixed_div(read_delay_latency,
30926 rdev->pm.igp_sideport_mclk);
30927 + a.full = dfixed_const(370);
30928 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
30929 } else {
30930 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30931 rdev->pm.k8_bandwidth.full)
30932 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30933 index ebc6fac..a8313ed 100644
30934 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
30935 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30936 @@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
30937 static int ttm_pool_mm_shrink(struct shrinker *shrink,
30938 struct shrink_control *sc)
30939 {
30940 - static atomic_t start_pool = ATOMIC_INIT(0);
30941 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
30942 unsigned i;
30943 - unsigned pool_offset = atomic_add_return(1, &start_pool);
30944 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
30945 struct ttm_page_pool *pool;
30946 int shrink_pages = sc->nr_to_scan;
30947
30948 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
30949 index 88edacc..1e5412b 100644
30950 --- a/drivers/gpu/drm/via/via_drv.h
30951 +++ b/drivers/gpu/drm/via/via_drv.h
30952 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30953 typedef uint32_t maskarray_t[5];
30954
30955 typedef struct drm_via_irq {
30956 - atomic_t irq_received;
30957 + atomic_unchecked_t irq_received;
30958 uint32_t pending_mask;
30959 uint32_t enable_mask;
30960 wait_queue_head_t irq_queue;
30961 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
30962 struct timeval last_vblank;
30963 int last_vblank_valid;
30964 unsigned usec_per_vblank;
30965 - atomic_t vbl_received;
30966 + atomic_unchecked_t vbl_received;
30967 drm_via_state_t hc_state;
30968 char pci_buf[VIA_PCI_BUF_SIZE];
30969 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30970 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
30971 index d391f48..10c8ca3 100644
30972 --- a/drivers/gpu/drm/via/via_irq.c
30973 +++ b/drivers/gpu/drm/via/via_irq.c
30974 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
30975 if (crtc != 0)
30976 return 0;
30977
30978 - return atomic_read(&dev_priv->vbl_received);
30979 + return atomic_read_unchecked(&dev_priv->vbl_received);
30980 }
30981
30982 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30983 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30984
30985 status = VIA_READ(VIA_REG_INTERRUPT);
30986 if (status & VIA_IRQ_VBLANK_PENDING) {
30987 - atomic_inc(&dev_priv->vbl_received);
30988 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30989 + atomic_inc_unchecked(&dev_priv->vbl_received);
30990 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30991 do_gettimeofday(&cur_vblank);
30992 if (dev_priv->last_vblank_valid) {
30993 dev_priv->usec_per_vblank =
30994 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30995 dev_priv->last_vblank = cur_vblank;
30996 dev_priv->last_vblank_valid = 1;
30997 }
30998 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30999 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
31000 DRM_DEBUG("US per vblank is: %u\n",
31001 dev_priv->usec_per_vblank);
31002 }
31003 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31004
31005 for (i = 0; i < dev_priv->num_irqs; ++i) {
31006 if (status & cur_irq->pending_mask) {
31007 - atomic_inc(&cur_irq->irq_received);
31008 + atomic_inc_unchecked(&cur_irq->irq_received);
31009 DRM_WAKEUP(&cur_irq->irq_queue);
31010 handled = 1;
31011 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
31012 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
31013 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31014 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
31015 masks[irq][4]));
31016 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
31017 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
31018 } else {
31019 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31020 (((cur_irq_sequence =
31021 - atomic_read(&cur_irq->irq_received)) -
31022 + atomic_read_unchecked(&cur_irq->irq_received)) -
31023 *sequence) <= (1 << 23)));
31024 }
31025 *sequence = cur_irq_sequence;
31026 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
31027 }
31028
31029 for (i = 0; i < dev_priv->num_irqs; ++i) {
31030 - atomic_set(&cur_irq->irq_received, 0);
31031 + atomic_set_unchecked(&cur_irq->irq_received, 0);
31032 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
31033 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
31034 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
31035 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
31036 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
31037 case VIA_IRQ_RELATIVE:
31038 irqwait->request.sequence +=
31039 - atomic_read(&cur_irq->irq_received);
31040 + atomic_read_unchecked(&cur_irq->irq_received);
31041 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
31042 case VIA_IRQ_ABSOLUTE:
31043 break;
31044 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31045 index d0f2c07..9ebd9c3 100644
31046 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31047 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31048 @@ -263,7 +263,7 @@ struct vmw_private {
31049 * Fencing and IRQs.
31050 */
31051
31052 - atomic_t marker_seq;
31053 + atomic_unchecked_t marker_seq;
31054 wait_queue_head_t fence_queue;
31055 wait_queue_head_t fifo_queue;
31056 int fence_queue_waiters; /* Protected by hw_mutex */
31057 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31058 index a0c2f12..68ae6cb 100644
31059 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31060 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31061 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
31062 (unsigned int) min,
31063 (unsigned int) fifo->capabilities);
31064
31065 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31066 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31067 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
31068 vmw_marker_queue_init(&fifo->marker_queue);
31069 return vmw_fifo_send_fence(dev_priv, &dummy);
31070 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
31071 if (reserveable)
31072 iowrite32(bytes, fifo_mem +
31073 SVGA_FIFO_RESERVED);
31074 - return fifo_mem + (next_cmd >> 2);
31075 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
31076 } else {
31077 need_bounce = true;
31078 }
31079 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31080
31081 fm = vmw_fifo_reserve(dev_priv, bytes);
31082 if (unlikely(fm == NULL)) {
31083 - *seqno = atomic_read(&dev_priv->marker_seq);
31084 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31085 ret = -ENOMEM;
31086 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
31087 false, 3*HZ);
31088 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31089 }
31090
31091 do {
31092 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31093 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31094 } while (*seqno == 0);
31095
31096 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
31097 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31098 index cabc95f..14b3d77 100644
31099 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31100 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31101 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
31102 * emitted. Then the fence is stale and signaled.
31103 */
31104
31105 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31106 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
31107 > VMW_FENCE_WRAP);
31108
31109 return ret;
31110 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
31111
31112 if (fifo_idle)
31113 down_read(&fifo_state->rwsem);
31114 - signal_seq = atomic_read(&dev_priv->marker_seq);
31115 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
31116 ret = 0;
31117
31118 for (;;) {
31119 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31120 index 8a8725c..afed796 100644
31121 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31122 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31123 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31124 while (!vmw_lag_lt(queue, us)) {
31125 spin_lock(&queue->lock);
31126 if (list_empty(&queue->head))
31127 - seqno = atomic_read(&dev_priv->marker_seq);
31128 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31129 else {
31130 marker = list_first_entry(&queue->head,
31131 struct vmw_marker, head);
31132 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
31133 index 4da66b4..e948655 100644
31134 --- a/drivers/hid/hid-core.c
31135 +++ b/drivers/hid/hid-core.c
31136 @@ -2063,7 +2063,7 @@ static bool hid_ignore(struct hid_device *hdev)
31137
31138 int hid_add_device(struct hid_device *hdev)
31139 {
31140 - static atomic_t id = ATOMIC_INIT(0);
31141 + static atomic_unchecked_t id = ATOMIC_INIT(0);
31142 int ret;
31143
31144 if (WARN_ON(hdev->status & HID_STAT_ADDED))
31145 @@ -2078,7 +2078,7 @@ int hid_add_device(struct hid_device *hdev)
31146 /* XXX hack, any other cleaner solution after the driver core
31147 * is converted to allow more than 20 bytes as the device name? */
31148 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31149 - hdev->vendor, hdev->product, atomic_inc_return(&id));
31150 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31151
31152 hid_debug_register(hdev, dev_name(&hdev->dev));
31153 ret = device_add(&hdev->dev);
31154 diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
31155 index eec3291..8ed706b 100644
31156 --- a/drivers/hid/hid-wiimote-debug.c
31157 +++ b/drivers/hid/hid-wiimote-debug.c
31158 @@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
31159 else if (size == 0)
31160 return -EIO;
31161
31162 - if (copy_to_user(u, buf, size))
31163 + if (size > sizeof(buf) || copy_to_user(u, buf, size))
31164 return -EFAULT;
31165
31166 *off += size;
31167 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
31168 index b1ec0e2..c295a61 100644
31169 --- a/drivers/hid/usbhid/hiddev.c
31170 +++ b/drivers/hid/usbhid/hiddev.c
31171 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31172 break;
31173
31174 case HIDIOCAPPLICATION:
31175 - if (arg < 0 || arg >= hid->maxapplication)
31176 + if (arg >= hid->maxapplication)
31177 break;
31178
31179 for (i = 0; i < hid->maxcollection; i++)
31180 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31181 index 4065374..10ed7dc 100644
31182 --- a/drivers/hv/channel.c
31183 +++ b/drivers/hv/channel.c
31184 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31185 int ret = 0;
31186 int t;
31187
31188 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31189 - atomic_inc(&vmbus_connection.next_gpadl_handle);
31190 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31191 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31192
31193 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31194 if (ret)
31195 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
31196 index 15956bd..ea34398 100644
31197 --- a/drivers/hv/hv.c
31198 +++ b/drivers/hv/hv.c
31199 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31200 u64 output_address = (output) ? virt_to_phys(output) : 0;
31201 u32 output_address_hi = output_address >> 32;
31202 u32 output_address_lo = output_address & 0xFFFFFFFF;
31203 - void *hypercall_page = hv_context.hypercall_page;
31204 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31205
31206 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31207 "=a"(hv_status_lo) : "d" (control_hi),
31208 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
31209 index 699f0d8..f4f19250 100644
31210 --- a/drivers/hv/hyperv_vmbus.h
31211 +++ b/drivers/hv/hyperv_vmbus.h
31212 @@ -555,7 +555,7 @@ enum vmbus_connect_state {
31213 struct vmbus_connection {
31214 enum vmbus_connect_state conn_state;
31215
31216 - atomic_t next_gpadl_handle;
31217 + atomic_unchecked_t next_gpadl_handle;
31218
31219 /*
31220 * Represents channel interrupts. Each bit position represents a
31221 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
31222 index a220e57..428f54d 100644
31223 --- a/drivers/hv/vmbus_drv.c
31224 +++ b/drivers/hv/vmbus_drv.c
31225 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31226 {
31227 int ret = 0;
31228
31229 - static atomic_t device_num = ATOMIC_INIT(0);
31230 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31231
31232 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31233 - atomic_inc_return(&device_num));
31234 + atomic_inc_return_unchecked(&device_num));
31235
31236 child_device_obj->device.bus = &hv_bus;
31237 child_device_obj->device.parent = &hv_acpi_dev->dev;
31238 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
31239 index 9140236..ceaef4e 100644
31240 --- a/drivers/hwmon/acpi_power_meter.c
31241 +++ b/drivers/hwmon/acpi_power_meter.c
31242 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
31243 return res;
31244
31245 temp /= 1000;
31246 - if (temp < 0)
31247 - return -EINVAL;
31248
31249 mutex_lock(&resource->lock);
31250 resource->trip[attr->index - 7] = temp;
31251 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
31252 index 8b011d0..3de24a1 100644
31253 --- a/drivers/hwmon/sht15.c
31254 +++ b/drivers/hwmon/sht15.c
31255 @@ -166,7 +166,7 @@ struct sht15_data {
31256 int supply_uV;
31257 bool supply_uV_valid;
31258 struct work_struct update_supply_work;
31259 - atomic_t interrupt_handled;
31260 + atomic_unchecked_t interrupt_handled;
31261 };
31262
31263 /**
31264 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
31265 return ret;
31266
31267 gpio_direction_input(data->pdata->gpio_data);
31268 - atomic_set(&data->interrupt_handled, 0);
31269 + atomic_set_unchecked(&data->interrupt_handled, 0);
31270
31271 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31272 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31273 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
31274 /* Only relevant if the interrupt hasn't occurred. */
31275 - if (!atomic_read(&data->interrupt_handled))
31276 + if (!atomic_read_unchecked(&data->interrupt_handled))
31277 schedule_work(&data->read_work);
31278 }
31279 ret = wait_event_timeout(data->wait_queue,
31280 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
31281
31282 /* First disable the interrupt */
31283 disable_irq_nosync(irq);
31284 - atomic_inc(&data->interrupt_handled);
31285 + atomic_inc_unchecked(&data->interrupt_handled);
31286 /* Then schedule a reading work struct */
31287 if (data->state != SHT15_READING_NOTHING)
31288 schedule_work(&data->read_work);
31289 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
31290 * If not, then start the interrupt again - care here as could
31291 * have gone low in meantime so verify it hasn't!
31292 */
31293 - atomic_set(&data->interrupt_handled, 0);
31294 + atomic_set_unchecked(&data->interrupt_handled, 0);
31295 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31296 /* If still not occurred or another handler was scheduled */
31297 if (gpio_get_value(data->pdata->gpio_data)
31298 - || atomic_read(&data->interrupt_handled))
31299 + || atomic_read_unchecked(&data->interrupt_handled))
31300 return;
31301 }
31302
31303 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31304 index 378fcb5..5e91fa8 100644
31305 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
31306 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
31307 @@ -43,7 +43,7 @@
31308 extern struct i2c_adapter amd756_smbus;
31309
31310 static struct i2c_adapter *s4882_adapter;
31311 -static struct i2c_algorithm *s4882_algo;
31312 +static i2c_algorithm_no_const *s4882_algo;
31313
31314 /* Wrapper access functions for multiplexed SMBus */
31315 static DEFINE_MUTEX(amd756_lock);
31316 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31317 index 29015eb..af2d8e9 100644
31318 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31319 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
31320 @@ -41,7 +41,7 @@
31321 extern struct i2c_adapter *nforce2_smbus;
31322
31323 static struct i2c_adapter *s4985_adapter;
31324 -static struct i2c_algorithm *s4985_algo;
31325 +static i2c_algorithm_no_const *s4985_algo;
31326
31327 /* Wrapper access functions for multiplexed SMBus */
31328 static DEFINE_MUTEX(nforce2_lock);
31329 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31330 index d7a4833..7fae376 100644
31331 --- a/drivers/i2c/i2c-mux.c
31332 +++ b/drivers/i2c/i2c-mux.c
31333 @@ -28,7 +28,7 @@
31334 /* multiplexer per channel data */
31335 struct i2c_mux_priv {
31336 struct i2c_adapter adap;
31337 - struct i2c_algorithm algo;
31338 + i2c_algorithm_no_const algo;
31339
31340 struct i2c_adapter *parent;
31341 void *mux_dev; /* the mux chip/device */
31342 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31343 index 57d00ca..0145194 100644
31344 --- a/drivers/ide/aec62xx.c
31345 +++ b/drivers/ide/aec62xx.c
31346 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
31347 .cable_detect = atp86x_cable_detect,
31348 };
31349
31350 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31351 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31352 { /* 0: AEC6210 */
31353 .name = DRV_NAME,
31354 .init_chipset = init_chipset_aec62xx,
31355 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31356 index 2c8016a..911a27c 100644
31357 --- a/drivers/ide/alim15x3.c
31358 +++ b/drivers/ide/alim15x3.c
31359 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
31360 .dma_sff_read_status = ide_dma_sff_read_status,
31361 };
31362
31363 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
31364 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
31365 .name = DRV_NAME,
31366 .init_chipset = init_chipset_ali15x3,
31367 .init_hwif = init_hwif_ali15x3,
31368 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31369 index 3747b25..56fc995 100644
31370 --- a/drivers/ide/amd74xx.c
31371 +++ b/drivers/ide/amd74xx.c
31372 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
31373 .udma_mask = udma, \
31374 }
31375
31376 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
31377 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
31378 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
31379 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
31380 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
31381 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
31382 index 15f0ead..cb43480 100644
31383 --- a/drivers/ide/atiixp.c
31384 +++ b/drivers/ide/atiixp.c
31385 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
31386 .cable_detect = atiixp_cable_detect,
31387 };
31388
31389 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
31390 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
31391 { /* 0: IXP200/300/400/700 */
31392 .name = DRV_NAME,
31393 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
31394 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
31395 index 5f80312..d1fc438 100644
31396 --- a/drivers/ide/cmd64x.c
31397 +++ b/drivers/ide/cmd64x.c
31398 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
31399 .dma_sff_read_status = ide_dma_sff_read_status,
31400 };
31401
31402 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
31403 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
31404 { /* 0: CMD643 */
31405 .name = DRV_NAME,
31406 .init_chipset = init_chipset_cmd64x,
31407 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
31408 index 2c1e5f7..1444762 100644
31409 --- a/drivers/ide/cs5520.c
31410 +++ b/drivers/ide/cs5520.c
31411 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
31412 .set_dma_mode = cs5520_set_dma_mode,
31413 };
31414
31415 -static const struct ide_port_info cyrix_chipset __devinitdata = {
31416 +static const struct ide_port_info cyrix_chipset __devinitconst = {
31417 .name = DRV_NAME,
31418 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
31419 .port_ops = &cs5520_port_ops,
31420 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
31421 index 4dc4eb9..49b40ad 100644
31422 --- a/drivers/ide/cs5530.c
31423 +++ b/drivers/ide/cs5530.c
31424 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
31425 .udma_filter = cs5530_udma_filter,
31426 };
31427
31428 -static const struct ide_port_info cs5530_chipset __devinitdata = {
31429 +static const struct ide_port_info cs5530_chipset __devinitconst = {
31430 .name = DRV_NAME,
31431 .init_chipset = init_chipset_cs5530,
31432 .init_hwif = init_hwif_cs5530,
31433 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
31434 index 5059faf..18d4c85 100644
31435 --- a/drivers/ide/cs5535.c
31436 +++ b/drivers/ide/cs5535.c
31437 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
31438 .cable_detect = cs5535_cable_detect,
31439 };
31440
31441 -static const struct ide_port_info cs5535_chipset __devinitdata = {
31442 +static const struct ide_port_info cs5535_chipset __devinitconst = {
31443 .name = DRV_NAME,
31444 .port_ops = &cs5535_port_ops,
31445 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
31446 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
31447 index 847553f..3ffb49d 100644
31448 --- a/drivers/ide/cy82c693.c
31449 +++ b/drivers/ide/cy82c693.c
31450 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
31451 .set_dma_mode = cy82c693_set_dma_mode,
31452 };
31453
31454 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
31455 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
31456 .name = DRV_NAME,
31457 .init_iops = init_iops_cy82c693,
31458 .port_ops = &cy82c693_port_ops,
31459 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
31460 index 58c51cd..4aec3b8 100644
31461 --- a/drivers/ide/hpt366.c
31462 +++ b/drivers/ide/hpt366.c
31463 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
31464 }
31465 };
31466
31467 -static const struct hpt_info hpt36x __devinitdata = {
31468 +static const struct hpt_info hpt36x __devinitconst = {
31469 .chip_name = "HPT36x",
31470 .chip_type = HPT36x,
31471 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
31472 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
31473 .timings = &hpt36x_timings
31474 };
31475
31476 -static const struct hpt_info hpt370 __devinitdata = {
31477 +static const struct hpt_info hpt370 __devinitconst = {
31478 .chip_name = "HPT370",
31479 .chip_type = HPT370,
31480 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31481 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
31482 .timings = &hpt37x_timings
31483 };
31484
31485 -static const struct hpt_info hpt370a __devinitdata = {
31486 +static const struct hpt_info hpt370a __devinitconst = {
31487 .chip_name = "HPT370A",
31488 .chip_type = HPT370A,
31489 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31490 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
31491 .timings = &hpt37x_timings
31492 };
31493
31494 -static const struct hpt_info hpt374 __devinitdata = {
31495 +static const struct hpt_info hpt374 __devinitconst = {
31496 .chip_name = "HPT374",
31497 .chip_type = HPT374,
31498 .udma_mask = ATA_UDMA5,
31499 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
31500 .timings = &hpt37x_timings
31501 };
31502
31503 -static const struct hpt_info hpt372 __devinitdata = {
31504 +static const struct hpt_info hpt372 __devinitconst = {
31505 .chip_name = "HPT372",
31506 .chip_type = HPT372,
31507 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31508 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
31509 .timings = &hpt37x_timings
31510 };
31511
31512 -static const struct hpt_info hpt372a __devinitdata = {
31513 +static const struct hpt_info hpt372a __devinitconst = {
31514 .chip_name = "HPT372A",
31515 .chip_type = HPT372A,
31516 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31517 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
31518 .timings = &hpt37x_timings
31519 };
31520
31521 -static const struct hpt_info hpt302 __devinitdata = {
31522 +static const struct hpt_info hpt302 __devinitconst = {
31523 .chip_name = "HPT302",
31524 .chip_type = HPT302,
31525 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31526 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
31527 .timings = &hpt37x_timings
31528 };
31529
31530 -static const struct hpt_info hpt371 __devinitdata = {
31531 +static const struct hpt_info hpt371 __devinitconst = {
31532 .chip_name = "HPT371",
31533 .chip_type = HPT371,
31534 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31535 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
31536 .timings = &hpt37x_timings
31537 };
31538
31539 -static const struct hpt_info hpt372n __devinitdata = {
31540 +static const struct hpt_info hpt372n __devinitconst = {
31541 .chip_name = "HPT372N",
31542 .chip_type = HPT372N,
31543 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31544 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
31545 .timings = &hpt37x_timings
31546 };
31547
31548 -static const struct hpt_info hpt302n __devinitdata = {
31549 +static const struct hpt_info hpt302n __devinitconst = {
31550 .chip_name = "HPT302N",
31551 .chip_type = HPT302N,
31552 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31553 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
31554 .timings = &hpt37x_timings
31555 };
31556
31557 -static const struct hpt_info hpt371n __devinitdata = {
31558 +static const struct hpt_info hpt371n __devinitconst = {
31559 .chip_name = "HPT371N",
31560 .chip_type = HPT371N,
31561 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31562 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
31563 .dma_sff_read_status = ide_dma_sff_read_status,
31564 };
31565
31566 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
31567 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
31568 { /* 0: HPT36x */
31569 .name = DRV_NAME,
31570 .init_chipset = init_chipset_hpt366,
31571 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
31572 index 8126824..55a2798 100644
31573 --- a/drivers/ide/ide-cd.c
31574 +++ b/drivers/ide/ide-cd.c
31575 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
31576 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
31577 if ((unsigned long)buf & alignment
31578 || blk_rq_bytes(rq) & q->dma_pad_mask
31579 - || object_is_on_stack(buf))
31580 + || object_starts_on_stack(buf))
31581 drive->dma = 0;
31582 }
31583 }
31584 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
31585 index 7f56b73..dab5b67 100644
31586 --- a/drivers/ide/ide-pci-generic.c
31587 +++ b/drivers/ide/ide-pci-generic.c
31588 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
31589 .udma_mask = ATA_UDMA6, \
31590 }
31591
31592 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
31593 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
31594 /* 0: Unknown */
31595 DECLARE_GENERIC_PCI_DEV(0),
31596
31597 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
31598 index 560e66d..d5dd180 100644
31599 --- a/drivers/ide/it8172.c
31600 +++ b/drivers/ide/it8172.c
31601 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
31602 .set_dma_mode = it8172_set_dma_mode,
31603 };
31604
31605 -static const struct ide_port_info it8172_port_info __devinitdata = {
31606 +static const struct ide_port_info it8172_port_info __devinitconst = {
31607 .name = DRV_NAME,
31608 .port_ops = &it8172_port_ops,
31609 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
31610 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31611 index 46816ba..1847aeb 100644
31612 --- a/drivers/ide/it8213.c
31613 +++ b/drivers/ide/it8213.c
31614 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
31615 .cable_detect = it8213_cable_detect,
31616 };
31617
31618 -static const struct ide_port_info it8213_chipset __devinitdata = {
31619 +static const struct ide_port_info it8213_chipset __devinitconst = {
31620 .name = DRV_NAME,
31621 .enablebits = { {0x41, 0x80, 0x80} },
31622 .port_ops = &it8213_port_ops,
31623 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31624 index 2e3169f..c5611db 100644
31625 --- a/drivers/ide/it821x.c
31626 +++ b/drivers/ide/it821x.c
31627 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
31628 .cable_detect = it821x_cable_detect,
31629 };
31630
31631 -static const struct ide_port_info it821x_chipset __devinitdata = {
31632 +static const struct ide_port_info it821x_chipset __devinitconst = {
31633 .name = DRV_NAME,
31634 .init_chipset = init_chipset_it821x,
31635 .init_hwif = init_hwif_it821x,
31636 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31637 index 74c2c4a..efddd7d 100644
31638 --- a/drivers/ide/jmicron.c
31639 +++ b/drivers/ide/jmicron.c
31640 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
31641 .cable_detect = jmicron_cable_detect,
31642 };
31643
31644 -static const struct ide_port_info jmicron_chipset __devinitdata = {
31645 +static const struct ide_port_info jmicron_chipset __devinitconst = {
31646 .name = DRV_NAME,
31647 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31648 .port_ops = &jmicron_port_ops,
31649 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31650 index 95327a2..73f78d8 100644
31651 --- a/drivers/ide/ns87415.c
31652 +++ b/drivers/ide/ns87415.c
31653 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31654 .dma_sff_read_status = superio_dma_sff_read_status,
31655 };
31656
31657 -static const struct ide_port_info ns87415_chipset __devinitdata = {
31658 +static const struct ide_port_info ns87415_chipset __devinitconst = {
31659 .name = DRV_NAME,
31660 .init_hwif = init_hwif_ns87415,
31661 .tp_ops = &ns87415_tp_ops,
31662 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31663 index 1a53a4c..39edc66 100644
31664 --- a/drivers/ide/opti621.c
31665 +++ b/drivers/ide/opti621.c
31666 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31667 .set_pio_mode = opti621_set_pio_mode,
31668 };
31669
31670 -static const struct ide_port_info opti621_chipset __devinitdata = {
31671 +static const struct ide_port_info opti621_chipset __devinitconst = {
31672 .name = DRV_NAME,
31673 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31674 .port_ops = &opti621_port_ops,
31675 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31676 index 9546fe2..2e5ceb6 100644
31677 --- a/drivers/ide/pdc202xx_new.c
31678 +++ b/drivers/ide/pdc202xx_new.c
31679 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31680 .udma_mask = udma, \
31681 }
31682
31683 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31684 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31685 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31686 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31687 };
31688 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31689 index 3a35ec6..5634510 100644
31690 --- a/drivers/ide/pdc202xx_old.c
31691 +++ b/drivers/ide/pdc202xx_old.c
31692 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31693 .max_sectors = sectors, \
31694 }
31695
31696 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31697 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31698 { /* 0: PDC20246 */
31699 .name = DRV_NAME,
31700 .init_chipset = init_chipset_pdc202xx,
31701 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31702 index 1892e81..fe0fd60 100644
31703 --- a/drivers/ide/piix.c
31704 +++ b/drivers/ide/piix.c
31705 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31706 .udma_mask = udma, \
31707 }
31708
31709 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
31710 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
31711 /* 0: MPIIX */
31712 { /*
31713 * MPIIX actually has only a single IDE channel mapped to
31714 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31715 index a6414a8..c04173e 100644
31716 --- a/drivers/ide/rz1000.c
31717 +++ b/drivers/ide/rz1000.c
31718 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31719 }
31720 }
31721
31722 -static const struct ide_port_info rz1000_chipset __devinitdata = {
31723 +static const struct ide_port_info rz1000_chipset __devinitconst = {
31724 .name = DRV_NAME,
31725 .host_flags = IDE_HFLAG_NO_DMA,
31726 };
31727 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31728 index 356b9b5..d4758eb 100644
31729 --- a/drivers/ide/sc1200.c
31730 +++ b/drivers/ide/sc1200.c
31731 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31732 .dma_sff_read_status = ide_dma_sff_read_status,
31733 };
31734
31735 -static const struct ide_port_info sc1200_chipset __devinitdata = {
31736 +static const struct ide_port_info sc1200_chipset __devinitconst = {
31737 .name = DRV_NAME,
31738 .port_ops = &sc1200_port_ops,
31739 .dma_ops = &sc1200_dma_ops,
31740 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31741 index b7f5b0c..9701038 100644
31742 --- a/drivers/ide/scc_pata.c
31743 +++ b/drivers/ide/scc_pata.c
31744 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
31745 .dma_sff_read_status = scc_dma_sff_read_status,
31746 };
31747
31748 -static const struct ide_port_info scc_chipset __devinitdata = {
31749 +static const struct ide_port_info scc_chipset __devinitconst = {
31750 .name = "sccIDE",
31751 .init_iops = init_iops_scc,
31752 .init_dma = scc_init_dma,
31753 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
31754 index 35fb8da..24d72ef 100644
31755 --- a/drivers/ide/serverworks.c
31756 +++ b/drivers/ide/serverworks.c
31757 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
31758 .cable_detect = svwks_cable_detect,
31759 };
31760
31761 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
31762 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
31763 { /* 0: OSB4 */
31764 .name = DRV_NAME,
31765 .init_chipset = init_chipset_svwks,
31766 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
31767 index ddeda44..46f7e30 100644
31768 --- a/drivers/ide/siimage.c
31769 +++ b/drivers/ide/siimage.c
31770 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
31771 .udma_mask = ATA_UDMA6, \
31772 }
31773
31774 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
31775 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
31776 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
31777 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
31778 };
31779 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
31780 index 4a00225..09e61b4 100644
31781 --- a/drivers/ide/sis5513.c
31782 +++ b/drivers/ide/sis5513.c
31783 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
31784 .cable_detect = sis_cable_detect,
31785 };
31786
31787 -static const struct ide_port_info sis5513_chipset __devinitdata = {
31788 +static const struct ide_port_info sis5513_chipset __devinitconst = {
31789 .name = DRV_NAME,
31790 .init_chipset = init_chipset_sis5513,
31791 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
31792 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
31793 index f21dc2a..d051cd2 100644
31794 --- a/drivers/ide/sl82c105.c
31795 +++ b/drivers/ide/sl82c105.c
31796 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
31797 .dma_sff_read_status = ide_dma_sff_read_status,
31798 };
31799
31800 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
31801 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
31802 .name = DRV_NAME,
31803 .init_chipset = init_chipset_sl82c105,
31804 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
31805 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
31806 index 864ffe0..863a5e9 100644
31807 --- a/drivers/ide/slc90e66.c
31808 +++ b/drivers/ide/slc90e66.c
31809 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
31810 .cable_detect = slc90e66_cable_detect,
31811 };
31812
31813 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
31814 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
31815 .name = DRV_NAME,
31816 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
31817 .port_ops = &slc90e66_port_ops,
31818 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
31819 index 4799d5c..1794678 100644
31820 --- a/drivers/ide/tc86c001.c
31821 +++ b/drivers/ide/tc86c001.c
31822 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
31823 .dma_sff_read_status = ide_dma_sff_read_status,
31824 };
31825
31826 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
31827 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
31828 .name = DRV_NAME,
31829 .init_hwif = init_hwif_tc86c001,
31830 .port_ops = &tc86c001_port_ops,
31831 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
31832 index 281c914..55ce1b8 100644
31833 --- a/drivers/ide/triflex.c
31834 +++ b/drivers/ide/triflex.c
31835 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
31836 .set_dma_mode = triflex_set_mode,
31837 };
31838
31839 -static const struct ide_port_info triflex_device __devinitdata = {
31840 +static const struct ide_port_info triflex_device __devinitconst = {
31841 .name = DRV_NAME,
31842 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
31843 .port_ops = &triflex_port_ops,
31844 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
31845 index 4b42ca0..e494a98 100644
31846 --- a/drivers/ide/trm290.c
31847 +++ b/drivers/ide/trm290.c
31848 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
31849 .dma_check = trm290_dma_check,
31850 };
31851
31852 -static const struct ide_port_info trm290_chipset __devinitdata = {
31853 +static const struct ide_port_info trm290_chipset __devinitconst = {
31854 .name = DRV_NAME,
31855 .init_hwif = init_hwif_trm290,
31856 .tp_ops = &trm290_tp_ops,
31857 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
31858 index f46f49c..eb77678 100644
31859 --- a/drivers/ide/via82cxxx.c
31860 +++ b/drivers/ide/via82cxxx.c
31861 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
31862 .cable_detect = via82cxxx_cable_detect,
31863 };
31864
31865 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
31866 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
31867 .name = DRV_NAME,
31868 .init_chipset = init_chipset_via82cxxx,
31869 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
31870 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
31871 index 73d4531..c90cd2d 100644
31872 --- a/drivers/ieee802154/fakehard.c
31873 +++ b/drivers/ieee802154/fakehard.c
31874 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
31875 phy->transmit_power = 0xbf;
31876
31877 dev->netdev_ops = &fake_ops;
31878 - dev->ml_priv = &fake_mlme;
31879 + dev->ml_priv = (void *)&fake_mlme;
31880
31881 priv = netdev_priv(dev);
31882 priv->phy = phy;
31883 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
31884 index c889aae..6cf5aa7 100644
31885 --- a/drivers/infiniband/core/cm.c
31886 +++ b/drivers/infiniband/core/cm.c
31887 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
31888
31889 struct cm_counter_group {
31890 struct kobject obj;
31891 - atomic_long_t counter[CM_ATTR_COUNT];
31892 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
31893 };
31894
31895 struct cm_counter_attribute {
31896 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
31897 struct ib_mad_send_buf *msg = NULL;
31898 int ret;
31899
31900 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31901 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31902 counter[CM_REQ_COUNTER]);
31903
31904 /* Quick state check to discard duplicate REQs. */
31905 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
31906 if (!cm_id_priv)
31907 return;
31908
31909 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31910 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31911 counter[CM_REP_COUNTER]);
31912 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
31913 if (ret)
31914 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
31915 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
31916 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
31917 spin_unlock_irq(&cm_id_priv->lock);
31918 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31919 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31920 counter[CM_RTU_COUNTER]);
31921 goto out;
31922 }
31923 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
31924 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
31925 dreq_msg->local_comm_id);
31926 if (!cm_id_priv) {
31927 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31928 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31929 counter[CM_DREQ_COUNTER]);
31930 cm_issue_drep(work->port, work->mad_recv_wc);
31931 return -EINVAL;
31932 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
31933 case IB_CM_MRA_REP_RCVD:
31934 break;
31935 case IB_CM_TIMEWAIT:
31936 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31937 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31938 counter[CM_DREQ_COUNTER]);
31939 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31940 goto unlock;
31941 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
31942 cm_free_msg(msg);
31943 goto deref;
31944 case IB_CM_DREQ_RCVD:
31945 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31946 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31947 counter[CM_DREQ_COUNTER]);
31948 goto unlock;
31949 default:
31950 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
31951 ib_modify_mad(cm_id_priv->av.port->mad_agent,
31952 cm_id_priv->msg, timeout)) {
31953 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
31954 - atomic_long_inc(&work->port->
31955 + atomic_long_inc_unchecked(&work->port->
31956 counter_group[CM_RECV_DUPLICATES].
31957 counter[CM_MRA_COUNTER]);
31958 goto out;
31959 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
31960 break;
31961 case IB_CM_MRA_REQ_RCVD:
31962 case IB_CM_MRA_REP_RCVD:
31963 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31964 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31965 counter[CM_MRA_COUNTER]);
31966 /* fall through */
31967 default:
31968 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
31969 case IB_CM_LAP_IDLE:
31970 break;
31971 case IB_CM_MRA_LAP_SENT:
31972 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31973 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31974 counter[CM_LAP_COUNTER]);
31975 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31976 goto unlock;
31977 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
31978 cm_free_msg(msg);
31979 goto deref;
31980 case IB_CM_LAP_RCVD:
31981 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31982 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31983 counter[CM_LAP_COUNTER]);
31984 goto unlock;
31985 default:
31986 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
31987 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
31988 if (cur_cm_id_priv) {
31989 spin_unlock_irq(&cm.lock);
31990 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31991 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31992 counter[CM_SIDR_REQ_COUNTER]);
31993 goto out; /* Duplicate message. */
31994 }
31995 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
31996 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
31997 msg->retries = 1;
31998
31999 - atomic_long_add(1 + msg->retries,
32000 + atomic_long_add_unchecked(1 + msg->retries,
32001 &port->counter_group[CM_XMIT].counter[attr_index]);
32002 if (msg->retries)
32003 - atomic_long_add(msg->retries,
32004 + atomic_long_add_unchecked(msg->retries,
32005 &port->counter_group[CM_XMIT_RETRIES].
32006 counter[attr_index]);
32007
32008 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
32009 }
32010
32011 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
32012 - atomic_long_inc(&port->counter_group[CM_RECV].
32013 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
32014 counter[attr_id - CM_ATTR_ID_OFFSET]);
32015
32016 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
32017 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
32018 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
32019
32020 return sprintf(buf, "%ld\n",
32021 - atomic_long_read(&group->counter[cm_attr->index]));
32022 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
32023 }
32024
32025 static const struct sysfs_ops cm_counter_ops = {
32026 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
32027 index 176c8f9..2627b62 100644
32028 --- a/drivers/infiniband/core/fmr_pool.c
32029 +++ b/drivers/infiniband/core/fmr_pool.c
32030 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
32031
32032 struct task_struct *thread;
32033
32034 - atomic_t req_ser;
32035 - atomic_t flush_ser;
32036 + atomic_unchecked_t req_ser;
32037 + atomic_unchecked_t flush_ser;
32038
32039 wait_queue_head_t force_wait;
32040 };
32041 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32042 struct ib_fmr_pool *pool = pool_ptr;
32043
32044 do {
32045 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
32046 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
32047 ib_fmr_batch_release(pool);
32048
32049 - atomic_inc(&pool->flush_ser);
32050 + atomic_inc_unchecked(&pool->flush_ser);
32051 wake_up_interruptible(&pool->force_wait);
32052
32053 if (pool->flush_function)
32054 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32055 }
32056
32057 set_current_state(TASK_INTERRUPTIBLE);
32058 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
32059 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
32060 !kthread_should_stop())
32061 schedule();
32062 __set_current_state(TASK_RUNNING);
32063 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
32064 pool->dirty_watermark = params->dirty_watermark;
32065 pool->dirty_len = 0;
32066 spin_lock_init(&pool->pool_lock);
32067 - atomic_set(&pool->req_ser, 0);
32068 - atomic_set(&pool->flush_ser, 0);
32069 + atomic_set_unchecked(&pool->req_ser, 0);
32070 + atomic_set_unchecked(&pool->flush_ser, 0);
32071 init_waitqueue_head(&pool->force_wait);
32072
32073 pool->thread = kthread_run(ib_fmr_cleanup_thread,
32074 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
32075 }
32076 spin_unlock_irq(&pool->pool_lock);
32077
32078 - serial = atomic_inc_return(&pool->req_ser);
32079 + serial = atomic_inc_return_unchecked(&pool->req_ser);
32080 wake_up_process(pool->thread);
32081
32082 if (wait_event_interruptible(pool->force_wait,
32083 - atomic_read(&pool->flush_ser) - serial >= 0))
32084 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
32085 return -EINTR;
32086
32087 return 0;
32088 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
32089 } else {
32090 list_add_tail(&fmr->list, &pool->dirty_list);
32091 if (++pool->dirty_len >= pool->dirty_watermark) {
32092 - atomic_inc(&pool->req_ser);
32093 + atomic_inc_unchecked(&pool->req_ser);
32094 wake_up_process(pool->thread);
32095 }
32096 }
32097 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32098 index 40c8353..946b0e4 100644
32099 --- a/drivers/infiniband/hw/cxgb4/mem.c
32100 +++ b/drivers/infiniband/hw/cxgb4/mem.c
32101 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32102 int err;
32103 struct fw_ri_tpte tpt;
32104 u32 stag_idx;
32105 - static atomic_t key;
32106 + static atomic_unchecked_t key;
32107
32108 if (c4iw_fatal_error(rdev))
32109 return -EIO;
32110 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32111 &rdev->resource.tpt_fifo_lock);
32112 if (!stag_idx)
32113 return -ENOMEM;
32114 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32115 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32116 }
32117 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32118 __func__, stag_state, type, pdid, stag_idx);
32119 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32120 index 79b3dbc..96e5fcc 100644
32121 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
32122 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32123 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32124 struct ib_atomic_eth *ateth;
32125 struct ipath_ack_entry *e;
32126 u64 vaddr;
32127 - atomic64_t *maddr;
32128 + atomic64_unchecked_t *maddr;
32129 u64 sdata;
32130 u32 rkey;
32131 u8 next;
32132 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32133 IB_ACCESS_REMOTE_ATOMIC)))
32134 goto nack_acc_unlck;
32135 /* Perform atomic OP and save result. */
32136 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32137 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32138 sdata = be64_to_cpu(ateth->swap_data);
32139 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32140 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32141 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32142 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32143 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32144 be64_to_cpu(ateth->compare_data),
32145 sdata);
32146 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32147 index 1f95bba..9530f87 100644
32148 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32149 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32150 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
32151 unsigned long flags;
32152 struct ib_wc wc;
32153 u64 sdata;
32154 - atomic64_t *maddr;
32155 + atomic64_unchecked_t *maddr;
32156 enum ib_wc_status send_status;
32157
32158 /*
32159 @@ -382,11 +382,11 @@ again:
32160 IB_ACCESS_REMOTE_ATOMIC)))
32161 goto acc_err;
32162 /* Perform atomic OP and save result. */
32163 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32164 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32165 sdata = wqe->wr.wr.atomic.compare_add;
32166 *(u64 *) sqp->s_sge.sge.vaddr =
32167 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32168 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32169 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32170 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32171 sdata, wqe->wr.wr.atomic.swap);
32172 goto send_comp;
32173 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
32174 index 7140199..da60063 100644
32175 --- a/drivers/infiniband/hw/nes/nes.c
32176 +++ b/drivers/infiniband/hw/nes/nes.c
32177 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
32178 LIST_HEAD(nes_adapter_list);
32179 static LIST_HEAD(nes_dev_list);
32180
32181 -atomic_t qps_destroyed;
32182 +atomic_unchecked_t qps_destroyed;
32183
32184 static unsigned int ee_flsh_adapter;
32185 static unsigned int sysfs_nonidx_addr;
32186 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
32187 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32188 struct nes_adapter *nesadapter = nesdev->nesadapter;
32189
32190 - atomic_inc(&qps_destroyed);
32191 + atomic_inc_unchecked(&qps_destroyed);
32192
32193 /* Free the control structures */
32194
32195 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
32196 index c438e46..ca30356 100644
32197 --- a/drivers/infiniband/hw/nes/nes.h
32198 +++ b/drivers/infiniband/hw/nes/nes.h
32199 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
32200 extern unsigned int wqm_quanta;
32201 extern struct list_head nes_adapter_list;
32202
32203 -extern atomic_t cm_connects;
32204 -extern atomic_t cm_accepts;
32205 -extern atomic_t cm_disconnects;
32206 -extern atomic_t cm_closes;
32207 -extern atomic_t cm_connecteds;
32208 -extern atomic_t cm_connect_reqs;
32209 -extern atomic_t cm_rejects;
32210 -extern atomic_t mod_qp_timouts;
32211 -extern atomic_t qps_created;
32212 -extern atomic_t qps_destroyed;
32213 -extern atomic_t sw_qps_destroyed;
32214 +extern atomic_unchecked_t cm_connects;
32215 +extern atomic_unchecked_t cm_accepts;
32216 +extern atomic_unchecked_t cm_disconnects;
32217 +extern atomic_unchecked_t cm_closes;
32218 +extern atomic_unchecked_t cm_connecteds;
32219 +extern atomic_unchecked_t cm_connect_reqs;
32220 +extern atomic_unchecked_t cm_rejects;
32221 +extern atomic_unchecked_t mod_qp_timouts;
32222 +extern atomic_unchecked_t qps_created;
32223 +extern atomic_unchecked_t qps_destroyed;
32224 +extern atomic_unchecked_t sw_qps_destroyed;
32225 extern u32 mh_detected;
32226 extern u32 mh_pauses_sent;
32227 extern u32 cm_packets_sent;
32228 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
32229 extern u32 cm_packets_received;
32230 extern u32 cm_packets_dropped;
32231 extern u32 cm_packets_retrans;
32232 -extern atomic_t cm_listens_created;
32233 -extern atomic_t cm_listens_destroyed;
32234 +extern atomic_unchecked_t cm_listens_created;
32235 +extern atomic_unchecked_t cm_listens_destroyed;
32236 extern u32 cm_backlog_drops;
32237 -extern atomic_t cm_loopbacks;
32238 -extern atomic_t cm_nodes_created;
32239 -extern atomic_t cm_nodes_destroyed;
32240 -extern atomic_t cm_accel_dropped_pkts;
32241 -extern atomic_t cm_resets_recvd;
32242 -extern atomic_t pau_qps_created;
32243 -extern atomic_t pau_qps_destroyed;
32244 +extern atomic_unchecked_t cm_loopbacks;
32245 +extern atomic_unchecked_t cm_nodes_created;
32246 +extern atomic_unchecked_t cm_nodes_destroyed;
32247 +extern atomic_unchecked_t cm_accel_dropped_pkts;
32248 +extern atomic_unchecked_t cm_resets_recvd;
32249 +extern atomic_unchecked_t pau_qps_created;
32250 +extern atomic_unchecked_t pau_qps_destroyed;
32251
32252 extern u32 int_mod_timer_init;
32253 extern u32 int_mod_cq_depth_256;
32254 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
32255 index 71edfbb..15b62ae 100644
32256 --- a/drivers/infiniband/hw/nes/nes_cm.c
32257 +++ b/drivers/infiniband/hw/nes/nes_cm.c
32258 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32259 u32 cm_packets_retrans;
32260 u32 cm_packets_created;
32261 u32 cm_packets_received;
32262 -atomic_t cm_listens_created;
32263 -atomic_t cm_listens_destroyed;
32264 +atomic_unchecked_t cm_listens_created;
32265 +atomic_unchecked_t cm_listens_destroyed;
32266 u32 cm_backlog_drops;
32267 -atomic_t cm_loopbacks;
32268 -atomic_t cm_nodes_created;
32269 -atomic_t cm_nodes_destroyed;
32270 -atomic_t cm_accel_dropped_pkts;
32271 -atomic_t cm_resets_recvd;
32272 +atomic_unchecked_t cm_loopbacks;
32273 +atomic_unchecked_t cm_nodes_created;
32274 +atomic_unchecked_t cm_nodes_destroyed;
32275 +atomic_unchecked_t cm_accel_dropped_pkts;
32276 +atomic_unchecked_t cm_resets_recvd;
32277
32278 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32279 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32280 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
32281
32282 static struct nes_cm_core *g_cm_core;
32283
32284 -atomic_t cm_connects;
32285 -atomic_t cm_accepts;
32286 -atomic_t cm_disconnects;
32287 -atomic_t cm_closes;
32288 -atomic_t cm_connecteds;
32289 -atomic_t cm_connect_reqs;
32290 -atomic_t cm_rejects;
32291 +atomic_unchecked_t cm_connects;
32292 +atomic_unchecked_t cm_accepts;
32293 +atomic_unchecked_t cm_disconnects;
32294 +atomic_unchecked_t cm_closes;
32295 +atomic_unchecked_t cm_connecteds;
32296 +atomic_unchecked_t cm_connect_reqs;
32297 +atomic_unchecked_t cm_rejects;
32298
32299 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32300 {
32301 @@ -1279,7 +1279,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
32302 kfree(listener);
32303 listener = NULL;
32304 ret = 0;
32305 - atomic_inc(&cm_listens_destroyed);
32306 + atomic_inc_unchecked(&cm_listens_destroyed);
32307 } else {
32308 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32309 }
32310 @@ -1482,7 +1482,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
32311 cm_node->rem_mac);
32312
32313 add_hte_node(cm_core, cm_node);
32314 - atomic_inc(&cm_nodes_created);
32315 + atomic_inc_unchecked(&cm_nodes_created);
32316
32317 return cm_node;
32318 }
32319 @@ -1540,7 +1540,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
32320 }
32321
32322 atomic_dec(&cm_core->node_cnt);
32323 - atomic_inc(&cm_nodes_destroyed);
32324 + atomic_inc_unchecked(&cm_nodes_destroyed);
32325 nesqp = cm_node->nesqp;
32326 if (nesqp) {
32327 nesqp->cm_node = NULL;
32328 @@ -1604,7 +1604,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
32329
32330 static void drop_packet(struct sk_buff *skb)
32331 {
32332 - atomic_inc(&cm_accel_dropped_pkts);
32333 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32334 dev_kfree_skb_any(skb);
32335 }
32336
32337 @@ -1667,7 +1667,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
32338 {
32339
32340 int reset = 0; /* whether to send reset in case of err.. */
32341 - atomic_inc(&cm_resets_recvd);
32342 + atomic_inc_unchecked(&cm_resets_recvd);
32343 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32344 " refcnt=%d\n", cm_node, cm_node->state,
32345 atomic_read(&cm_node->ref_count));
32346 @@ -2308,7 +2308,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
32347 rem_ref_cm_node(cm_node->cm_core, cm_node);
32348 return NULL;
32349 }
32350 - atomic_inc(&cm_loopbacks);
32351 + atomic_inc_unchecked(&cm_loopbacks);
32352 loopbackremotenode->loopbackpartner = cm_node;
32353 loopbackremotenode->tcp_cntxt.rcv_wscale =
32354 NES_CM_DEFAULT_RCV_WND_SCALE;
32355 @@ -2583,7 +2583,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
32356 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
32357 else {
32358 rem_ref_cm_node(cm_core, cm_node);
32359 - atomic_inc(&cm_accel_dropped_pkts);
32360 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32361 dev_kfree_skb_any(skb);
32362 }
32363 break;
32364 @@ -2890,7 +2890,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32365
32366 if ((cm_id) && (cm_id->event_handler)) {
32367 if (issue_disconn) {
32368 - atomic_inc(&cm_disconnects);
32369 + atomic_inc_unchecked(&cm_disconnects);
32370 cm_event.event = IW_CM_EVENT_DISCONNECT;
32371 cm_event.status = disconn_status;
32372 cm_event.local_addr = cm_id->local_addr;
32373 @@ -2912,7 +2912,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32374 }
32375
32376 if (issue_close) {
32377 - atomic_inc(&cm_closes);
32378 + atomic_inc_unchecked(&cm_closes);
32379 nes_disconnect(nesqp, 1);
32380
32381 cm_id->provider_data = nesqp;
32382 @@ -3048,7 +3048,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32383
32384 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
32385 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
32386 - atomic_inc(&cm_accepts);
32387 + atomic_inc_unchecked(&cm_accepts);
32388
32389 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
32390 netdev_refcnt_read(nesvnic->netdev));
32391 @@ -3250,7 +3250,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
32392 struct nes_cm_core *cm_core;
32393 u8 *start_buff;
32394
32395 - atomic_inc(&cm_rejects);
32396 + atomic_inc_unchecked(&cm_rejects);
32397 cm_node = (struct nes_cm_node *)cm_id->provider_data;
32398 loopback = cm_node->loopbackpartner;
32399 cm_core = cm_node->cm_core;
32400 @@ -3310,7 +3310,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32401 ntohl(cm_id->local_addr.sin_addr.s_addr),
32402 ntohs(cm_id->local_addr.sin_port));
32403
32404 - atomic_inc(&cm_connects);
32405 + atomic_inc_unchecked(&cm_connects);
32406 nesqp->active_conn = 1;
32407
32408 /* cache the cm_id in the qp */
32409 @@ -3416,7 +3416,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
32410 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
32411 return err;
32412 }
32413 - atomic_inc(&cm_listens_created);
32414 + atomic_inc_unchecked(&cm_listens_created);
32415 }
32416
32417 cm_id->add_ref(cm_id);
32418 @@ -3517,7 +3517,7 @@ static void cm_event_connected(struct nes_cm_event *event)
32419
32420 if (nesqp->destroyed)
32421 return;
32422 - atomic_inc(&cm_connecteds);
32423 + atomic_inc_unchecked(&cm_connecteds);
32424 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
32425 " local port 0x%04X. jiffies = %lu.\n",
32426 nesqp->hwqp.qp_id,
32427 @@ -3704,7 +3704,7 @@ static void cm_event_reset(struct nes_cm_event *event)
32428
32429 cm_id->add_ref(cm_id);
32430 ret = cm_id->event_handler(cm_id, &cm_event);
32431 - atomic_inc(&cm_closes);
32432 + atomic_inc_unchecked(&cm_closes);
32433 cm_event.event = IW_CM_EVENT_CLOSE;
32434 cm_event.status = 0;
32435 cm_event.provider_data = cm_id->provider_data;
32436 @@ -3740,7 +3740,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
32437 return;
32438 cm_id = cm_node->cm_id;
32439
32440 - atomic_inc(&cm_connect_reqs);
32441 + atomic_inc_unchecked(&cm_connect_reqs);
32442 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32443 cm_node, cm_id, jiffies);
32444
32445 @@ -3780,7 +3780,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
32446 return;
32447 cm_id = cm_node->cm_id;
32448
32449 - atomic_inc(&cm_connect_reqs);
32450 + atomic_inc_unchecked(&cm_connect_reqs);
32451 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32452 cm_node, cm_id, jiffies);
32453
32454 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
32455 index 3ba7be3..c81f6ff 100644
32456 --- a/drivers/infiniband/hw/nes/nes_mgt.c
32457 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
32458 @@ -40,8 +40,8 @@
32459 #include "nes.h"
32460 #include "nes_mgt.h"
32461
32462 -atomic_t pau_qps_created;
32463 -atomic_t pau_qps_destroyed;
32464 +atomic_unchecked_t pau_qps_created;
32465 +atomic_unchecked_t pau_qps_destroyed;
32466
32467 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
32468 {
32469 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
32470 {
32471 struct sk_buff *skb;
32472 unsigned long flags;
32473 - atomic_inc(&pau_qps_destroyed);
32474 + atomic_inc_unchecked(&pau_qps_destroyed);
32475
32476 /* Free packets that have not yet been forwarded */
32477 /* Lock is acquired by skb_dequeue when removing the skb */
32478 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
32479 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
32480 skb_queue_head_init(&nesqp->pau_list);
32481 spin_lock_init(&nesqp->pau_lock);
32482 - atomic_inc(&pau_qps_created);
32483 + atomic_inc_unchecked(&pau_qps_created);
32484 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
32485 }
32486
32487 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
32488 index f3a3ecf..57d311d 100644
32489 --- a/drivers/infiniband/hw/nes/nes_nic.c
32490 +++ b/drivers/infiniband/hw/nes/nes_nic.c
32491 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
32492 target_stat_values[++index] = mh_detected;
32493 target_stat_values[++index] = mh_pauses_sent;
32494 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
32495 - target_stat_values[++index] = atomic_read(&cm_connects);
32496 - target_stat_values[++index] = atomic_read(&cm_accepts);
32497 - target_stat_values[++index] = atomic_read(&cm_disconnects);
32498 - target_stat_values[++index] = atomic_read(&cm_connecteds);
32499 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
32500 - target_stat_values[++index] = atomic_read(&cm_rejects);
32501 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
32502 - target_stat_values[++index] = atomic_read(&qps_created);
32503 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
32504 - target_stat_values[++index] = atomic_read(&qps_destroyed);
32505 - target_stat_values[++index] = atomic_read(&cm_closes);
32506 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
32507 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
32508 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
32509 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
32510 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
32511 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
32512 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
32513 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
32514 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
32515 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
32516 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
32517 target_stat_values[++index] = cm_packets_sent;
32518 target_stat_values[++index] = cm_packets_bounced;
32519 target_stat_values[++index] = cm_packets_created;
32520 target_stat_values[++index] = cm_packets_received;
32521 target_stat_values[++index] = cm_packets_dropped;
32522 target_stat_values[++index] = cm_packets_retrans;
32523 - target_stat_values[++index] = atomic_read(&cm_listens_created);
32524 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
32525 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
32526 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
32527 target_stat_values[++index] = cm_backlog_drops;
32528 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
32529 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
32530 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
32531 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
32532 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
32533 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
32534 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
32535 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
32536 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
32537 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
32538 target_stat_values[++index] = nesadapter->free_4kpbl;
32539 target_stat_values[++index] = nesadapter->free_256pbl;
32540 target_stat_values[++index] = int_mod_timer_init;
32541 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
32542 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
32543 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
32544 - target_stat_values[++index] = atomic_read(&pau_qps_created);
32545 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
32546 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
32547 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
32548 }
32549
32550 /**
32551 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
32552 index 8b8812d..a5e1133 100644
32553 --- a/drivers/infiniband/hw/nes/nes_verbs.c
32554 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
32555 @@ -46,9 +46,9 @@
32556
32557 #include <rdma/ib_umem.h>
32558
32559 -atomic_t mod_qp_timouts;
32560 -atomic_t qps_created;
32561 -atomic_t sw_qps_destroyed;
32562 +atomic_unchecked_t mod_qp_timouts;
32563 +atomic_unchecked_t qps_created;
32564 +atomic_unchecked_t sw_qps_destroyed;
32565
32566 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
32567
32568 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
32569 if (init_attr->create_flags)
32570 return ERR_PTR(-EINVAL);
32571
32572 - atomic_inc(&qps_created);
32573 + atomic_inc_unchecked(&qps_created);
32574 switch (init_attr->qp_type) {
32575 case IB_QPT_RC:
32576 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
32577 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
32578 struct iw_cm_event cm_event;
32579 int ret = 0;
32580
32581 - atomic_inc(&sw_qps_destroyed);
32582 + atomic_inc_unchecked(&sw_qps_destroyed);
32583 nesqp->destroyed = 1;
32584
32585 /* Blow away the connection if it exists. */
32586 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
32587 index 6b811e3..f8acf88 100644
32588 --- a/drivers/infiniband/hw/qib/qib.h
32589 +++ b/drivers/infiniband/hw/qib/qib.h
32590 @@ -51,6 +51,7 @@
32591 #include <linux/completion.h>
32592 #include <linux/kref.h>
32593 #include <linux/sched.h>
32594 +#include <linux/slab.h>
32595
32596 #include "qib_common.h"
32597 #include "qib_verbs.h"
32598 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
32599 index da739d9..da1c7f4 100644
32600 --- a/drivers/input/gameport/gameport.c
32601 +++ b/drivers/input/gameport/gameport.c
32602 @@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
32603 */
32604 static void gameport_init_port(struct gameport *gameport)
32605 {
32606 - static atomic_t gameport_no = ATOMIC_INIT(0);
32607 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32608
32609 __module_get(THIS_MODULE);
32610
32611 mutex_init(&gameport->drv_mutex);
32612 device_initialize(&gameport->dev);
32613 dev_set_name(&gameport->dev, "gameport%lu",
32614 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
32615 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32616 gameport->dev.bus = &gameport_bus;
32617 gameport->dev.release = gameport_release_port;
32618 if (gameport->parent)
32619 diff --git a/drivers/input/input.c b/drivers/input/input.c
32620 index 8921c61..f5cd63d 100644
32621 --- a/drivers/input/input.c
32622 +++ b/drivers/input/input.c
32623 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
32624 */
32625 int input_register_device(struct input_dev *dev)
32626 {
32627 - static atomic_t input_no = ATOMIC_INIT(0);
32628 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32629 struct input_handler *handler;
32630 const char *path;
32631 int error;
32632 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
32633 dev->setkeycode = input_default_setkeycode;
32634
32635 dev_set_name(&dev->dev, "input%ld",
32636 - (unsigned long) atomic_inc_return(&input_no) - 1);
32637 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32638
32639 error = device_add(&dev->dev);
32640 if (error)
32641 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
32642 index b8d8611..7a4a04b 100644
32643 --- a/drivers/input/joystick/sidewinder.c
32644 +++ b/drivers/input/joystick/sidewinder.c
32645 @@ -30,6 +30,7 @@
32646 #include <linux/kernel.h>
32647 #include <linux/module.h>
32648 #include <linux/slab.h>
32649 +#include <linux/sched.h>
32650 #include <linux/init.h>
32651 #include <linux/input.h>
32652 #include <linux/gameport.h>
32653 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32654 index fd7a0d5..a4af10c 100644
32655 --- a/drivers/input/joystick/xpad.c
32656 +++ b/drivers/input/joystick/xpad.c
32657 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32658
32659 static int xpad_led_probe(struct usb_xpad *xpad)
32660 {
32661 - static atomic_t led_seq = ATOMIC_INIT(0);
32662 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32663 long led_no;
32664 struct xpad_led *led;
32665 struct led_classdev *led_cdev;
32666 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32667 if (!led)
32668 return -ENOMEM;
32669
32670 - led_no = (long)atomic_inc_return(&led_seq) - 1;
32671 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32672
32673 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32674 led->xpad = xpad;
32675 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32676 index 0110b5a..d3ad144 100644
32677 --- a/drivers/input/mousedev.c
32678 +++ b/drivers/input/mousedev.c
32679 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32680
32681 spin_unlock_irq(&client->packet_lock);
32682
32683 - if (copy_to_user(buffer, data, count))
32684 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
32685 return -EFAULT;
32686
32687 return count;
32688 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32689 index d0f7533..fb8215b 100644
32690 --- a/drivers/input/serio/serio.c
32691 +++ b/drivers/input/serio/serio.c
32692 @@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
32693 */
32694 static void serio_init_port(struct serio *serio)
32695 {
32696 - static atomic_t serio_no = ATOMIC_INIT(0);
32697 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32698
32699 __module_get(THIS_MODULE);
32700
32701 @@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
32702 mutex_init(&serio->drv_mutex);
32703 device_initialize(&serio->dev);
32704 dev_set_name(&serio->dev, "serio%ld",
32705 - (long)atomic_inc_return(&serio_no) - 1);
32706 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
32707 serio->dev.bus = &serio_bus;
32708 serio->dev.release = serio_release_port;
32709 serio->dev.groups = serio_device_attr_groups;
32710 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32711 index b902794..fc7b85b 100644
32712 --- a/drivers/isdn/capi/capi.c
32713 +++ b/drivers/isdn/capi/capi.c
32714 @@ -83,8 +83,8 @@ struct capiminor {
32715
32716 struct capi20_appl *ap;
32717 u32 ncci;
32718 - atomic_t datahandle;
32719 - atomic_t msgid;
32720 + atomic_unchecked_t datahandle;
32721 + atomic_unchecked_t msgid;
32722
32723 struct tty_port port;
32724 int ttyinstop;
32725 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32726 capimsg_setu16(s, 2, mp->ap->applid);
32727 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32728 capimsg_setu8 (s, 5, CAPI_RESP);
32729 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32730 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32731 capimsg_setu32(s, 8, mp->ncci);
32732 capimsg_setu16(s, 12, datahandle);
32733 }
32734 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
32735 mp->outbytes -= len;
32736 spin_unlock_bh(&mp->outlock);
32737
32738 - datahandle = atomic_inc_return(&mp->datahandle);
32739 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32740 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32741 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32742 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32743 capimsg_setu16(skb->data, 2, mp->ap->applid);
32744 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
32745 capimsg_setu8 (skb->data, 5, CAPI_REQ);
32746 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
32747 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
32748 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
32749 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
32750 capimsg_setu16(skb->data, 16, len); /* Data length */
32751 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
32752 index 821f7ac..28d4030 100644
32753 --- a/drivers/isdn/hardware/avm/b1.c
32754 +++ b/drivers/isdn/hardware/avm/b1.c
32755 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
32756 }
32757 if (left) {
32758 if (t4file->user) {
32759 - if (copy_from_user(buf, dp, left))
32760 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32761 return -EFAULT;
32762 } else {
32763 memcpy(buf, dp, left);
32764 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
32765 }
32766 if (left) {
32767 if (config->user) {
32768 - if (copy_from_user(buf, dp, left))
32769 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32770 return -EFAULT;
32771 } else {
32772 memcpy(buf, dp, left);
32773 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
32774 index dd6b53a..19d9ee6 100644
32775 --- a/drivers/isdn/hardware/eicon/divasync.h
32776 +++ b/drivers/isdn/hardware/eicon/divasync.h
32777 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
32778 } diva_didd_add_adapter_t;
32779 typedef struct _diva_didd_remove_adapter {
32780 IDI_CALL p_request;
32781 -} diva_didd_remove_adapter_t;
32782 +} __no_const diva_didd_remove_adapter_t;
32783 typedef struct _diva_didd_read_adapter_array {
32784 void *buffer;
32785 dword length;
32786 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
32787 index d303e65..28bcb7b 100644
32788 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
32789 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
32790 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
32791 typedef struct _diva_os_idi_adapter_interface {
32792 diva_init_card_proc_t cleanup_adapter_proc;
32793 diva_cmd_card_proc_t cmd_proc;
32794 -} diva_os_idi_adapter_interface_t;
32795 +} __no_const diva_os_idi_adapter_interface_t;
32796
32797 typedef struct _diva_os_xdi_adapter {
32798 struct list_head link;
32799 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
32800 index e74df7c..03a03ba 100644
32801 --- a/drivers/isdn/icn/icn.c
32802 +++ b/drivers/isdn/icn/icn.c
32803 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
32804 if (count > len)
32805 count = len;
32806 if (user) {
32807 - if (copy_from_user(msg, buf, count))
32808 + if (count > sizeof msg || copy_from_user(msg, buf, count))
32809 return -EFAULT;
32810 } else
32811 memcpy(msg, buf, count);
32812 diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
32813 index 8bc4915..4cc6a2e 100644
32814 --- a/drivers/leds/leds-mc13783.c
32815 +++ b/drivers/leds/leds-mc13783.c
32816 @@ -280,7 +280,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
32817 return -EINVAL;
32818 }
32819
32820 - led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
32821 + led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
32822 if (led == NULL) {
32823 dev_err(&pdev->dev, "failed to alloc memory\n");
32824 return -ENOMEM;
32825 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
32826 index b5fdcb7..5b6c59f 100644
32827 --- a/drivers/lguest/core.c
32828 +++ b/drivers/lguest/core.c
32829 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
32830 * it's worked so far. The end address needs +1 because __get_vm_area
32831 * allocates an extra guard page, so we need space for that.
32832 */
32833 +
32834 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32835 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32836 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
32837 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32838 +#else
32839 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32840 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
32841 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32842 +#endif
32843 +
32844 if (!switcher_vma) {
32845 err = -ENOMEM;
32846 printk("lguest: could not map switcher pages high\n");
32847 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
32848 * Now the Switcher is mapped at the right address, we can't fail!
32849 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
32850 */
32851 - memcpy(switcher_vma->addr, start_switcher_text,
32852 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
32853 end_switcher_text - start_switcher_text);
32854
32855 printk(KERN_INFO "lguest: mapped switcher at %p\n",
32856 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
32857 index 3980903..ce25c5e 100644
32858 --- a/drivers/lguest/x86/core.c
32859 +++ b/drivers/lguest/x86/core.c
32860 @@ -59,7 +59,7 @@ static struct {
32861 /* Offset from where switcher.S was compiled to where we've copied it */
32862 static unsigned long switcher_offset(void)
32863 {
32864 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
32865 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
32866 }
32867
32868 /* This cpu's struct lguest_pages. */
32869 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
32870 * These copies are pretty cheap, so we do them unconditionally: */
32871 /* Save the current Host top-level page directory.
32872 */
32873 +
32874 +#ifdef CONFIG_PAX_PER_CPU_PGD
32875 + pages->state.host_cr3 = read_cr3();
32876 +#else
32877 pages->state.host_cr3 = __pa(current->mm->pgd);
32878 +#endif
32879 +
32880 /*
32881 * Set up the Guest's page tables to see this CPU's pages (and no
32882 * other CPU's pages).
32883 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
32884 * compiled-in switcher code and the high-mapped copy we just made.
32885 */
32886 for (i = 0; i < IDT_ENTRIES; i++)
32887 - default_idt_entries[i] += switcher_offset();
32888 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
32889
32890 /*
32891 * Set up the Switcher's per-cpu areas.
32892 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
32893 * it will be undisturbed when we switch. To change %cs and jump we
32894 * need this structure to feed to Intel's "lcall" instruction.
32895 */
32896 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
32897 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
32898 lguest_entry.segment = LGUEST_CS;
32899
32900 /*
32901 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
32902 index 40634b0..4f5855e 100644
32903 --- a/drivers/lguest/x86/switcher_32.S
32904 +++ b/drivers/lguest/x86/switcher_32.S
32905 @@ -87,6 +87,7 @@
32906 #include <asm/page.h>
32907 #include <asm/segment.h>
32908 #include <asm/lguest.h>
32909 +#include <asm/processor-flags.h>
32910
32911 // We mark the start of the code to copy
32912 // It's placed in .text tho it's never run here
32913 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
32914 // Changes type when we load it: damn Intel!
32915 // For after we switch over our page tables
32916 // That entry will be read-only: we'd crash.
32917 +
32918 +#ifdef CONFIG_PAX_KERNEXEC
32919 + mov %cr0, %edx
32920 + xor $X86_CR0_WP, %edx
32921 + mov %edx, %cr0
32922 +#endif
32923 +
32924 movl $(GDT_ENTRY_TSS*8), %edx
32925 ltr %dx
32926
32927 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
32928 // Let's clear it again for our return.
32929 // The GDT descriptor of the Host
32930 // Points to the table after two "size" bytes
32931 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
32932 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
32933 // Clear "used" from type field (byte 5, bit 2)
32934 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
32935 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
32936 +
32937 +#ifdef CONFIG_PAX_KERNEXEC
32938 + mov %cr0, %eax
32939 + xor $X86_CR0_WP, %eax
32940 + mov %eax, %cr0
32941 +#endif
32942
32943 // Once our page table's switched, the Guest is live!
32944 // The Host fades as we run this final step.
32945 @@ -295,13 +309,12 @@ deliver_to_host:
32946 // I consulted gcc, and it gave
32947 // These instructions, which I gladly credit:
32948 leal (%edx,%ebx,8), %eax
32949 - movzwl (%eax),%edx
32950 - movl 4(%eax), %eax
32951 - xorw %ax, %ax
32952 - orl %eax, %edx
32953 + movl 4(%eax), %edx
32954 + movw (%eax), %dx
32955 // Now the address of the handler's in %edx
32956 // We call it now: its "iret" drops us home.
32957 - jmp *%edx
32958 + ljmp $__KERNEL_CS, $1f
32959 +1: jmp *%edx
32960
32961 // Every interrupt can come to us here
32962 // But we must truly tell each apart.
32963 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
32964 index 20e5c2c..9e849a9 100644
32965 --- a/drivers/macintosh/macio_asic.c
32966 +++ b/drivers/macintosh/macio_asic.c
32967 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
32968 * MacIO is matched against any Apple ID, it's probe() function
32969 * will then decide wether it applies or not
32970 */
32971 -static const struct pci_device_id __devinitdata pci_ids [] = { {
32972 +static const struct pci_device_id __devinitconst pci_ids [] = { {
32973 .vendor = PCI_VENDOR_ID_APPLE,
32974 .device = PCI_ANY_ID,
32975 .subvendor = PCI_ANY_ID,
32976 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
32977 index 17e2b47..bcbeec4 100644
32978 --- a/drivers/md/bitmap.c
32979 +++ b/drivers/md/bitmap.c
32980 @@ -1823,7 +1823,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
32981 chunk_kb ? "KB" : "B");
32982 if (bitmap->file) {
32983 seq_printf(seq, ", file: ");
32984 - seq_path(seq, &bitmap->file->f_path, " \t\n");
32985 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
32986 }
32987
32988 seq_printf(seq, "\n");
32989 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
32990 index a1a3e6d..1918bfc 100644
32991 --- a/drivers/md/dm-ioctl.c
32992 +++ b/drivers/md/dm-ioctl.c
32993 @@ -1590,7 +1590,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
32994 cmd == DM_LIST_VERSIONS_CMD)
32995 return 0;
32996
32997 - if ((cmd == DM_DEV_CREATE_CMD)) {
32998 + if (cmd == DM_DEV_CREATE_CMD) {
32999 if (!*param->name) {
33000 DMWARN("name not supplied when creating device");
33001 return -EINVAL;
33002 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
33003 index d039de8..0cf5b87 100644
33004 --- a/drivers/md/dm-raid1.c
33005 +++ b/drivers/md/dm-raid1.c
33006 @@ -40,7 +40,7 @@ enum dm_raid1_error {
33007
33008 struct mirror {
33009 struct mirror_set *ms;
33010 - atomic_t error_count;
33011 + atomic_unchecked_t error_count;
33012 unsigned long error_type;
33013 struct dm_dev *dev;
33014 sector_t offset;
33015 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
33016 struct mirror *m;
33017
33018 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
33019 - if (!atomic_read(&m->error_count))
33020 + if (!atomic_read_unchecked(&m->error_count))
33021 return m;
33022
33023 return NULL;
33024 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
33025 * simple way to tell if a device has encountered
33026 * errors.
33027 */
33028 - atomic_inc(&m->error_count);
33029 + atomic_inc_unchecked(&m->error_count);
33030
33031 if (test_and_set_bit(error_type, &m->error_type))
33032 return;
33033 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
33034 struct mirror *m = get_default_mirror(ms);
33035
33036 do {
33037 - if (likely(!atomic_read(&m->error_count)))
33038 + if (likely(!atomic_read_unchecked(&m->error_count)))
33039 return m;
33040
33041 if (m-- == ms->mirror)
33042 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
33043 {
33044 struct mirror *default_mirror = get_default_mirror(m->ms);
33045
33046 - return !atomic_read(&default_mirror->error_count);
33047 + return !atomic_read_unchecked(&default_mirror->error_count);
33048 }
33049
33050 static int mirror_available(struct mirror_set *ms, struct bio *bio)
33051 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
33052 */
33053 if (likely(region_in_sync(ms, region, 1)))
33054 m = choose_mirror(ms, bio->bi_sector);
33055 - else if (m && atomic_read(&m->error_count))
33056 + else if (m && atomic_read_unchecked(&m->error_count))
33057 m = NULL;
33058
33059 if (likely(m))
33060 @@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
33061 }
33062
33063 ms->mirror[mirror].ms = ms;
33064 - atomic_set(&(ms->mirror[mirror].error_count), 0);
33065 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33066 ms->mirror[mirror].error_type = 0;
33067 ms->mirror[mirror].offset = offset;
33068
33069 @@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
33070 */
33071 static char device_status_char(struct mirror *m)
33072 {
33073 - if (!atomic_read(&(m->error_count)))
33074 + if (!atomic_read_unchecked(&(m->error_count)))
33075 return 'A';
33076
33077 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
33078 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
33079 index 35c94ff..20d4c17 100644
33080 --- a/drivers/md/dm-stripe.c
33081 +++ b/drivers/md/dm-stripe.c
33082 @@ -20,7 +20,7 @@ struct stripe {
33083 struct dm_dev *dev;
33084 sector_t physical_start;
33085
33086 - atomic_t error_count;
33087 + atomic_unchecked_t error_count;
33088 };
33089
33090 struct stripe_c {
33091 @@ -193,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
33092 kfree(sc);
33093 return r;
33094 }
33095 - atomic_set(&(sc->stripe[i].error_count), 0);
33096 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33097 }
33098
33099 ti->private = sc;
33100 @@ -315,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
33101 DMEMIT("%d ", sc->stripes);
33102 for (i = 0; i < sc->stripes; i++) {
33103 DMEMIT("%s ", sc->stripe[i].dev->name);
33104 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33105 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33106 'D' : 'A';
33107 }
33108 buffer[i] = '\0';
33109 @@ -362,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
33110 */
33111 for (i = 0; i < sc->stripes; i++)
33112 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33113 - atomic_inc(&(sc->stripe[i].error_count));
33114 - if (atomic_read(&(sc->stripe[i].error_count)) <
33115 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
33116 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33117 DM_IO_ERROR_THRESHOLD)
33118 schedule_work(&sc->trigger_event);
33119 }
33120 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
33121 index 2e227fb..44ead1f 100644
33122 --- a/drivers/md/dm-table.c
33123 +++ b/drivers/md/dm-table.c
33124 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
33125 if (!dev_size)
33126 return 0;
33127
33128 - if ((start >= dev_size) || (start + len > dev_size)) {
33129 + if ((start >= dev_size) || (len > dev_size - start)) {
33130 DMWARN("%s: %s too small for target: "
33131 "start=%llu, len=%llu, dev_size=%llu",
33132 dm_device_name(ti->table->md), bdevname(bdev, b),
33133 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
33134 index 737d388..811ad5a 100644
33135 --- a/drivers/md/dm-thin-metadata.c
33136 +++ b/drivers/md/dm-thin-metadata.c
33137 @@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33138
33139 pmd->info.tm = tm;
33140 pmd->info.levels = 2;
33141 - pmd->info.value_type.context = pmd->data_sm;
33142 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33143 pmd->info.value_type.size = sizeof(__le64);
33144 pmd->info.value_type.inc = data_block_inc;
33145 pmd->info.value_type.dec = data_block_dec;
33146 @@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33147
33148 pmd->bl_info.tm = tm;
33149 pmd->bl_info.levels = 1;
33150 - pmd->bl_info.value_type.context = pmd->data_sm;
33151 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33152 pmd->bl_info.value_type.size = sizeof(__le64);
33153 pmd->bl_info.value_type.inc = data_block_inc;
33154 pmd->bl_info.value_type.dec = data_block_dec;
33155 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
33156 index e24143c..ce2f21a1 100644
33157 --- a/drivers/md/dm.c
33158 +++ b/drivers/md/dm.c
33159 @@ -176,9 +176,9 @@ struct mapped_device {
33160 /*
33161 * Event handling.
33162 */
33163 - atomic_t event_nr;
33164 + atomic_unchecked_t event_nr;
33165 wait_queue_head_t eventq;
33166 - atomic_t uevent_seq;
33167 + atomic_unchecked_t uevent_seq;
33168 struct list_head uevent_list;
33169 spinlock_t uevent_lock; /* Protect access to uevent_list */
33170
33171 @@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
33172 rwlock_init(&md->map_lock);
33173 atomic_set(&md->holders, 1);
33174 atomic_set(&md->open_count, 0);
33175 - atomic_set(&md->event_nr, 0);
33176 - atomic_set(&md->uevent_seq, 0);
33177 + atomic_set_unchecked(&md->event_nr, 0);
33178 + atomic_set_unchecked(&md->uevent_seq, 0);
33179 INIT_LIST_HEAD(&md->uevent_list);
33180 spin_lock_init(&md->uevent_lock);
33181
33182 @@ -1980,7 +1980,7 @@ static void event_callback(void *context)
33183
33184 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
33185
33186 - atomic_inc(&md->event_nr);
33187 + atomic_inc_unchecked(&md->event_nr);
33188 wake_up(&md->eventq);
33189 }
33190
33191 @@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
33192
33193 uint32_t dm_next_uevent_seq(struct mapped_device *md)
33194 {
33195 - return atomic_add_return(1, &md->uevent_seq);
33196 + return atomic_add_return_unchecked(1, &md->uevent_seq);
33197 }
33198
33199 uint32_t dm_get_event_nr(struct mapped_device *md)
33200 {
33201 - return atomic_read(&md->event_nr);
33202 + return atomic_read_unchecked(&md->event_nr);
33203 }
33204
33205 int dm_wait_event(struct mapped_device *md, int event_nr)
33206 {
33207 return wait_event_interruptible(md->eventq,
33208 - (event_nr != atomic_read(&md->event_nr)));
33209 + (event_nr != atomic_read_unchecked(&md->event_nr)));
33210 }
33211
33212 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
33213 diff --git a/drivers/md/md.c b/drivers/md/md.c
33214 index 2b30ffd..bf789ce 100644
33215 --- a/drivers/md/md.c
33216 +++ b/drivers/md/md.c
33217 @@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
33218 * start build, activate spare
33219 */
33220 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
33221 -static atomic_t md_event_count;
33222 +static atomic_unchecked_t md_event_count;
33223 void md_new_event(struct mddev *mddev)
33224 {
33225 - atomic_inc(&md_event_count);
33226 + atomic_inc_unchecked(&md_event_count);
33227 wake_up(&md_event_waiters);
33228 }
33229 EXPORT_SYMBOL_GPL(md_new_event);
33230 @@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
33231 */
33232 static void md_new_event_inintr(struct mddev *mddev)
33233 {
33234 - atomic_inc(&md_event_count);
33235 + atomic_inc_unchecked(&md_event_count);
33236 wake_up(&md_event_waiters);
33237 }
33238
33239 @@ -1526,7 +1526,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
33240
33241 rdev->preferred_minor = 0xffff;
33242 rdev->data_offset = le64_to_cpu(sb->data_offset);
33243 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33244 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33245
33246 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
33247 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
33248 @@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
33249 else
33250 sb->resync_offset = cpu_to_le64(0);
33251
33252 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
33253 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
33254
33255 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
33256 sb->size = cpu_to_le64(mddev->dev_sectors);
33257 @@ -2691,7 +2691,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
33258 static ssize_t
33259 errors_show(struct md_rdev *rdev, char *page)
33260 {
33261 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
33262 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
33263 }
33264
33265 static ssize_t
33266 @@ -2700,7 +2700,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
33267 char *e;
33268 unsigned long n = simple_strtoul(buf, &e, 10);
33269 if (*buf && (*e == 0 || *e == '\n')) {
33270 - atomic_set(&rdev->corrected_errors, n);
33271 + atomic_set_unchecked(&rdev->corrected_errors, n);
33272 return len;
33273 }
33274 return -EINVAL;
33275 @@ -3086,8 +3086,8 @@ int md_rdev_init(struct md_rdev *rdev)
33276 rdev->sb_loaded = 0;
33277 rdev->bb_page = NULL;
33278 atomic_set(&rdev->nr_pending, 0);
33279 - atomic_set(&rdev->read_errors, 0);
33280 - atomic_set(&rdev->corrected_errors, 0);
33281 + atomic_set_unchecked(&rdev->read_errors, 0);
33282 + atomic_set_unchecked(&rdev->corrected_errors, 0);
33283
33284 INIT_LIST_HEAD(&rdev->same_set);
33285 init_waitqueue_head(&rdev->blocked_wait);
33286 @@ -6738,7 +6738,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33287
33288 spin_unlock(&pers_lock);
33289 seq_printf(seq, "\n");
33290 - seq->poll_event = atomic_read(&md_event_count);
33291 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33292 return 0;
33293 }
33294 if (v == (void*)2) {
33295 @@ -6841,7 +6841,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
33296 return error;
33297
33298 seq = file->private_data;
33299 - seq->poll_event = atomic_read(&md_event_count);
33300 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33301 return error;
33302 }
33303
33304 @@ -6855,7 +6855,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
33305 /* always allow read */
33306 mask = POLLIN | POLLRDNORM;
33307
33308 - if (seq->poll_event != atomic_read(&md_event_count))
33309 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
33310 mask |= POLLERR | POLLPRI;
33311 return mask;
33312 }
33313 @@ -6899,7 +6899,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
33314 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
33315 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
33316 (int)part_stat_read(&disk->part0, sectors[1]) -
33317 - atomic_read(&disk->sync_io);
33318 + atomic_read_unchecked(&disk->sync_io);
33319 /* sync IO will cause sync_io to increase before the disk_stats
33320 * as sync_io is counted when a request starts, and
33321 * disk_stats is counted when it completes.
33322 diff --git a/drivers/md/md.h b/drivers/md/md.h
33323 index 1c2063c..9639970 100644
33324 --- a/drivers/md/md.h
33325 +++ b/drivers/md/md.h
33326 @@ -93,13 +93,13 @@ struct md_rdev {
33327 * only maintained for arrays that
33328 * support hot removal
33329 */
33330 - atomic_t read_errors; /* number of consecutive read errors that
33331 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
33332 * we have tried to ignore.
33333 */
33334 struct timespec last_read_error; /* monotonic time since our
33335 * last read error
33336 */
33337 - atomic_t corrected_errors; /* number of corrected read errors,
33338 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
33339 * for reporting to userspace and storing
33340 * in superblock.
33341 */
33342 @@ -429,7 +429,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
33343
33344 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
33345 {
33346 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33347 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33348 }
33349
33350 struct md_personality
33351 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
33352 index 50ed53b..4f29d7d 100644
33353 --- a/drivers/md/persistent-data/dm-space-map-checker.c
33354 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
33355 @@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
33356 /*----------------------------------------------------------------*/
33357
33358 struct sm_checker {
33359 - struct dm_space_map sm;
33360 + dm_space_map_no_const sm;
33361
33362 struct count_array old_counts;
33363 struct count_array counts;
33364 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
33365 index fc469ba..2d91555 100644
33366 --- a/drivers/md/persistent-data/dm-space-map-disk.c
33367 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
33368 @@ -23,7 +23,7 @@
33369 * Space map interface.
33370 */
33371 struct sm_disk {
33372 - struct dm_space_map sm;
33373 + dm_space_map_no_const sm;
33374
33375 struct ll_disk ll;
33376 struct ll_disk old_ll;
33377 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
33378 index e89ae5e..062e4c2 100644
33379 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
33380 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
33381 @@ -43,7 +43,7 @@ struct block_op {
33382 };
33383
33384 struct sm_metadata {
33385 - struct dm_space_map sm;
33386 + dm_space_map_no_const sm;
33387
33388 struct ll_disk ll;
33389 struct ll_disk old_ll;
33390 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
33391 index 1cbfc6b..56e1dbb 100644
33392 --- a/drivers/md/persistent-data/dm-space-map.h
33393 +++ b/drivers/md/persistent-data/dm-space-map.h
33394 @@ -60,6 +60,7 @@ struct dm_space_map {
33395 int (*root_size)(struct dm_space_map *sm, size_t *result);
33396 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
33397 };
33398 +typedef struct dm_space_map __no_const dm_space_map_no_const;
33399
33400 /*----------------------------------------------------------------*/
33401
33402 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
33403 index d7e9577..faa512f2 100644
33404 --- a/drivers/md/raid1.c
33405 +++ b/drivers/md/raid1.c
33406 @@ -1688,7 +1688,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
33407 if (r1_sync_page_io(rdev, sect, s,
33408 bio->bi_io_vec[idx].bv_page,
33409 READ) != 0)
33410 - atomic_add(s, &rdev->corrected_errors);
33411 + atomic_add_unchecked(s, &rdev->corrected_errors);
33412 }
33413 sectors -= s;
33414 sect += s;
33415 @@ -1902,7 +1902,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
33416 test_bit(In_sync, &rdev->flags)) {
33417 if (r1_sync_page_io(rdev, sect, s,
33418 conf->tmppage, READ)) {
33419 - atomic_add(s, &rdev->corrected_errors);
33420 + atomic_add_unchecked(s, &rdev->corrected_errors);
33421 printk(KERN_INFO
33422 "md/raid1:%s: read error corrected "
33423 "(%d sectors at %llu on %s)\n",
33424 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
33425 index d037adb..ed17dc9 100644
33426 --- a/drivers/md/raid10.c
33427 +++ b/drivers/md/raid10.c
33428 @@ -1684,7 +1684,7 @@ static void end_sync_read(struct bio *bio, int error)
33429 /* The write handler will notice the lack of
33430 * R10BIO_Uptodate and record any errors etc
33431 */
33432 - atomic_add(r10_bio->sectors,
33433 + atomic_add_unchecked(r10_bio->sectors,
33434 &conf->mirrors[d].rdev->corrected_errors);
33435
33436 /* for reconstruct, we always reschedule after a read.
33437 @@ -2033,7 +2033,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33438 {
33439 struct timespec cur_time_mon;
33440 unsigned long hours_since_last;
33441 - unsigned int read_errors = atomic_read(&rdev->read_errors);
33442 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
33443
33444 ktime_get_ts(&cur_time_mon);
33445
33446 @@ -2055,9 +2055,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33447 * overflowing the shift of read_errors by hours_since_last.
33448 */
33449 if (hours_since_last >= 8 * sizeof(read_errors))
33450 - atomic_set(&rdev->read_errors, 0);
33451 + atomic_set_unchecked(&rdev->read_errors, 0);
33452 else
33453 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
33454 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
33455 }
33456
33457 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
33458 @@ -2111,8 +2111,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33459 return;
33460
33461 check_decay_read_errors(mddev, rdev);
33462 - atomic_inc(&rdev->read_errors);
33463 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
33464 + atomic_inc_unchecked(&rdev->read_errors);
33465 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
33466 char b[BDEVNAME_SIZE];
33467 bdevname(rdev->bdev, b);
33468
33469 @@ -2120,7 +2120,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33470 "md/raid10:%s: %s: Raid device exceeded "
33471 "read_error threshold [cur %d:max %d]\n",
33472 mdname(mddev), b,
33473 - atomic_read(&rdev->read_errors), max_read_errors);
33474 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
33475 printk(KERN_NOTICE
33476 "md/raid10:%s: %s: Failing raid device\n",
33477 mdname(mddev), b);
33478 @@ -2271,7 +2271,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33479 (unsigned long long)(
33480 sect + rdev->data_offset),
33481 bdevname(rdev->bdev, b));
33482 - atomic_add(s, &rdev->corrected_errors);
33483 + atomic_add_unchecked(s, &rdev->corrected_errors);
33484 }
33485
33486 rdev_dec_pending(rdev, mddev);
33487 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
33488 index f351422..85c01bb 100644
33489 --- a/drivers/md/raid5.c
33490 +++ b/drivers/md/raid5.c
33491 @@ -1686,18 +1686,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
33492 (unsigned long long)(sh->sector
33493 + rdev->data_offset),
33494 bdevname(rdev->bdev, b));
33495 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33496 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
33497 clear_bit(R5_ReadError, &sh->dev[i].flags);
33498 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33499 }
33500 - if (atomic_read(&rdev->read_errors))
33501 - atomic_set(&rdev->read_errors, 0);
33502 + if (atomic_read_unchecked(&rdev->read_errors))
33503 + atomic_set_unchecked(&rdev->read_errors, 0);
33504 } else {
33505 const char *bdn = bdevname(rdev->bdev, b);
33506 int retry = 0;
33507
33508 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33509 - atomic_inc(&rdev->read_errors);
33510 + atomic_inc_unchecked(&rdev->read_errors);
33511 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
33512 printk_ratelimited(
33513 KERN_WARNING
33514 @@ -1726,7 +1726,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
33515 (unsigned long long)(sh->sector
33516 + rdev->data_offset),
33517 bdn);
33518 - else if (atomic_read(&rdev->read_errors)
33519 + else if (atomic_read_unchecked(&rdev->read_errors)
33520 > conf->max_nr_stripes)
33521 printk(KERN_WARNING
33522 "md/raid:%s: Too many read errors, failing device %s.\n",
33523 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
33524 index d88c4aa..17c80b1 100644
33525 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33526 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
33527 @@ -1679,7 +1679,7 @@ static struct ddb_info ddb_v6 = {
33528 .subvendor = _subvend, .subdevice = _subdev, \
33529 .driver_data = (unsigned long)&_driverdata }
33530
33531 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33532 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33533 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33534 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33535 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
33536 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33537 index a7d876f..8c21b61 100644
33538 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
33539 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
33540 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
33541 union {
33542 dmx_ts_cb ts;
33543 dmx_section_cb sec;
33544 - } cb;
33545 + } __no_const cb;
33546
33547 struct dvb_demux *demux;
33548 void *priv;
33549 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33550 index 00a6732..70a682e 100644
33551 --- a/drivers/media/dvb/dvb-core/dvbdev.c
33552 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
33553 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33554 const struct dvb_device *template, void *priv, int type)
33555 {
33556 struct dvb_device *dvbdev;
33557 - struct file_operations *dvbdevfops;
33558 + file_operations_no_const *dvbdevfops;
33559 struct device *clsdev;
33560 int minor;
33561 int id;
33562 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33563 index 3940bb0..fb3952a 100644
33564 --- a/drivers/media/dvb/dvb-usb/cxusb.c
33565 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
33566 @@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33567
33568 struct dib0700_adapter_state {
33569 int (*set_param_save) (struct dvb_frontend *);
33570 -};
33571 +} __no_const;
33572
33573 static int dib7070_set_param_override(struct dvb_frontend *fe)
33574 {
33575 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33576 index 451c5a7..649f711 100644
33577 --- a/drivers/media/dvb/dvb-usb/dw2102.c
33578 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
33579 @@ -95,7 +95,7 @@ struct su3000_state {
33580
33581 struct s6x0_state {
33582 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33583 -};
33584 +} __no_const;
33585
33586 /* debug */
33587 static int dvb_usb_dw2102_debug;
33588 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33589 index 404f63a..4796533 100644
33590 --- a/drivers/media/dvb/frontends/dib3000.h
33591 +++ b/drivers/media/dvb/frontends/dib3000.h
33592 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33593 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33594 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33595 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33596 -};
33597 +} __no_const;
33598
33599 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33600 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33601 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33602 index 7539a5d..06531a6 100644
33603 --- a/drivers/media/dvb/ngene/ngene-cards.c
33604 +++ b/drivers/media/dvb/ngene/ngene-cards.c
33605 @@ -478,7 +478,7 @@ static struct ngene_info ngene_info_m780 = {
33606
33607 /****************************************************************************/
33608
33609 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33610 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33611 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33612 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33613 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33614 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33615 index 16a089f..1661b11 100644
33616 --- a/drivers/media/radio/radio-cadet.c
33617 +++ b/drivers/media/radio/radio-cadet.c
33618 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33619 unsigned char readbuf[RDS_BUFFER];
33620 int i = 0;
33621
33622 + if (count > RDS_BUFFER)
33623 + return -EFAULT;
33624 mutex_lock(&dev->lock);
33625 if (dev->rdsstat == 0) {
33626 dev->rdsstat = 1;
33627 @@ -347,7 +349,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33628 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
33629 mutex_unlock(&dev->lock);
33630
33631 - if (copy_to_user(data, readbuf, i))
33632 + if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
33633 return -EFAULT;
33634 return i;
33635 }
33636 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33637 index 9cde353..8c6a1c3 100644
33638 --- a/drivers/media/video/au0828/au0828.h
33639 +++ b/drivers/media/video/au0828/au0828.h
33640 @@ -191,7 +191,7 @@ struct au0828_dev {
33641
33642 /* I2C */
33643 struct i2c_adapter i2c_adap;
33644 - struct i2c_algorithm i2c_algo;
33645 + i2c_algorithm_no_const i2c_algo;
33646 struct i2c_client i2c_client;
33647 u32 i2c_rc;
33648
33649 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
33650 index 04bf662..e0ac026 100644
33651 --- a/drivers/media/video/cx88/cx88-alsa.c
33652 +++ b/drivers/media/video/cx88/cx88-alsa.c
33653 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
33654 * Only boards with eeprom and byte 1 at eeprom=1 have it
33655 */
33656
33657 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
33658 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
33659 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33660 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33661 {0, }
33662 diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
33663 index 88cf9d9..bbc4b2c 100644
33664 --- a/drivers/media/video/omap/omap_vout.c
33665 +++ b/drivers/media/video/omap/omap_vout.c
33666 @@ -64,7 +64,6 @@ enum omap_vout_channels {
33667 OMAP_VIDEO2,
33668 };
33669
33670 -static struct videobuf_queue_ops video_vbq_ops;
33671 /* Variables configurable through module params*/
33672 static u32 video1_numbuffers = 3;
33673 static u32 video2_numbuffers = 3;
33674 @@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
33675 {
33676 struct videobuf_queue *q;
33677 struct omap_vout_device *vout = NULL;
33678 + static struct videobuf_queue_ops video_vbq_ops = {
33679 + .buf_setup = omap_vout_buffer_setup,
33680 + .buf_prepare = omap_vout_buffer_prepare,
33681 + .buf_release = omap_vout_buffer_release,
33682 + .buf_queue = omap_vout_buffer_queue,
33683 + };
33684
33685 vout = video_drvdata(file);
33686 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
33687 @@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
33688 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
33689
33690 q = &vout->vbq;
33691 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
33692 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
33693 - video_vbq_ops.buf_release = omap_vout_buffer_release;
33694 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
33695 spin_lock_init(&vout->vbq_lock);
33696
33697 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
33698 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33699 index 305e6aa..0143317 100644
33700 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33701 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33702 @@ -196,7 +196,7 @@ struct pvr2_hdw {
33703
33704 /* I2C stuff */
33705 struct i2c_adapter i2c_adap;
33706 - struct i2c_algorithm i2c_algo;
33707 + i2c_algorithm_no_const i2c_algo;
33708 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
33709 int i2c_cx25840_hack_state;
33710 int i2c_linked;
33711 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
33712 index 02194c0..091733b 100644
33713 --- a/drivers/media/video/timblogiw.c
33714 +++ b/drivers/media/video/timblogiw.c
33715 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
33716
33717 /* Platform device functions */
33718
33719 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33720 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
33721 .vidioc_querycap = timblogiw_querycap,
33722 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
33723 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
33724 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33725 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
33726 };
33727
33728 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
33729 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
33730 .owner = THIS_MODULE,
33731 .open = timblogiw_open,
33732 .release = timblogiw_close,
33733 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
33734 index a5c591f..db692a3 100644
33735 --- a/drivers/message/fusion/mptbase.c
33736 +++ b/drivers/message/fusion/mptbase.c
33737 @@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
33738 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
33739 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
33740
33741 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33742 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
33743 +#else
33744 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33745 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
33746 +#endif
33747 +
33748 /*
33749 * Rounding UP to nearest 4-kB boundary here...
33750 */
33751 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
33752 index 551262e..7551198 100644
33753 --- a/drivers/message/fusion/mptsas.c
33754 +++ b/drivers/message/fusion/mptsas.c
33755 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
33756 return 0;
33757 }
33758
33759 +static inline void
33760 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33761 +{
33762 + if (phy_info->port_details) {
33763 + phy_info->port_details->rphy = rphy;
33764 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33765 + ioc->name, rphy));
33766 + }
33767 +
33768 + if (rphy) {
33769 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33770 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33771 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33772 + ioc->name, rphy, rphy->dev.release));
33773 + }
33774 +}
33775 +
33776 /* no mutex */
33777 static void
33778 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
33779 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
33780 return NULL;
33781 }
33782
33783 -static inline void
33784 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33785 -{
33786 - if (phy_info->port_details) {
33787 - phy_info->port_details->rphy = rphy;
33788 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33789 - ioc->name, rphy));
33790 - }
33791 -
33792 - if (rphy) {
33793 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33794 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33795 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33796 - ioc->name, rphy, rphy->dev.release));
33797 - }
33798 -}
33799 -
33800 static inline struct sas_port *
33801 mptsas_get_port(struct mptsas_phyinfo *phy_info)
33802 {
33803 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
33804 index 0c3ced7..1fe34ec 100644
33805 --- a/drivers/message/fusion/mptscsih.c
33806 +++ b/drivers/message/fusion/mptscsih.c
33807 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
33808
33809 h = shost_priv(SChost);
33810
33811 - if (h) {
33812 - if (h->info_kbuf == NULL)
33813 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33814 - return h->info_kbuf;
33815 - h->info_kbuf[0] = '\0';
33816 + if (!h)
33817 + return NULL;
33818
33819 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33820 - h->info_kbuf[size-1] = '\0';
33821 - }
33822 + if (h->info_kbuf == NULL)
33823 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33824 + return h->info_kbuf;
33825 + h->info_kbuf[0] = '\0';
33826 +
33827 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33828 + h->info_kbuf[size-1] = '\0';
33829
33830 return h->info_kbuf;
33831 }
33832 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
33833 index 6d115c7..58ff7fd 100644
33834 --- a/drivers/message/i2o/i2o_proc.c
33835 +++ b/drivers/message/i2o/i2o_proc.c
33836 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
33837 "Array Controller Device"
33838 };
33839
33840 -static char *chtostr(u8 * chars, int n)
33841 -{
33842 - char tmp[256];
33843 - tmp[0] = 0;
33844 - return strncat(tmp, (char *)chars, n);
33845 -}
33846 -
33847 static int i2o_report_query_status(struct seq_file *seq, int block_status,
33848 char *group)
33849 {
33850 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
33851
33852 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
33853 seq_printf(seq, "%-#8x", ddm_table.module_id);
33854 - seq_printf(seq, "%-29s",
33855 - chtostr(ddm_table.module_name_version, 28));
33856 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
33857 seq_printf(seq, "%9d ", ddm_table.data_size);
33858 seq_printf(seq, "%8d", ddm_table.code_size);
33859
33860 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
33861
33862 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
33863 seq_printf(seq, "%-#8x", dst->module_id);
33864 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
33865 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
33866 + seq_printf(seq, "%-.28s", dst->module_name_version);
33867 + seq_printf(seq, "%-.8s", dst->date);
33868 seq_printf(seq, "%8d ", dst->module_size);
33869 seq_printf(seq, "%8d ", dst->mpb_size);
33870 seq_printf(seq, "0x%04x", dst->module_flags);
33871 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
33872 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
33873 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
33874 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
33875 - seq_printf(seq, "Vendor info : %s\n",
33876 - chtostr((u8 *) (work32 + 2), 16));
33877 - seq_printf(seq, "Product info : %s\n",
33878 - chtostr((u8 *) (work32 + 6), 16));
33879 - seq_printf(seq, "Description : %s\n",
33880 - chtostr((u8 *) (work32 + 10), 16));
33881 - seq_printf(seq, "Product rev. : %s\n",
33882 - chtostr((u8 *) (work32 + 14), 8));
33883 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
33884 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
33885 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
33886 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
33887
33888 seq_printf(seq, "Serial number : ");
33889 print_serial_number(seq, (u8 *) (work32 + 16),
33890 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
33891 }
33892
33893 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
33894 - seq_printf(seq, "Module name : %s\n",
33895 - chtostr(result.module_name, 24));
33896 - seq_printf(seq, "Module revision : %s\n",
33897 - chtostr(result.module_rev, 8));
33898 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
33899 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
33900
33901 seq_printf(seq, "Serial number : ");
33902 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
33903 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
33904 return 0;
33905 }
33906
33907 - seq_printf(seq, "Device name : %s\n",
33908 - chtostr(result.device_name, 64));
33909 - seq_printf(seq, "Service name : %s\n",
33910 - chtostr(result.service_name, 64));
33911 - seq_printf(seq, "Physical name : %s\n",
33912 - chtostr(result.physical_location, 64));
33913 - seq_printf(seq, "Instance number : %s\n",
33914 - chtostr(result.instance_number, 4));
33915 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
33916 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
33917 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
33918 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
33919
33920 return 0;
33921 }
33922 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
33923 index a8c08f3..155fe3d 100644
33924 --- a/drivers/message/i2o/iop.c
33925 +++ b/drivers/message/i2o/iop.c
33926 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
33927
33928 spin_lock_irqsave(&c->context_list_lock, flags);
33929
33930 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
33931 - atomic_inc(&c->context_list_counter);
33932 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
33933 + atomic_inc_unchecked(&c->context_list_counter);
33934
33935 - entry->context = atomic_read(&c->context_list_counter);
33936 + entry->context = atomic_read_unchecked(&c->context_list_counter);
33937
33938 list_add(&entry->list, &c->context_list);
33939
33940 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
33941
33942 #if BITS_PER_LONG == 64
33943 spin_lock_init(&c->context_list_lock);
33944 - atomic_set(&c->context_list_counter, 0);
33945 + atomic_set_unchecked(&c->context_list_counter, 0);
33946 INIT_LIST_HEAD(&c->context_list);
33947 #endif
33948
33949 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
33950 index 7ce65f4..e66e9bc 100644
33951 --- a/drivers/mfd/abx500-core.c
33952 +++ b/drivers/mfd/abx500-core.c
33953 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
33954
33955 struct abx500_device_entry {
33956 struct list_head list;
33957 - struct abx500_ops ops;
33958 + abx500_ops_no_const ops;
33959 struct device *dev;
33960 };
33961
33962 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
33963 index a9223ed..4127b13 100644
33964 --- a/drivers/mfd/janz-cmodio.c
33965 +++ b/drivers/mfd/janz-cmodio.c
33966 @@ -13,6 +13,7 @@
33967
33968 #include <linux/kernel.h>
33969 #include <linux/module.h>
33970 +#include <linux/slab.h>
33971 #include <linux/init.h>
33972 #include <linux/pci.h>
33973 #include <linux/interrupt.h>
33974 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
33975 index a981e2a..5ca0c8b 100644
33976 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
33977 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
33978 @@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
33979 * the lid is closed. This leads to interrupts as soon as a little move
33980 * is done.
33981 */
33982 - atomic_inc(&lis3->count);
33983 + atomic_inc_unchecked(&lis3->count);
33984
33985 wake_up_interruptible(&lis3->misc_wait);
33986 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
33987 @@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
33988 if (lis3->pm_dev)
33989 pm_runtime_get_sync(lis3->pm_dev);
33990
33991 - atomic_set(&lis3->count, 0);
33992 + atomic_set_unchecked(&lis3->count, 0);
33993 return 0;
33994 }
33995
33996 @@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
33997 add_wait_queue(&lis3->misc_wait, &wait);
33998 while (true) {
33999 set_current_state(TASK_INTERRUPTIBLE);
34000 - data = atomic_xchg(&lis3->count, 0);
34001 + data = atomic_xchg_unchecked(&lis3->count, 0);
34002 if (data)
34003 break;
34004
34005 @@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34006 struct lis3lv02d, miscdev);
34007
34008 poll_wait(file, &lis3->misc_wait, wait);
34009 - if (atomic_read(&lis3->count))
34010 + if (atomic_read_unchecked(&lis3->count))
34011 return POLLIN | POLLRDNORM;
34012 return 0;
34013 }
34014 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
34015 index 2b1482a..5d33616 100644
34016 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
34017 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
34018 @@ -266,7 +266,7 @@ struct lis3lv02d {
34019 struct input_polled_dev *idev; /* input device */
34020 struct platform_device *pdev; /* platform device */
34021 struct regulator_bulk_data regulators[2];
34022 - atomic_t count; /* interrupt count after last read */
34023 + atomic_unchecked_t count; /* interrupt count after last read */
34024 union axis_conversion ac; /* hw -> logical axis */
34025 int mapped_btns[3];
34026
34027 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
34028 index 2f30bad..c4c13d0 100644
34029 --- a/drivers/misc/sgi-gru/gruhandles.c
34030 +++ b/drivers/misc/sgi-gru/gruhandles.c
34031 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
34032 unsigned long nsec;
34033
34034 nsec = CLKS2NSEC(clks);
34035 - atomic_long_inc(&mcs_op_statistics[op].count);
34036 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
34037 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34038 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34039 if (mcs_op_statistics[op].max < nsec)
34040 mcs_op_statistics[op].max = nsec;
34041 }
34042 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
34043 index 950dbe9..eeef0f8 100644
34044 --- a/drivers/misc/sgi-gru/gruprocfs.c
34045 +++ b/drivers/misc/sgi-gru/gruprocfs.c
34046 @@ -32,9 +32,9 @@
34047
34048 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34049
34050 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34051 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34052 {
34053 - unsigned long val = atomic_long_read(v);
34054 + unsigned long val = atomic_long_read_unchecked(v);
34055
34056 seq_printf(s, "%16lu %s\n", val, id);
34057 }
34058 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
34059
34060 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
34061 for (op = 0; op < mcsop_last; op++) {
34062 - count = atomic_long_read(&mcs_op_statistics[op].count);
34063 - total = atomic_long_read(&mcs_op_statistics[op].total);
34064 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
34065 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
34066 max = mcs_op_statistics[op].max;
34067 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
34068 count ? total / count : 0, max);
34069 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
34070 index 5c3ce24..4915ccb 100644
34071 --- a/drivers/misc/sgi-gru/grutables.h
34072 +++ b/drivers/misc/sgi-gru/grutables.h
34073 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
34074 * GRU statistics.
34075 */
34076 struct gru_stats_s {
34077 - atomic_long_t vdata_alloc;
34078 - atomic_long_t vdata_free;
34079 - atomic_long_t gts_alloc;
34080 - atomic_long_t gts_free;
34081 - atomic_long_t gms_alloc;
34082 - atomic_long_t gms_free;
34083 - atomic_long_t gts_double_allocate;
34084 - atomic_long_t assign_context;
34085 - atomic_long_t assign_context_failed;
34086 - atomic_long_t free_context;
34087 - atomic_long_t load_user_context;
34088 - atomic_long_t load_kernel_context;
34089 - atomic_long_t lock_kernel_context;
34090 - atomic_long_t unlock_kernel_context;
34091 - atomic_long_t steal_user_context;
34092 - atomic_long_t steal_kernel_context;
34093 - atomic_long_t steal_context_failed;
34094 - atomic_long_t nopfn;
34095 - atomic_long_t asid_new;
34096 - atomic_long_t asid_next;
34097 - atomic_long_t asid_wrap;
34098 - atomic_long_t asid_reuse;
34099 - atomic_long_t intr;
34100 - atomic_long_t intr_cbr;
34101 - atomic_long_t intr_tfh;
34102 - atomic_long_t intr_spurious;
34103 - atomic_long_t intr_mm_lock_failed;
34104 - atomic_long_t call_os;
34105 - atomic_long_t call_os_wait_queue;
34106 - atomic_long_t user_flush_tlb;
34107 - atomic_long_t user_unload_context;
34108 - atomic_long_t user_exception;
34109 - atomic_long_t set_context_option;
34110 - atomic_long_t check_context_retarget_intr;
34111 - atomic_long_t check_context_unload;
34112 - atomic_long_t tlb_dropin;
34113 - atomic_long_t tlb_preload_page;
34114 - atomic_long_t tlb_dropin_fail_no_asid;
34115 - atomic_long_t tlb_dropin_fail_upm;
34116 - atomic_long_t tlb_dropin_fail_invalid;
34117 - atomic_long_t tlb_dropin_fail_range_active;
34118 - atomic_long_t tlb_dropin_fail_idle;
34119 - atomic_long_t tlb_dropin_fail_fmm;
34120 - atomic_long_t tlb_dropin_fail_no_exception;
34121 - atomic_long_t tfh_stale_on_fault;
34122 - atomic_long_t mmu_invalidate_range;
34123 - atomic_long_t mmu_invalidate_page;
34124 - atomic_long_t flush_tlb;
34125 - atomic_long_t flush_tlb_gru;
34126 - atomic_long_t flush_tlb_gru_tgh;
34127 - atomic_long_t flush_tlb_gru_zero_asid;
34128 + atomic_long_unchecked_t vdata_alloc;
34129 + atomic_long_unchecked_t vdata_free;
34130 + atomic_long_unchecked_t gts_alloc;
34131 + atomic_long_unchecked_t gts_free;
34132 + atomic_long_unchecked_t gms_alloc;
34133 + atomic_long_unchecked_t gms_free;
34134 + atomic_long_unchecked_t gts_double_allocate;
34135 + atomic_long_unchecked_t assign_context;
34136 + atomic_long_unchecked_t assign_context_failed;
34137 + atomic_long_unchecked_t free_context;
34138 + atomic_long_unchecked_t load_user_context;
34139 + atomic_long_unchecked_t load_kernel_context;
34140 + atomic_long_unchecked_t lock_kernel_context;
34141 + atomic_long_unchecked_t unlock_kernel_context;
34142 + atomic_long_unchecked_t steal_user_context;
34143 + atomic_long_unchecked_t steal_kernel_context;
34144 + atomic_long_unchecked_t steal_context_failed;
34145 + atomic_long_unchecked_t nopfn;
34146 + atomic_long_unchecked_t asid_new;
34147 + atomic_long_unchecked_t asid_next;
34148 + atomic_long_unchecked_t asid_wrap;
34149 + atomic_long_unchecked_t asid_reuse;
34150 + atomic_long_unchecked_t intr;
34151 + atomic_long_unchecked_t intr_cbr;
34152 + atomic_long_unchecked_t intr_tfh;
34153 + atomic_long_unchecked_t intr_spurious;
34154 + atomic_long_unchecked_t intr_mm_lock_failed;
34155 + atomic_long_unchecked_t call_os;
34156 + atomic_long_unchecked_t call_os_wait_queue;
34157 + atomic_long_unchecked_t user_flush_tlb;
34158 + atomic_long_unchecked_t user_unload_context;
34159 + atomic_long_unchecked_t user_exception;
34160 + atomic_long_unchecked_t set_context_option;
34161 + atomic_long_unchecked_t check_context_retarget_intr;
34162 + atomic_long_unchecked_t check_context_unload;
34163 + atomic_long_unchecked_t tlb_dropin;
34164 + atomic_long_unchecked_t tlb_preload_page;
34165 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
34166 + atomic_long_unchecked_t tlb_dropin_fail_upm;
34167 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
34168 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
34169 + atomic_long_unchecked_t tlb_dropin_fail_idle;
34170 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
34171 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
34172 + atomic_long_unchecked_t tfh_stale_on_fault;
34173 + atomic_long_unchecked_t mmu_invalidate_range;
34174 + atomic_long_unchecked_t mmu_invalidate_page;
34175 + atomic_long_unchecked_t flush_tlb;
34176 + atomic_long_unchecked_t flush_tlb_gru;
34177 + atomic_long_unchecked_t flush_tlb_gru_tgh;
34178 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
34179
34180 - atomic_long_t copy_gpa;
34181 - atomic_long_t read_gpa;
34182 + atomic_long_unchecked_t copy_gpa;
34183 + atomic_long_unchecked_t read_gpa;
34184
34185 - atomic_long_t mesq_receive;
34186 - atomic_long_t mesq_receive_none;
34187 - atomic_long_t mesq_send;
34188 - atomic_long_t mesq_send_failed;
34189 - atomic_long_t mesq_noop;
34190 - atomic_long_t mesq_send_unexpected_error;
34191 - atomic_long_t mesq_send_lb_overflow;
34192 - atomic_long_t mesq_send_qlimit_reached;
34193 - atomic_long_t mesq_send_amo_nacked;
34194 - atomic_long_t mesq_send_put_nacked;
34195 - atomic_long_t mesq_page_overflow;
34196 - atomic_long_t mesq_qf_locked;
34197 - atomic_long_t mesq_qf_noop_not_full;
34198 - atomic_long_t mesq_qf_switch_head_failed;
34199 - atomic_long_t mesq_qf_unexpected_error;
34200 - atomic_long_t mesq_noop_unexpected_error;
34201 - atomic_long_t mesq_noop_lb_overflow;
34202 - atomic_long_t mesq_noop_qlimit_reached;
34203 - atomic_long_t mesq_noop_amo_nacked;
34204 - atomic_long_t mesq_noop_put_nacked;
34205 - atomic_long_t mesq_noop_page_overflow;
34206 + atomic_long_unchecked_t mesq_receive;
34207 + atomic_long_unchecked_t mesq_receive_none;
34208 + atomic_long_unchecked_t mesq_send;
34209 + atomic_long_unchecked_t mesq_send_failed;
34210 + atomic_long_unchecked_t mesq_noop;
34211 + atomic_long_unchecked_t mesq_send_unexpected_error;
34212 + atomic_long_unchecked_t mesq_send_lb_overflow;
34213 + atomic_long_unchecked_t mesq_send_qlimit_reached;
34214 + atomic_long_unchecked_t mesq_send_amo_nacked;
34215 + atomic_long_unchecked_t mesq_send_put_nacked;
34216 + atomic_long_unchecked_t mesq_page_overflow;
34217 + atomic_long_unchecked_t mesq_qf_locked;
34218 + atomic_long_unchecked_t mesq_qf_noop_not_full;
34219 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
34220 + atomic_long_unchecked_t mesq_qf_unexpected_error;
34221 + atomic_long_unchecked_t mesq_noop_unexpected_error;
34222 + atomic_long_unchecked_t mesq_noop_lb_overflow;
34223 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
34224 + atomic_long_unchecked_t mesq_noop_amo_nacked;
34225 + atomic_long_unchecked_t mesq_noop_put_nacked;
34226 + atomic_long_unchecked_t mesq_noop_page_overflow;
34227
34228 };
34229
34230 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
34231 tghop_invalidate, mcsop_last};
34232
34233 struct mcs_op_statistic {
34234 - atomic_long_t count;
34235 - atomic_long_t total;
34236 + atomic_long_unchecked_t count;
34237 + atomic_long_unchecked_t total;
34238 unsigned long max;
34239 };
34240
34241 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
34242
34243 #define STAT(id) do { \
34244 if (gru_options & OPT_STATS) \
34245 - atomic_long_inc(&gru_stats.id); \
34246 + atomic_long_inc_unchecked(&gru_stats.id); \
34247 } while (0)
34248
34249 #ifdef CONFIG_SGI_GRU_DEBUG
34250 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
34251 index c862cd4..0d176fe 100644
34252 --- a/drivers/misc/sgi-xp/xp.h
34253 +++ b/drivers/misc/sgi-xp/xp.h
34254 @@ -288,7 +288,7 @@ struct xpc_interface {
34255 xpc_notify_func, void *);
34256 void (*received) (short, int, void *);
34257 enum xp_retval (*partid_to_nasids) (short, void *);
34258 -};
34259 +} __no_const;
34260
34261 extern struct xpc_interface xpc_interface;
34262
34263 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
34264 index b94d5f7..7f494c5 100644
34265 --- a/drivers/misc/sgi-xp/xpc.h
34266 +++ b/drivers/misc/sgi-xp/xpc.h
34267 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
34268 void (*received_payload) (struct xpc_channel *, void *);
34269 void (*notify_senders_of_disconnect) (struct xpc_channel *);
34270 };
34271 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
34272
34273 /* struct xpc_partition act_state values (for XPC HB) */
34274
34275 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
34276 /* found in xpc_main.c */
34277 extern struct device *xpc_part;
34278 extern struct device *xpc_chan;
34279 -extern struct xpc_arch_operations xpc_arch_ops;
34280 +extern xpc_arch_operations_no_const xpc_arch_ops;
34281 extern int xpc_disengage_timelimit;
34282 extern int xpc_disengage_timedout;
34283 extern int xpc_activate_IRQ_rcvd;
34284 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
34285 index 8d082b4..aa749ae 100644
34286 --- a/drivers/misc/sgi-xp/xpc_main.c
34287 +++ b/drivers/misc/sgi-xp/xpc_main.c
34288 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
34289 .notifier_call = xpc_system_die,
34290 };
34291
34292 -struct xpc_arch_operations xpc_arch_ops;
34293 +xpc_arch_operations_no_const xpc_arch_ops;
34294
34295 /*
34296 * Timer function to enforce the timelimit on the partition disengage.
34297 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
34298 index 69ef0be..f3ef91e 100644
34299 --- a/drivers/mmc/host/sdhci-pci.c
34300 +++ b/drivers/mmc/host/sdhci-pci.c
34301 @@ -652,7 +652,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
34302 .probe = via_probe,
34303 };
34304
34305 -static const struct pci_device_id pci_ids[] __devinitdata = {
34306 +static const struct pci_device_id pci_ids[] __devinitconst = {
34307 {
34308 .vendor = PCI_VENDOR_ID_RICOH,
34309 .device = PCI_DEVICE_ID_RICOH_R5C822,
34310 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
34311 index a4eb8b5..8c0628f 100644
34312 --- a/drivers/mtd/devices/doc2000.c
34313 +++ b/drivers/mtd/devices/doc2000.c
34314 @@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
34315
34316 /* The ECC will not be calculated correctly if less than 512 is written */
34317 /* DBB-
34318 - if (len != 0x200 && eccbuf)
34319 + if (len != 0x200)
34320 printk(KERN_WARNING
34321 "ECC needs a full sector write (adr: %lx size %lx)\n",
34322 (long) to, (long) len);
34323 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
34324 index a9e57d6..c6d8731 100644
34325 --- a/drivers/mtd/nand/denali.c
34326 +++ b/drivers/mtd/nand/denali.c
34327 @@ -26,6 +26,7 @@
34328 #include <linux/pci.h>
34329 #include <linux/mtd/mtd.h>
34330 #include <linux/module.h>
34331 +#include <linux/slab.h>
34332
34333 #include "denali.h"
34334
34335 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
34336 index 51b9d6a..52af9a7 100644
34337 --- a/drivers/mtd/nftlmount.c
34338 +++ b/drivers/mtd/nftlmount.c
34339 @@ -24,6 +24,7 @@
34340 #include <asm/errno.h>
34341 #include <linux/delay.h>
34342 #include <linux/slab.h>
34343 +#include <linux/sched.h>
34344 #include <linux/mtd/mtd.h>
34345 #include <linux/mtd/nand.h>
34346 #include <linux/mtd/nftl.h>
34347 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
34348 index 6762dc4..9956862 100644
34349 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
34350 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
34351 @@ -2859,7 +2859,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
34352 */
34353
34354 #define ATL2_PARAM(X, desc) \
34355 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34356 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34357 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
34358 MODULE_PARM_DESC(X, desc);
34359 #else
34360 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34361 index 61a7670..7da6e34 100644
34362 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34363 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34364 @@ -483,7 +483,7 @@ struct bnx2x_rx_mode_obj {
34365
34366 int (*wait_comp)(struct bnx2x *bp,
34367 struct bnx2x_rx_mode_ramrod_params *p);
34368 -};
34369 +} __no_const;
34370
34371 /********************** Set multicast group ***********************************/
34372
34373 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
34374 index 93865f8..5448741 100644
34375 --- a/drivers/net/ethernet/broadcom/tg3.h
34376 +++ b/drivers/net/ethernet/broadcom/tg3.h
34377 @@ -140,6 +140,7 @@
34378 #define CHIPREV_ID_5750_A0 0x4000
34379 #define CHIPREV_ID_5750_A1 0x4001
34380 #define CHIPREV_ID_5750_A3 0x4003
34381 +#define CHIPREV_ID_5750_C1 0x4201
34382 #define CHIPREV_ID_5750_C2 0x4202
34383 #define CHIPREV_ID_5752_A0_HW 0x5000
34384 #define CHIPREV_ID_5752_A0 0x6000
34385 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34386 index c4e8643..0979484 100644
34387 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34388 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34389 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
34390 */
34391 struct l2t_skb_cb {
34392 arp_failure_handler_func arp_failure_handler;
34393 -};
34394 +} __no_const;
34395
34396 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34397
34398 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
34399 index 18b106c..2b38d36 100644
34400 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
34401 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
34402 @@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34403 for (i=0; i<ETH_ALEN; i++) {
34404 tmp.addr[i] = dev->dev_addr[i];
34405 }
34406 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34407 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34408 break;
34409
34410 case DE4X5_SET_HWADDR: /* Set the hardware address */
34411 @@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34412 spin_lock_irqsave(&lp->lock, flags);
34413 memcpy(&statbuf, &lp->pktStats, ioc->len);
34414 spin_unlock_irqrestore(&lp->lock, flags);
34415 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
34416 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34417 return -EFAULT;
34418 break;
34419 }
34420 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
34421 index ed7d1dc..d426748 100644
34422 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
34423 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
34424 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34425 {NULL}};
34426
34427
34428 -static const char *block_name[] __devinitdata = {
34429 +static const char *block_name[] __devinitconst = {
34430 "21140 non-MII",
34431 "21140 MII PHY",
34432 "21142 Serial PHY",
34433 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
34434 index 2ac6fff..2d127d0 100644
34435 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
34436 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
34437 @@ -236,7 +236,7 @@ struct pci_id_info {
34438 int drv_flags; /* Driver use, intended as capability flags. */
34439 };
34440
34441 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34442 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34443 { /* Sometime a Level-One switch card. */
34444 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34445 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34446 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
34447 index d783f4f..97fa1b0 100644
34448 --- a/drivers/net/ethernet/dlink/sundance.c
34449 +++ b/drivers/net/ethernet/dlink/sundance.c
34450 @@ -218,7 +218,7 @@ enum {
34451 struct pci_id_info {
34452 const char *name;
34453 };
34454 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34455 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34456 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34457 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34458 {"D-Link DFE-580TX 4 port Server Adapter"},
34459 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
34460 index 528a886..e6a98a3 100644
34461 --- a/drivers/net/ethernet/emulex/benet/be_main.c
34462 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
34463 @@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
34464
34465 if (wrapped)
34466 newacc += 65536;
34467 - ACCESS_ONCE(*acc) = newacc;
34468 + ACCESS_ONCE_RW(*acc) = newacc;
34469 }
34470
34471 void be_parse_stats(struct be_adapter *adapter)
34472 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
34473 index 16b0704..d2c07d7 100644
34474 --- a/drivers/net/ethernet/faraday/ftgmac100.c
34475 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
34476 @@ -31,6 +31,8 @@
34477 #include <linux/netdevice.h>
34478 #include <linux/phy.h>
34479 #include <linux/platform_device.h>
34480 +#include <linux/interrupt.h>
34481 +#include <linux/irqreturn.h>
34482 #include <net/ip.h>
34483
34484 #include "ftgmac100.h"
34485 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
34486 index 829b109..4ae5f6a 100644
34487 --- a/drivers/net/ethernet/faraday/ftmac100.c
34488 +++ b/drivers/net/ethernet/faraday/ftmac100.c
34489 @@ -31,6 +31,8 @@
34490 #include <linux/module.h>
34491 #include <linux/netdevice.h>
34492 #include <linux/platform_device.h>
34493 +#include <linux/interrupt.h>
34494 +#include <linux/irqreturn.h>
34495
34496 #include "ftmac100.h"
34497
34498 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
34499 index 1637b98..c42f87b 100644
34500 --- a/drivers/net/ethernet/fealnx.c
34501 +++ b/drivers/net/ethernet/fealnx.c
34502 @@ -150,7 +150,7 @@ struct chip_info {
34503 int flags;
34504 };
34505
34506 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34507 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34508 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34509 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34510 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34511 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
34512 index f82ecf5..7d59ecb 100644
34513 --- a/drivers/net/ethernet/intel/e1000e/hw.h
34514 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
34515 @@ -784,6 +784,7 @@ struct e1000_mac_operations {
34516 void (*config_collision_dist)(struct e1000_hw *);
34517 s32 (*read_mac_addr)(struct e1000_hw *);
34518 };
34519 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34520
34521 /*
34522 * When to use various PHY register access functions:
34523 @@ -824,6 +825,7 @@ struct e1000_phy_operations {
34524 void (*power_up)(struct e1000_hw *);
34525 void (*power_down)(struct e1000_hw *);
34526 };
34527 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34528
34529 /* Function pointers for the NVM. */
34530 struct e1000_nvm_operations {
34531 @@ -836,9 +838,10 @@ struct e1000_nvm_operations {
34532 s32 (*validate)(struct e1000_hw *);
34533 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34534 };
34535 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34536
34537 struct e1000_mac_info {
34538 - struct e1000_mac_operations ops;
34539 + e1000_mac_operations_no_const ops;
34540 u8 addr[ETH_ALEN];
34541 u8 perm_addr[ETH_ALEN];
34542
34543 @@ -879,7 +882,7 @@ struct e1000_mac_info {
34544 };
34545
34546 struct e1000_phy_info {
34547 - struct e1000_phy_operations ops;
34548 + e1000_phy_operations_no_const ops;
34549
34550 enum e1000_phy_type type;
34551
34552 @@ -913,7 +916,7 @@ struct e1000_phy_info {
34553 };
34554
34555 struct e1000_nvm_info {
34556 - struct e1000_nvm_operations ops;
34557 + e1000_nvm_operations_no_const ops;
34558
34559 enum e1000_nvm_type type;
34560 enum e1000_nvm_override override;
34561 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34562 index f67cbd3..cef9e3d 100644
34563 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34564 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34565 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
34566 s32 (*read_mac_addr)(struct e1000_hw *);
34567 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
34568 };
34569 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34570
34571 struct e1000_phy_operations {
34572 s32 (*acquire)(struct e1000_hw *);
34573 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
34574 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34575 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34576 };
34577 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34578
34579 struct e1000_nvm_operations {
34580 s32 (*acquire)(struct e1000_hw *);
34581 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
34582 s32 (*update)(struct e1000_hw *);
34583 s32 (*validate)(struct e1000_hw *);
34584 };
34585 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34586
34587 struct e1000_info {
34588 s32 (*get_invariants)(struct e1000_hw *);
34589 @@ -350,7 +353,7 @@ struct e1000_info {
34590 extern const struct e1000_info e1000_82575_info;
34591
34592 struct e1000_mac_info {
34593 - struct e1000_mac_operations ops;
34594 + e1000_mac_operations_no_const ops;
34595
34596 u8 addr[6];
34597 u8 perm_addr[6];
34598 @@ -388,7 +391,7 @@ struct e1000_mac_info {
34599 };
34600
34601 struct e1000_phy_info {
34602 - struct e1000_phy_operations ops;
34603 + e1000_phy_operations_no_const ops;
34604
34605 enum e1000_phy_type type;
34606
34607 @@ -423,7 +426,7 @@ struct e1000_phy_info {
34608 };
34609
34610 struct e1000_nvm_info {
34611 - struct e1000_nvm_operations ops;
34612 + e1000_nvm_operations_no_const ops;
34613 enum e1000_nvm_type type;
34614 enum e1000_nvm_override override;
34615
34616 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
34617 s32 (*check_for_ack)(struct e1000_hw *, u16);
34618 s32 (*check_for_rst)(struct e1000_hw *, u16);
34619 };
34620 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34621
34622 struct e1000_mbx_stats {
34623 u32 msgs_tx;
34624 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
34625 };
34626
34627 struct e1000_mbx_info {
34628 - struct e1000_mbx_operations ops;
34629 + e1000_mbx_operations_no_const ops;
34630 struct e1000_mbx_stats stats;
34631 u32 timeout;
34632 u32 usec_delay;
34633 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
34634 index 57db3c6..aa825fc 100644
34635 --- a/drivers/net/ethernet/intel/igbvf/vf.h
34636 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
34637 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
34638 s32 (*read_mac_addr)(struct e1000_hw *);
34639 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
34640 };
34641 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34642
34643 struct e1000_mac_info {
34644 - struct e1000_mac_operations ops;
34645 + e1000_mac_operations_no_const ops;
34646 u8 addr[6];
34647 u8 perm_addr[6];
34648
34649 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
34650 s32 (*check_for_ack)(struct e1000_hw *);
34651 s32 (*check_for_rst)(struct e1000_hw *);
34652 };
34653 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34654
34655 struct e1000_mbx_stats {
34656 u32 msgs_tx;
34657 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
34658 };
34659
34660 struct e1000_mbx_info {
34661 - struct e1000_mbx_operations ops;
34662 + e1000_mbx_operations_no_const ops;
34663 struct e1000_mbx_stats stats;
34664 u32 timeout;
34665 u32 usec_delay;
34666 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34667 index 8636e83..ab9bbc3 100644
34668 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34669 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34670 @@ -2710,6 +2710,7 @@ struct ixgbe_eeprom_operations {
34671 s32 (*update_checksum)(struct ixgbe_hw *);
34672 u16 (*calc_checksum)(struct ixgbe_hw *);
34673 };
34674 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
34675
34676 struct ixgbe_mac_operations {
34677 s32 (*init_hw)(struct ixgbe_hw *);
34678 @@ -2773,6 +2774,7 @@ struct ixgbe_mac_operations {
34679 /* Manageability interface */
34680 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
34681 };
34682 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34683
34684 struct ixgbe_phy_operations {
34685 s32 (*identify)(struct ixgbe_hw *);
34686 @@ -2792,9 +2794,10 @@ struct ixgbe_phy_operations {
34687 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
34688 s32 (*check_overtemp)(struct ixgbe_hw *);
34689 };
34690 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
34691
34692 struct ixgbe_eeprom_info {
34693 - struct ixgbe_eeprom_operations ops;
34694 + ixgbe_eeprom_operations_no_const ops;
34695 enum ixgbe_eeprom_type type;
34696 u32 semaphore_delay;
34697 u16 word_size;
34698 @@ -2804,7 +2807,7 @@ struct ixgbe_eeprom_info {
34699
34700 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
34701 struct ixgbe_mac_info {
34702 - struct ixgbe_mac_operations ops;
34703 + ixgbe_mac_operations_no_const ops;
34704 enum ixgbe_mac_type type;
34705 u8 addr[ETH_ALEN];
34706 u8 perm_addr[ETH_ALEN];
34707 @@ -2832,7 +2835,7 @@ struct ixgbe_mac_info {
34708 };
34709
34710 struct ixgbe_phy_info {
34711 - struct ixgbe_phy_operations ops;
34712 + ixgbe_phy_operations_no_const ops;
34713 struct mdio_if_info mdio;
34714 enum ixgbe_phy_type type;
34715 u32 id;
34716 @@ -2860,6 +2863,7 @@ struct ixgbe_mbx_operations {
34717 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
34718 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
34719 };
34720 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34721
34722 struct ixgbe_mbx_stats {
34723 u32 msgs_tx;
34724 @@ -2871,7 +2875,7 @@ struct ixgbe_mbx_stats {
34725 };
34726
34727 struct ixgbe_mbx_info {
34728 - struct ixgbe_mbx_operations ops;
34729 + ixgbe_mbx_operations_no_const ops;
34730 struct ixgbe_mbx_stats stats;
34731 u32 timeout;
34732 u32 usec_delay;
34733 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
34734 index 25c951d..cc7cf33 100644
34735 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
34736 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
34737 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
34738 s32 (*clear_vfta)(struct ixgbe_hw *);
34739 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
34740 };
34741 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34742
34743 enum ixgbe_mac_type {
34744 ixgbe_mac_unknown = 0,
34745 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
34746 };
34747
34748 struct ixgbe_mac_info {
34749 - struct ixgbe_mac_operations ops;
34750 + ixgbe_mac_operations_no_const ops;
34751 u8 addr[6];
34752 u8 perm_addr[6];
34753
34754 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
34755 s32 (*check_for_ack)(struct ixgbe_hw *);
34756 s32 (*check_for_rst)(struct ixgbe_hw *);
34757 };
34758 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34759
34760 struct ixgbe_mbx_stats {
34761 u32 msgs_tx;
34762 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
34763 };
34764
34765 struct ixgbe_mbx_info {
34766 - struct ixgbe_mbx_operations ops;
34767 + ixgbe_mbx_operations_no_const ops;
34768 struct ixgbe_mbx_stats stats;
34769 u32 timeout;
34770 u32 udelay;
34771 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
34772 index 8bb05b4..074796f 100644
34773 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
34774 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
34775 @@ -41,6 +41,7 @@
34776 #include <linux/slab.h>
34777 #include <linux/io-mapping.h>
34778 #include <linux/delay.h>
34779 +#include <linux/sched.h>
34780
34781 #include <linux/mlx4/device.h>
34782 #include <linux/mlx4/doorbell.h>
34783 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34784 index 5046a64..71ca936 100644
34785 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
34786 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34787 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
34788 void (*link_down)(struct __vxge_hw_device *devh);
34789 void (*crit_err)(struct __vxge_hw_device *devh,
34790 enum vxge_hw_event type, u64 ext_data);
34791 -};
34792 +} __no_const;
34793
34794 /*
34795 * struct __vxge_hw_blockpool_entry - Block private data structure
34796 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34797 index 4a518a3..936b334 100644
34798 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34799 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34800 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
34801 struct vxge_hw_mempool_dma *dma_object,
34802 u32 index,
34803 u32 is_last);
34804 -};
34805 +} __no_const;
34806
34807 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
34808 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
34809 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
34810 index ce6b44d..74f10c2 100644
34811 --- a/drivers/net/ethernet/realtek/r8169.c
34812 +++ b/drivers/net/ethernet/realtek/r8169.c
34813 @@ -708,17 +708,17 @@ struct rtl8169_private {
34814 struct mdio_ops {
34815 void (*write)(void __iomem *, int, int);
34816 int (*read)(void __iomem *, int);
34817 - } mdio_ops;
34818 + } __no_const mdio_ops;
34819
34820 struct pll_power_ops {
34821 void (*down)(struct rtl8169_private *);
34822 void (*up)(struct rtl8169_private *);
34823 - } pll_power_ops;
34824 + } __no_const pll_power_ops;
34825
34826 struct jumbo_ops {
34827 void (*enable)(struct rtl8169_private *);
34828 void (*disable)(struct rtl8169_private *);
34829 - } jumbo_ops;
34830 + } __no_const jumbo_ops;
34831
34832 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
34833 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
34834 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
34835 index a9deda8..5507c31 100644
34836 --- a/drivers/net/ethernet/sis/sis190.c
34837 +++ b/drivers/net/ethernet/sis/sis190.c
34838 @@ -1620,7 +1620,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
34839 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
34840 struct net_device *dev)
34841 {
34842 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
34843 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
34844 struct sis190_private *tp = netdev_priv(dev);
34845 struct pci_dev *isa_bridge;
34846 u8 reg, tmp8;
34847 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34848 index c07cfe9..81cbf7e 100644
34849 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34850 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34851 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
34852
34853 writel(value, ioaddr + MMC_CNTRL);
34854
34855 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34856 - MMC_CNTRL, value);
34857 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34858 +// MMC_CNTRL, value);
34859 }
34860
34861 /* To mask all all interrupts.*/
34862 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
34863 index 48d56da..a27e46c 100644
34864 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
34865 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
34866 @@ -1584,7 +1584,7 @@ static const struct file_operations stmmac_rings_status_fops = {
34867 .open = stmmac_sysfs_ring_open,
34868 .read = seq_read,
34869 .llseek = seq_lseek,
34870 - .release = seq_release,
34871 + .release = single_release,
34872 };
34873
34874 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
34875 @@ -1656,7 +1656,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
34876 .open = stmmac_sysfs_dma_cap_open,
34877 .read = seq_read,
34878 .llseek = seq_lseek,
34879 - .release = seq_release,
34880 + .release = single_release,
34881 };
34882
34883 static int stmmac_init_fs(struct net_device *dev)
34884 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
34885 index c358245..8c1de63 100644
34886 --- a/drivers/net/hyperv/hyperv_net.h
34887 +++ b/drivers/net/hyperv/hyperv_net.h
34888 @@ -98,7 +98,7 @@ struct rndis_device {
34889
34890 enum rndis_device_state state;
34891 bool link_state;
34892 - atomic_t new_req_id;
34893 + atomic_unchecked_t new_req_id;
34894
34895 spinlock_t request_lock;
34896 struct list_head req_list;
34897 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
34898 index d6be64b..5d97e3b 100644
34899 --- a/drivers/net/hyperv/rndis_filter.c
34900 +++ b/drivers/net/hyperv/rndis_filter.c
34901 @@ -97,7 +97,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
34902 * template
34903 */
34904 set = &rndis_msg->msg.set_req;
34905 - set->req_id = atomic_inc_return(&dev->new_req_id);
34906 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34907
34908 /* Add to the request list */
34909 spin_lock_irqsave(&dev->request_lock, flags);
34910 @@ -648,7 +648,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
34911
34912 /* Setup the rndis set */
34913 halt = &request->request_msg.msg.halt_req;
34914 - halt->req_id = atomic_inc_return(&dev->new_req_id);
34915 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34916
34917 /* Ignore return since this msg is optional. */
34918 rndis_filter_send_request(dev, request);
34919 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
34920 index cb8fd50..003ec38 100644
34921 --- a/drivers/net/macvtap.c
34922 +++ b/drivers/net/macvtap.c
34923 @@ -528,6 +528,8 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
34924 }
34925 base = (unsigned long)from->iov_base + offset1;
34926 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
34927 + if (i + size >= MAX_SKB_FRAGS)
34928 + return -EFAULT;
34929 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
34930 if ((num_pages != size) ||
34931 (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
34932 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
34933 index 21d7151..8034208 100644
34934 --- a/drivers/net/ppp/ppp_generic.c
34935 +++ b/drivers/net/ppp/ppp_generic.c
34936 @@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34937 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
34938 struct ppp_stats stats;
34939 struct ppp_comp_stats cstats;
34940 - char *vers;
34941
34942 switch (cmd) {
34943 case SIOCGPPPSTATS:
34944 @@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34945 break;
34946
34947 case SIOCGPPPVER:
34948 - vers = PPP_VERSION;
34949 - if (copy_to_user(addr, vers, strlen(vers) + 1))
34950 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
34951 break;
34952 err = 0;
34953 break;
34954 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
34955 index b715e6b..6d2490f 100644
34956 --- a/drivers/net/tokenring/abyss.c
34957 +++ b/drivers/net/tokenring/abyss.c
34958 @@ -450,10 +450,12 @@ static struct pci_driver abyss_driver = {
34959
34960 static int __init abyss_init (void)
34961 {
34962 - abyss_netdev_ops = tms380tr_netdev_ops;
34963 + pax_open_kernel();
34964 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34965
34966 - abyss_netdev_ops.ndo_open = abyss_open;
34967 - abyss_netdev_ops.ndo_stop = abyss_close;
34968 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34969 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34970 + pax_close_kernel();
34971
34972 return pci_register_driver(&abyss_driver);
34973 }
34974 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
34975 index 28adcdf..ae82f35 100644
34976 --- a/drivers/net/tokenring/madgemc.c
34977 +++ b/drivers/net/tokenring/madgemc.c
34978 @@ -742,9 +742,11 @@ static struct mca_driver madgemc_driver = {
34979
34980 static int __init madgemc_init (void)
34981 {
34982 - madgemc_netdev_ops = tms380tr_netdev_ops;
34983 - madgemc_netdev_ops.ndo_open = madgemc_open;
34984 - madgemc_netdev_ops.ndo_stop = madgemc_close;
34985 + pax_open_kernel();
34986 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34987 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34988 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34989 + pax_close_kernel();
34990
34991 return mca_register_driver (&madgemc_driver);
34992 }
34993 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
34994 index 62d90e4..9d84237 100644
34995 --- a/drivers/net/tokenring/proteon.c
34996 +++ b/drivers/net/tokenring/proteon.c
34997 @@ -352,9 +352,11 @@ static int __init proteon_init(void)
34998 struct platform_device *pdev;
34999 int i, num = 0, err = 0;
35000
35001 - proteon_netdev_ops = tms380tr_netdev_ops;
35002 - proteon_netdev_ops.ndo_open = proteon_open;
35003 - proteon_netdev_ops.ndo_stop = tms380tr_close;
35004 + pax_open_kernel();
35005 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35006 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
35007 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
35008 + pax_close_kernel();
35009
35010 err = platform_driver_register(&proteon_driver);
35011 if (err)
35012 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
35013 index ee11e93..c8f19c7 100644
35014 --- a/drivers/net/tokenring/skisa.c
35015 +++ b/drivers/net/tokenring/skisa.c
35016 @@ -362,9 +362,11 @@ static int __init sk_isa_init(void)
35017 struct platform_device *pdev;
35018 int i, num = 0, err = 0;
35019
35020 - sk_isa_netdev_ops = tms380tr_netdev_ops;
35021 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
35022 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35023 + pax_open_kernel();
35024 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35025 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
35026 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35027 + pax_close_kernel();
35028
35029 err = platform_driver_register(&sk_isa_driver);
35030 if (err)
35031 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
35032 index 2d2a688..35f2372 100644
35033 --- a/drivers/net/usb/hso.c
35034 +++ b/drivers/net/usb/hso.c
35035 @@ -71,7 +71,7 @@
35036 #include <asm/byteorder.h>
35037 #include <linux/serial_core.h>
35038 #include <linux/serial.h>
35039 -
35040 +#include <asm/local.h>
35041
35042 #define MOD_AUTHOR "Option Wireless"
35043 #define MOD_DESCRIPTION "USB High Speed Option driver"
35044 @@ -257,7 +257,7 @@ struct hso_serial {
35045
35046 /* from usb_serial_port */
35047 struct tty_struct *tty;
35048 - int open_count;
35049 + local_t open_count;
35050 spinlock_t serial_lock;
35051
35052 int (*write_data) (struct hso_serial *serial);
35053 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
35054 struct urb *urb;
35055
35056 urb = serial->rx_urb[0];
35057 - if (serial->open_count > 0) {
35058 + if (local_read(&serial->open_count) > 0) {
35059 count = put_rxbuf_data(urb, serial);
35060 if (count == -1)
35061 return;
35062 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
35063 DUMP1(urb->transfer_buffer, urb->actual_length);
35064
35065 /* Anyone listening? */
35066 - if (serial->open_count == 0)
35067 + if (local_read(&serial->open_count) == 0)
35068 return;
35069
35070 if (status == 0) {
35071 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35072 spin_unlock_irq(&serial->serial_lock);
35073
35074 /* check for port already opened, if not set the termios */
35075 - serial->open_count++;
35076 - if (serial->open_count == 1) {
35077 + if (local_inc_return(&serial->open_count) == 1) {
35078 serial->rx_state = RX_IDLE;
35079 /* Force default termio settings */
35080 _hso_serial_set_termios(tty, NULL);
35081 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35082 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35083 if (result) {
35084 hso_stop_serial_device(serial->parent);
35085 - serial->open_count--;
35086 + local_dec(&serial->open_count);
35087 kref_put(&serial->parent->ref, hso_serial_ref_free);
35088 }
35089 } else {
35090 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
35091
35092 /* reset the rts and dtr */
35093 /* do the actual close */
35094 - serial->open_count--;
35095 + local_dec(&serial->open_count);
35096
35097 - if (serial->open_count <= 0) {
35098 - serial->open_count = 0;
35099 + if (local_read(&serial->open_count) <= 0) {
35100 + local_set(&serial->open_count, 0);
35101 spin_lock_irq(&serial->serial_lock);
35102 if (serial->tty == tty) {
35103 serial->tty->driver_data = NULL;
35104 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
35105
35106 /* the actual setup */
35107 spin_lock_irqsave(&serial->serial_lock, flags);
35108 - if (serial->open_count)
35109 + if (local_read(&serial->open_count))
35110 _hso_serial_set_termios(tty, old);
35111 else
35112 tty->termios = old;
35113 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
35114 D1("Pending read interrupt on port %d\n", i);
35115 spin_lock(&serial->serial_lock);
35116 if (serial->rx_state == RX_IDLE &&
35117 - serial->open_count > 0) {
35118 + local_read(&serial->open_count) > 0) {
35119 /* Setup and send a ctrl req read on
35120 * port i */
35121 if (!serial->rx_urb_filled[0]) {
35122 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
35123 /* Start all serial ports */
35124 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35125 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35126 - if (dev2ser(serial_table[i])->open_count) {
35127 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
35128 result =
35129 hso_start_serial_device(serial_table[i], GFP_NOIO);
35130 hso_kick_transmit(dev2ser(serial_table[i]));
35131 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
35132 index c54b7d37..af1f359 100644
35133 --- a/drivers/net/wireless/ath/ath.h
35134 +++ b/drivers/net/wireless/ath/ath.h
35135 @@ -119,6 +119,7 @@ struct ath_ops {
35136 void (*write_flush) (void *);
35137 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
35138 };
35139 +typedef struct ath_ops __no_const ath_ops_no_const;
35140
35141 struct ath_common;
35142 struct ath_bus_ops;
35143 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35144 index aa2abaf..5f5152d 100644
35145 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35146 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35147 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35148 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
35149 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
35150
35151 - ACCESS_ONCE(ads->ds_link) = i->link;
35152 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
35153 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
35154 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
35155
35156 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
35157 ctl6 = SM(i->keytype, AR_EncrType);
35158 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35159
35160 if ((i->is_first || i->is_last) &&
35161 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
35162 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
35163 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
35164 | set11nTries(i->rates, 1)
35165 | set11nTries(i->rates, 2)
35166 | set11nTries(i->rates, 3)
35167 | (i->dur_update ? AR_DurUpdateEna : 0)
35168 | SM(0, AR_BurstDur);
35169
35170 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
35171 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
35172 | set11nRate(i->rates, 1)
35173 | set11nRate(i->rates, 2)
35174 | set11nRate(i->rates, 3);
35175 } else {
35176 - ACCESS_ONCE(ads->ds_ctl2) = 0;
35177 - ACCESS_ONCE(ads->ds_ctl3) = 0;
35178 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
35179 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
35180 }
35181
35182 if (!i->is_first) {
35183 - ACCESS_ONCE(ads->ds_ctl0) = 0;
35184 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35185 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35186 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
35187 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35188 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35189 return;
35190 }
35191
35192 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35193 break;
35194 }
35195
35196 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35197 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35198 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35199 | SM(i->txpower, AR_XmitPower)
35200 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35201 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35202 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
35203 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
35204
35205 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35206 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35207 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35208 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35209
35210 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
35211 return;
35212
35213 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35214 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35215 | set11nPktDurRTSCTS(i->rates, 1);
35216
35217 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35218 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35219 | set11nPktDurRTSCTS(i->rates, 3);
35220
35221 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35222 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35223 | set11nRateFlags(i->rates, 1)
35224 | set11nRateFlags(i->rates, 2)
35225 | set11nRateFlags(i->rates, 3)
35226 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35227 index a66a13b..0ef399e 100644
35228 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35229 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35230 @@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35231 (i->qcu << AR_TxQcuNum_S) | desc_len;
35232
35233 checksum += val;
35234 - ACCESS_ONCE(ads->info) = val;
35235 + ACCESS_ONCE_RW(ads->info) = val;
35236
35237 checksum += i->link;
35238 - ACCESS_ONCE(ads->link) = i->link;
35239 + ACCESS_ONCE_RW(ads->link) = i->link;
35240
35241 checksum += i->buf_addr[0];
35242 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
35243 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
35244 checksum += i->buf_addr[1];
35245 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
35246 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
35247 checksum += i->buf_addr[2];
35248 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
35249 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
35250 checksum += i->buf_addr[3];
35251 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
35252 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
35253
35254 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
35255 - ACCESS_ONCE(ads->ctl3) = val;
35256 + ACCESS_ONCE_RW(ads->ctl3) = val;
35257 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
35258 - ACCESS_ONCE(ads->ctl5) = val;
35259 + ACCESS_ONCE_RW(ads->ctl5) = val;
35260 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
35261 - ACCESS_ONCE(ads->ctl7) = val;
35262 + ACCESS_ONCE_RW(ads->ctl7) = val;
35263 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
35264 - ACCESS_ONCE(ads->ctl9) = val;
35265 + ACCESS_ONCE_RW(ads->ctl9) = val;
35266
35267 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
35268 - ACCESS_ONCE(ads->ctl10) = checksum;
35269 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
35270
35271 if (i->is_first || i->is_last) {
35272 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
35273 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
35274 | set11nTries(i->rates, 1)
35275 | set11nTries(i->rates, 2)
35276 | set11nTries(i->rates, 3)
35277 | (i->dur_update ? AR_DurUpdateEna : 0)
35278 | SM(0, AR_BurstDur);
35279
35280 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
35281 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
35282 | set11nRate(i->rates, 1)
35283 | set11nRate(i->rates, 2)
35284 | set11nRate(i->rates, 3);
35285 } else {
35286 - ACCESS_ONCE(ads->ctl13) = 0;
35287 - ACCESS_ONCE(ads->ctl14) = 0;
35288 + ACCESS_ONCE_RW(ads->ctl13) = 0;
35289 + ACCESS_ONCE_RW(ads->ctl14) = 0;
35290 }
35291
35292 ads->ctl20 = 0;
35293 @@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35294
35295 ctl17 = SM(i->keytype, AR_EncrType);
35296 if (!i->is_first) {
35297 - ACCESS_ONCE(ads->ctl11) = 0;
35298 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35299 - ACCESS_ONCE(ads->ctl15) = 0;
35300 - ACCESS_ONCE(ads->ctl16) = 0;
35301 - ACCESS_ONCE(ads->ctl17) = ctl17;
35302 - ACCESS_ONCE(ads->ctl18) = 0;
35303 - ACCESS_ONCE(ads->ctl19) = 0;
35304 + ACCESS_ONCE_RW(ads->ctl11) = 0;
35305 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35306 + ACCESS_ONCE_RW(ads->ctl15) = 0;
35307 + ACCESS_ONCE_RW(ads->ctl16) = 0;
35308 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35309 + ACCESS_ONCE_RW(ads->ctl18) = 0;
35310 + ACCESS_ONCE_RW(ads->ctl19) = 0;
35311 return;
35312 }
35313
35314 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35315 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35316 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35317 | SM(i->txpower, AR_XmitPower)
35318 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35319 @@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35320 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
35321 ctl12 |= SM(val, AR_PAPRDChainMask);
35322
35323 - ACCESS_ONCE(ads->ctl12) = ctl12;
35324 - ACCESS_ONCE(ads->ctl17) = ctl17;
35325 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
35326 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35327
35328 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35329 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35330 | set11nPktDurRTSCTS(i->rates, 1);
35331
35332 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35333 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35334 | set11nPktDurRTSCTS(i->rates, 3);
35335
35336 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
35337 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
35338 | set11nRateFlags(i->rates, 1)
35339 | set11nRateFlags(i->rates, 2)
35340 | set11nRateFlags(i->rates, 3)
35341 | SM(i->rtscts_rate, AR_RTSCTSRate);
35342
35343 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
35344 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
35345 }
35346
35347 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
35348 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
35349 index e88f182..4e57f5d 100644
35350 --- a/drivers/net/wireless/ath/ath9k/hw.h
35351 +++ b/drivers/net/wireless/ath/ath9k/hw.h
35352 @@ -614,7 +614,7 @@ struct ath_hw_private_ops {
35353
35354 /* ANI */
35355 void (*ani_cache_ini_regs)(struct ath_hw *ah);
35356 -};
35357 +} __no_const;
35358
35359 /**
35360 * struct ath_hw_ops - callbacks used by hardware code and driver code
35361 @@ -644,7 +644,7 @@ struct ath_hw_ops {
35362 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
35363 struct ath_hw_antcomb_conf *antconf);
35364
35365 -};
35366 +} __no_const;
35367
35368 struct ath_nf_limits {
35369 s16 max;
35370 @@ -664,7 +664,7 @@ enum ath_cal_list {
35371 #define AH_FASTCC 0x4
35372
35373 struct ath_hw {
35374 - struct ath_ops reg_ops;
35375 + ath_ops_no_const reg_ops;
35376
35377 struct ieee80211_hw *hw;
35378 struct ath_common common;
35379 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35380 index af00e2c..ab04d34 100644
35381 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35382 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35383 @@ -545,7 +545,7 @@ struct phy_func_ptr {
35384 void (*carrsuppr)(struct brcms_phy *);
35385 s32 (*rxsigpwr)(struct brcms_phy *, s32);
35386 void (*detach)(struct brcms_phy *);
35387 -};
35388 +} __no_const;
35389
35390 struct brcms_phy {
35391 struct brcms_phy_pub pubpi_ro;
35392 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
35393 index faec404..a5277f1 100644
35394 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
35395 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
35396 @@ -3611,7 +3611,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
35397 */
35398 if (il3945_mod_params.disable_hw_scan) {
35399 D_INFO("Disabling hw_scan\n");
35400 - il3945_mac_ops.hw_scan = NULL;
35401 + pax_open_kernel();
35402 + *(void **)&il3945_mac_ops.hw_scan = NULL;
35403 + pax_close_kernel();
35404 }
35405
35406 D_INFO("*** LOAD DRIVER ***\n");
35407 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
35408 index b7ce6a6..5649756 100644
35409 --- a/drivers/net/wireless/mac80211_hwsim.c
35410 +++ b/drivers/net/wireless/mac80211_hwsim.c
35411 @@ -1721,9 +1721,11 @@ static int __init init_mac80211_hwsim(void)
35412 return -EINVAL;
35413
35414 if (fake_hw_scan) {
35415 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35416 - mac80211_hwsim_ops.sw_scan_start = NULL;
35417 - mac80211_hwsim_ops.sw_scan_complete = NULL;
35418 + pax_open_kernel();
35419 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35420 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
35421 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
35422 + pax_close_kernel();
35423 }
35424
35425 spin_lock_init(&hwsim_radio_lock);
35426 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
35427 index 35225e9..95e6bf9 100644
35428 --- a/drivers/net/wireless/mwifiex/main.h
35429 +++ b/drivers/net/wireless/mwifiex/main.h
35430 @@ -537,7 +537,7 @@ struct mwifiex_if_ops {
35431 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
35432 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
35433 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
35434 -};
35435 +} __no_const;
35436
35437 struct mwifiex_adapter {
35438 u8 iface_type;
35439 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
35440 index d66e298..55b0a89 100644
35441 --- a/drivers/net/wireless/rndis_wlan.c
35442 +++ b/drivers/net/wireless/rndis_wlan.c
35443 @@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
35444
35445 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
35446
35447 - if (rts_threshold < 0 || rts_threshold > 2347)
35448 + if (rts_threshold > 2347)
35449 rts_threshold = 2347;
35450
35451 tmp = cpu_to_le32(rts_threshold);
35452 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
35453 index 9d8f581..0f6589e 100644
35454 --- a/drivers/net/wireless/wl1251/wl1251.h
35455 +++ b/drivers/net/wireless/wl1251/wl1251.h
35456 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
35457 void (*reset)(struct wl1251 *wl);
35458 void (*enable_irq)(struct wl1251 *wl);
35459 void (*disable_irq)(struct wl1251 *wl);
35460 -};
35461 +} __no_const;
35462
35463 struct wl1251 {
35464 struct ieee80211_hw *hw;
35465 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
35466 index f34b5b2..b5abb9f 100644
35467 --- a/drivers/oprofile/buffer_sync.c
35468 +++ b/drivers/oprofile/buffer_sync.c
35469 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
35470 if (cookie == NO_COOKIE)
35471 offset = pc;
35472 if (cookie == INVALID_COOKIE) {
35473 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35474 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35475 offset = pc;
35476 }
35477 if (cookie != last_cookie) {
35478 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
35479 /* add userspace sample */
35480
35481 if (!mm) {
35482 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
35483 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35484 return 0;
35485 }
35486
35487 cookie = lookup_dcookie(mm, s->eip, &offset);
35488
35489 if (cookie == INVALID_COOKIE) {
35490 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35491 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35492 return 0;
35493 }
35494
35495 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
35496 /* ignore backtraces if failed to add a sample */
35497 if (state == sb_bt_start) {
35498 state = sb_bt_ignore;
35499 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35500 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35501 }
35502 }
35503 release_mm(mm);
35504 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
35505 index c0cc4e7..44d4e54 100644
35506 --- a/drivers/oprofile/event_buffer.c
35507 +++ b/drivers/oprofile/event_buffer.c
35508 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
35509 }
35510
35511 if (buffer_pos == buffer_size) {
35512 - atomic_inc(&oprofile_stats.event_lost_overflow);
35513 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35514 return;
35515 }
35516
35517 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35518 index ed2c3ec..deda85a 100644
35519 --- a/drivers/oprofile/oprof.c
35520 +++ b/drivers/oprofile/oprof.c
35521 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35522 if (oprofile_ops.switch_events())
35523 return;
35524
35525 - atomic_inc(&oprofile_stats.multiplex_counter);
35526 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35527 start_switch_worker();
35528 }
35529
35530 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35531 index 917d28e..d62d981 100644
35532 --- a/drivers/oprofile/oprofile_stats.c
35533 +++ b/drivers/oprofile/oprofile_stats.c
35534 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35535 cpu_buf->sample_invalid_eip = 0;
35536 }
35537
35538 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35539 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35540 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
35541 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35542 - atomic_set(&oprofile_stats.multiplex_counter, 0);
35543 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35544 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35545 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35546 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35547 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35548 }
35549
35550
35551 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35552 index 38b6fc0..b5cbfce 100644
35553 --- a/drivers/oprofile/oprofile_stats.h
35554 +++ b/drivers/oprofile/oprofile_stats.h
35555 @@ -13,11 +13,11 @@
35556 #include <linux/atomic.h>
35557
35558 struct oprofile_stat_struct {
35559 - atomic_t sample_lost_no_mm;
35560 - atomic_t sample_lost_no_mapping;
35561 - atomic_t bt_lost_no_mapping;
35562 - atomic_t event_lost_overflow;
35563 - atomic_t multiplex_counter;
35564 + atomic_unchecked_t sample_lost_no_mm;
35565 + atomic_unchecked_t sample_lost_no_mapping;
35566 + atomic_unchecked_t bt_lost_no_mapping;
35567 + atomic_unchecked_t event_lost_overflow;
35568 + atomic_unchecked_t multiplex_counter;
35569 };
35570
35571 extern struct oprofile_stat_struct oprofile_stats;
35572 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35573 index 849357c..b83c1e0 100644
35574 --- a/drivers/oprofile/oprofilefs.c
35575 +++ b/drivers/oprofile/oprofilefs.c
35576 @@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
35577
35578
35579 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35580 - char const *name, atomic_t *val)
35581 + char const *name, atomic_unchecked_t *val)
35582 {
35583 return __oprofilefs_create_file(sb, root, name,
35584 &atomic_ro_fops, 0444, val);
35585 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35586 index 3f56bc0..707d642 100644
35587 --- a/drivers/parport/procfs.c
35588 +++ b/drivers/parport/procfs.c
35589 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35590
35591 *ppos += len;
35592
35593 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35594 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35595 }
35596
35597 #ifdef CONFIG_PARPORT_1284
35598 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35599
35600 *ppos += len;
35601
35602 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35603 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35604 }
35605 #endif /* IEEE1284.3 support. */
35606
35607 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35608 index 9fff878..ad0ad53 100644
35609 --- a/drivers/pci/hotplug/cpci_hotplug.h
35610 +++ b/drivers/pci/hotplug/cpci_hotplug.h
35611 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35612 int (*hardware_test) (struct slot* slot, u32 value);
35613 u8 (*get_power) (struct slot* slot);
35614 int (*set_power) (struct slot* slot, int value);
35615 -};
35616 +} __no_const;
35617
35618 struct cpci_hp_controller {
35619 unsigned int irq;
35620 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35621 index 76ba8a1..20ca857 100644
35622 --- a/drivers/pci/hotplug/cpqphp_nvram.c
35623 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
35624 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
35625
35626 void compaq_nvram_init (void __iomem *rom_start)
35627 {
35628 +
35629 +#ifndef CONFIG_PAX_KERNEXEC
35630 if (rom_start) {
35631 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35632 }
35633 +#endif
35634 +
35635 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35636
35637 /* initialize our int15 lock */
35638 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35639 index b500840..d7159d3 100644
35640 --- a/drivers/pci/pcie/aspm.c
35641 +++ b/drivers/pci/pcie/aspm.c
35642 @@ -27,9 +27,9 @@
35643 #define MODULE_PARAM_PREFIX "pcie_aspm."
35644
35645 /* Note: those are not register definitions */
35646 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35647 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35648 -#define ASPM_STATE_L1 (4) /* L1 state */
35649 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35650 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35651 +#define ASPM_STATE_L1 (4U) /* L1 state */
35652 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35653 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35654
35655 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35656 index 5e1ca3c..08082fe 100644
35657 --- a/drivers/pci/probe.c
35658 +++ b/drivers/pci/probe.c
35659 @@ -215,7 +215,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
35660 u16 orig_cmd;
35661 struct pci_bus_region region;
35662
35663 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35664 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35665
35666 if (!dev->mmio_always_on) {
35667 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
35668 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35669 index 27911b5..5b6db88 100644
35670 --- a/drivers/pci/proc.c
35671 +++ b/drivers/pci/proc.c
35672 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
35673 static int __init pci_proc_init(void)
35674 {
35675 struct pci_dev *dev = NULL;
35676 +
35677 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
35678 +#ifdef CONFIG_GRKERNSEC_PROC_USER
35679 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35680 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35681 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35682 +#endif
35683 +#else
35684 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35685 +#endif
35686 proc_create("devices", 0, proc_bus_pci_dir,
35687 &proc_bus_pci_dev_operations);
35688 proc_initialized = 1;
35689 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
35690 index d68c000..f6094ca 100644
35691 --- a/drivers/platform/x86/thinkpad_acpi.c
35692 +++ b/drivers/platform/x86/thinkpad_acpi.c
35693 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
35694 return 0;
35695 }
35696
35697 -void static hotkey_mask_warn_incomplete_mask(void)
35698 +static void hotkey_mask_warn_incomplete_mask(void)
35699 {
35700 /* log only what the user can fix... */
35701 const u32 wantedmask = hotkey_driver_mask &
35702 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
35703 }
35704 }
35705
35706 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35707 - struct tp_nvram_state *newn,
35708 - const u32 event_mask)
35709 -{
35710 -
35711 #define TPACPI_COMPARE_KEY(__scancode, __member) \
35712 do { \
35713 if ((event_mask & (1 << __scancode)) && \
35714 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35715 tpacpi_hotkey_send_key(__scancode); \
35716 } while (0)
35717
35718 - void issue_volchange(const unsigned int oldvol,
35719 - const unsigned int newvol)
35720 - {
35721 - unsigned int i = oldvol;
35722 +static void issue_volchange(const unsigned int oldvol,
35723 + const unsigned int newvol,
35724 + const u32 event_mask)
35725 +{
35726 + unsigned int i = oldvol;
35727
35728 - while (i > newvol) {
35729 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35730 - i--;
35731 - }
35732 - while (i < newvol) {
35733 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35734 - i++;
35735 - }
35736 + while (i > newvol) {
35737 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35738 + i--;
35739 }
35740 + while (i < newvol) {
35741 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35742 + i++;
35743 + }
35744 +}
35745
35746 - void issue_brightnesschange(const unsigned int oldbrt,
35747 - const unsigned int newbrt)
35748 - {
35749 - unsigned int i = oldbrt;
35750 +static void issue_brightnesschange(const unsigned int oldbrt,
35751 + const unsigned int newbrt,
35752 + const u32 event_mask)
35753 +{
35754 + unsigned int i = oldbrt;
35755
35756 - while (i > newbrt) {
35757 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35758 - i--;
35759 - }
35760 - while (i < newbrt) {
35761 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35762 - i++;
35763 - }
35764 + while (i > newbrt) {
35765 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35766 + i--;
35767 + }
35768 + while (i < newbrt) {
35769 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35770 + i++;
35771 }
35772 +}
35773
35774 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35775 + struct tp_nvram_state *newn,
35776 + const u32 event_mask)
35777 +{
35778 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
35779 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
35780 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
35781 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35782 oldn->volume_level != newn->volume_level) {
35783 /* recently muted, or repeated mute keypress, or
35784 * multiple presses ending in mute */
35785 - issue_volchange(oldn->volume_level, newn->volume_level);
35786 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35787 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
35788 }
35789 } else {
35790 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35791 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35792 }
35793 if (oldn->volume_level != newn->volume_level) {
35794 - issue_volchange(oldn->volume_level, newn->volume_level);
35795 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35796 } else if (oldn->volume_toggle != newn->volume_toggle) {
35797 /* repeated vol up/down keypress at end of scale ? */
35798 if (newn->volume_level == 0)
35799 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35800 /* handle brightness */
35801 if (oldn->brightness_level != newn->brightness_level) {
35802 issue_brightnesschange(oldn->brightness_level,
35803 - newn->brightness_level);
35804 + newn->brightness_level,
35805 + event_mask);
35806 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
35807 /* repeated key presses that didn't change state */
35808 if (newn->brightness_level == 0)
35809 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35810 && !tp_features.bright_unkfw)
35811 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35812 }
35813 +}
35814
35815 #undef TPACPI_COMPARE_KEY
35816 #undef TPACPI_MAY_SEND_KEY
35817 -}
35818
35819 /*
35820 * Polling driver
35821 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
35822 index 769d265..a3a05ca 100644
35823 --- a/drivers/pnp/pnpbios/bioscalls.c
35824 +++ b/drivers/pnp/pnpbios/bioscalls.c
35825 @@ -58,7 +58,7 @@ do { \
35826 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35827 } while(0)
35828
35829 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35830 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35831 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35832
35833 /*
35834 @@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35835
35836 cpu = get_cpu();
35837 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35838 +
35839 + pax_open_kernel();
35840 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35841 + pax_close_kernel();
35842
35843 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35844 spin_lock_irqsave(&pnp_bios_lock, flags);
35845 @@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35846 :"memory");
35847 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35848
35849 + pax_open_kernel();
35850 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35851 + pax_close_kernel();
35852 +
35853 put_cpu();
35854
35855 /* If we get here and this is set then the PnP BIOS faulted on us. */
35856 @@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
35857 return status;
35858 }
35859
35860 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
35861 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35862 {
35863 int i;
35864
35865 @@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35866 pnp_bios_callpoint.offset = header->fields.pm16offset;
35867 pnp_bios_callpoint.segment = PNP_CS16;
35868
35869 + pax_open_kernel();
35870 +
35871 for_each_possible_cpu(i) {
35872 struct desc_struct *gdt = get_cpu_gdt_table(i);
35873 if (!gdt)
35874 @@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35875 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35876 (unsigned long)__va(header->fields.pm16dseg));
35877 }
35878 +
35879 + pax_close_kernel();
35880 }
35881 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
35882 index b0ecacb..7c9da2e 100644
35883 --- a/drivers/pnp/resource.c
35884 +++ b/drivers/pnp/resource.c
35885 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
35886 return 1;
35887
35888 /* check if the resource is valid */
35889 - if (*irq < 0 || *irq > 15)
35890 + if (*irq > 15)
35891 return 0;
35892
35893 /* check if the resource is reserved */
35894 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
35895 return 1;
35896
35897 /* check if the resource is valid */
35898 - if (*dma < 0 || *dma == 4 || *dma > 7)
35899 + if (*dma == 4 || *dma > 7)
35900 return 0;
35901
35902 /* check if the resource is reserved */
35903 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
35904 index 222ccd8..6275fa5 100644
35905 --- a/drivers/power/bq27x00_battery.c
35906 +++ b/drivers/power/bq27x00_battery.c
35907 @@ -72,7 +72,7 @@
35908 struct bq27x00_device_info;
35909 struct bq27x00_access_methods {
35910 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
35911 -};
35912 +} __no_const;
35913
35914 enum bq27x00_chip { BQ27000, BQ27500 };
35915
35916 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
35917 index 4c5b053..104263e 100644
35918 --- a/drivers/regulator/max8660.c
35919 +++ b/drivers/regulator/max8660.c
35920 @@ -385,8 +385,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
35921 max8660->shadow_regs[MAX8660_OVER1] = 5;
35922 } else {
35923 /* Otherwise devices can be toggled via software */
35924 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
35925 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
35926 + pax_open_kernel();
35927 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
35928 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
35929 + pax_close_kernel();
35930 }
35931
35932 /*
35933 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
35934 index 845aa22..99ec402 100644
35935 --- a/drivers/regulator/mc13892-regulator.c
35936 +++ b/drivers/regulator/mc13892-regulator.c
35937 @@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
35938 }
35939 mc13xxx_unlock(mc13892);
35940
35941 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35942 + pax_open_kernel();
35943 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35944 = mc13892_vcam_set_mode;
35945 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35946 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35947 = mc13892_vcam_get_mode;
35948 + pax_close_kernel();
35949
35950 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
35951 ARRAY_SIZE(mc13892_regulators));
35952 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
35953 index cace6d3..f623fda 100644
35954 --- a/drivers/rtc/rtc-dev.c
35955 +++ b/drivers/rtc/rtc-dev.c
35956 @@ -14,6 +14,7 @@
35957 #include <linux/module.h>
35958 #include <linux/rtc.h>
35959 #include <linux/sched.h>
35960 +#include <linux/grsecurity.h>
35961 #include "rtc-core.h"
35962
35963 static dev_t rtc_devt;
35964 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
35965 if (copy_from_user(&tm, uarg, sizeof(tm)))
35966 return -EFAULT;
35967
35968 + gr_log_timechange();
35969 +
35970 return rtc_set_time(rtc, &tm);
35971
35972 case RTC_PIE_ON:
35973 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
35974 index 3fcf627..f334910 100644
35975 --- a/drivers/scsi/aacraid/aacraid.h
35976 +++ b/drivers/scsi/aacraid/aacraid.h
35977 @@ -492,7 +492,7 @@ struct adapter_ops
35978 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
35979 /* Administrative operations */
35980 int (*adapter_comm)(struct aac_dev * dev, int comm);
35981 -};
35982 +} __no_const;
35983
35984 /*
35985 * Define which interrupt handler needs to be installed
35986 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
35987 index 0d279c44..3d25a97 100644
35988 --- a/drivers/scsi/aacraid/linit.c
35989 +++ b/drivers/scsi/aacraid/linit.c
35990 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
35991 #elif defined(__devinitconst)
35992 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35993 #else
35994 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
35995 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35996 #endif
35997 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
35998 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
35999 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
36000 index ff80552..1c4120c 100644
36001 --- a/drivers/scsi/aic94xx/aic94xx_init.c
36002 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
36003 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
36004 .lldd_ata_set_dmamode = asd_set_dmamode,
36005 };
36006
36007 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
36008 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
36009 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
36010 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
36011 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
36012 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
36013 index 4ad7e36..d004679 100644
36014 --- a/drivers/scsi/bfa/bfa.h
36015 +++ b/drivers/scsi/bfa/bfa.h
36016 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
36017 u32 *end);
36018 int cpe_vec_q0;
36019 int rme_vec_q0;
36020 -};
36021 +} __no_const;
36022 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36023
36024 struct bfa_faa_cbfn_s {
36025 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
36026 index f0f80e2..8ec946b 100644
36027 --- a/drivers/scsi/bfa/bfa_fcpim.c
36028 +++ b/drivers/scsi/bfa/bfa_fcpim.c
36029 @@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
36030
36031 bfa_iotag_attach(fcp);
36032
36033 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
36034 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
36035 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
36036 (fcp->num_itns * sizeof(struct bfa_itn_s));
36037 memset(fcp->itn_arr, 0,
36038 @@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36039 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
36040 {
36041 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
36042 - struct bfa_itn_s *itn;
36043 + bfa_itn_s_no_const *itn;
36044
36045 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
36046 itn->isr = isr;
36047 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
36048 index 36f26da..38a34a8 100644
36049 --- a/drivers/scsi/bfa/bfa_fcpim.h
36050 +++ b/drivers/scsi/bfa/bfa_fcpim.h
36051 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
36052 struct bfa_itn_s {
36053 bfa_isr_func_t isr;
36054 };
36055 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
36056
36057 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36058 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
36059 @@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
36060 struct list_head iotag_tio_free_q; /* free IO resources */
36061 struct list_head iotag_unused_q; /* unused IO resources*/
36062 struct bfa_iotag_s *iotag_arr;
36063 - struct bfa_itn_s *itn_arr;
36064 + bfa_itn_s_no_const *itn_arr;
36065 int num_ioim_reqs;
36066 int num_fwtio_reqs;
36067 int num_itns;
36068 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
36069 index 1a99d4b..e85d64b 100644
36070 --- a/drivers/scsi/bfa/bfa_ioc.h
36071 +++ b/drivers/scsi/bfa/bfa_ioc.h
36072 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
36073 bfa_ioc_disable_cbfn_t disable_cbfn;
36074 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36075 bfa_ioc_reset_cbfn_t reset_cbfn;
36076 -};
36077 +} __no_const;
36078
36079 /*
36080 * IOC event notification mechanism.
36081 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
36082 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
36083 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
36084 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
36085 -};
36086 +} __no_const;
36087
36088 /*
36089 * Queue element to wait for room in request queue. FIFO order is
36090 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
36091 index a3a056a..b9bbc2f 100644
36092 --- a/drivers/scsi/hosts.c
36093 +++ b/drivers/scsi/hosts.c
36094 @@ -42,7 +42,7 @@
36095 #include "scsi_logging.h"
36096
36097
36098 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
36099 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36100
36101
36102 static void scsi_host_cls_release(struct device *dev)
36103 @@ -360,7 +360,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
36104 * subtract one because we increment first then return, but we need to
36105 * know what the next host number was before increment
36106 */
36107 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36108 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36109 shost->dma_channel = 0xff;
36110
36111 /* These three are default values which can be overridden */
36112 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
36113 index 500e20d..ebd3059 100644
36114 --- a/drivers/scsi/hpsa.c
36115 +++ b/drivers/scsi/hpsa.c
36116 @@ -521,7 +521,7 @@ static inline u32 next_command(struct ctlr_info *h)
36117 u32 a;
36118
36119 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36120 - return h->access.command_completed(h);
36121 + return h->access->command_completed(h);
36122
36123 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36124 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36125 @@ -3002,7 +3002,7 @@ static void start_io(struct ctlr_info *h)
36126 while (!list_empty(&h->reqQ)) {
36127 c = list_entry(h->reqQ.next, struct CommandList, list);
36128 /* can't do anything if fifo is full */
36129 - if ((h->access.fifo_full(h))) {
36130 + if ((h->access->fifo_full(h))) {
36131 dev_warn(&h->pdev->dev, "fifo full\n");
36132 break;
36133 }
36134 @@ -3012,7 +3012,7 @@ static void start_io(struct ctlr_info *h)
36135 h->Qdepth--;
36136
36137 /* Tell the controller execute command */
36138 - h->access.submit_command(h, c);
36139 + h->access->submit_command(h, c);
36140
36141 /* Put job onto the completed Q */
36142 addQ(&h->cmpQ, c);
36143 @@ -3021,17 +3021,17 @@ static void start_io(struct ctlr_info *h)
36144
36145 static inline unsigned long get_next_completion(struct ctlr_info *h)
36146 {
36147 - return h->access.command_completed(h);
36148 + return h->access->command_completed(h);
36149 }
36150
36151 static inline bool interrupt_pending(struct ctlr_info *h)
36152 {
36153 - return h->access.intr_pending(h);
36154 + return h->access->intr_pending(h);
36155 }
36156
36157 static inline long interrupt_not_for_us(struct ctlr_info *h)
36158 {
36159 - return (h->access.intr_pending(h) == 0) ||
36160 + return (h->access->intr_pending(h) == 0) ||
36161 (h->interrupts_enabled == 0);
36162 }
36163
36164 @@ -3930,7 +3930,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
36165 if (prod_index < 0)
36166 return -ENODEV;
36167 h->product_name = products[prod_index].product_name;
36168 - h->access = *(products[prod_index].access);
36169 + h->access = products[prod_index].access;
36170
36171 if (hpsa_board_disabled(h->pdev)) {
36172 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36173 @@ -4175,7 +4175,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
36174
36175 assert_spin_locked(&lockup_detector_lock);
36176 remove_ctlr_from_lockup_detector_list(h);
36177 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36178 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36179 spin_lock_irqsave(&h->lock, flags);
36180 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
36181 spin_unlock_irqrestore(&h->lock, flags);
36182 @@ -4355,7 +4355,7 @@ reinit_after_soft_reset:
36183 }
36184
36185 /* make sure the board interrupts are off */
36186 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36187 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36188
36189 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
36190 goto clean2;
36191 @@ -4389,7 +4389,7 @@ reinit_after_soft_reset:
36192 * fake ones to scoop up any residual completions.
36193 */
36194 spin_lock_irqsave(&h->lock, flags);
36195 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36196 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36197 spin_unlock_irqrestore(&h->lock, flags);
36198 free_irq(h->intr[h->intr_mode], h);
36199 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
36200 @@ -4408,9 +4408,9 @@ reinit_after_soft_reset:
36201 dev_info(&h->pdev->dev, "Board READY.\n");
36202 dev_info(&h->pdev->dev,
36203 "Waiting for stale completions to drain.\n");
36204 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36205 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36206 msleep(10000);
36207 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36208 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36209
36210 rc = controller_reset_failed(h->cfgtable);
36211 if (rc)
36212 @@ -4431,7 +4431,7 @@ reinit_after_soft_reset:
36213 }
36214
36215 /* Turn the interrupts on so we can service requests */
36216 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36217 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36218
36219 hpsa_hba_inquiry(h);
36220 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
36221 @@ -4483,7 +4483,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
36222 * To write all data in the battery backed cache to disks
36223 */
36224 hpsa_flush_cache(h);
36225 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36226 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36227 free_irq(h->intr[h->intr_mode], h);
36228 #ifdef CONFIG_PCI_MSI
36229 if (h->msix_vector)
36230 @@ -4657,7 +4657,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
36231 return;
36232 }
36233 /* Change the access methods to the performant access methods */
36234 - h->access = SA5_performant_access;
36235 + h->access = &SA5_performant_access;
36236 h->transMethod = CFGTBL_Trans_Performant;
36237 }
36238
36239 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
36240 index 7b28d54..952f23a 100644
36241 --- a/drivers/scsi/hpsa.h
36242 +++ b/drivers/scsi/hpsa.h
36243 @@ -72,7 +72,7 @@ struct ctlr_info {
36244 unsigned int msix_vector;
36245 unsigned int msi_vector;
36246 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
36247 - struct access_method access;
36248 + struct access_method *access;
36249
36250 /* queue and queue Info */
36251 struct list_head reqQ;
36252 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
36253 index f2df059..a3a9930 100644
36254 --- a/drivers/scsi/ips.h
36255 +++ b/drivers/scsi/ips.h
36256 @@ -1027,7 +1027,7 @@ typedef struct {
36257 int (*intr)(struct ips_ha *);
36258 void (*enableint)(struct ips_ha *);
36259 uint32_t (*statupd)(struct ips_ha *);
36260 -} ips_hw_func_t;
36261 +} __no_const ips_hw_func_t;
36262
36263 typedef struct ips_ha {
36264 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36265 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
36266 index aceffad..c35c08d 100644
36267 --- a/drivers/scsi/libfc/fc_exch.c
36268 +++ b/drivers/scsi/libfc/fc_exch.c
36269 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
36270 * all together if not used XXX
36271 */
36272 struct {
36273 - atomic_t no_free_exch;
36274 - atomic_t no_free_exch_xid;
36275 - atomic_t xid_not_found;
36276 - atomic_t xid_busy;
36277 - atomic_t seq_not_found;
36278 - atomic_t non_bls_resp;
36279 + atomic_unchecked_t no_free_exch;
36280 + atomic_unchecked_t no_free_exch_xid;
36281 + atomic_unchecked_t xid_not_found;
36282 + atomic_unchecked_t xid_busy;
36283 + atomic_unchecked_t seq_not_found;
36284 + atomic_unchecked_t non_bls_resp;
36285 } stats;
36286 };
36287
36288 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
36289 /* allocate memory for exchange */
36290 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36291 if (!ep) {
36292 - atomic_inc(&mp->stats.no_free_exch);
36293 + atomic_inc_unchecked(&mp->stats.no_free_exch);
36294 goto out;
36295 }
36296 memset(ep, 0, sizeof(*ep));
36297 @@ -780,7 +780,7 @@ out:
36298 return ep;
36299 err:
36300 spin_unlock_bh(&pool->lock);
36301 - atomic_inc(&mp->stats.no_free_exch_xid);
36302 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36303 mempool_free(ep, mp->ep_pool);
36304 return NULL;
36305 }
36306 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36307 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36308 ep = fc_exch_find(mp, xid);
36309 if (!ep) {
36310 - atomic_inc(&mp->stats.xid_not_found);
36311 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36312 reject = FC_RJT_OX_ID;
36313 goto out;
36314 }
36315 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36316 ep = fc_exch_find(mp, xid);
36317 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36318 if (ep) {
36319 - atomic_inc(&mp->stats.xid_busy);
36320 + atomic_inc_unchecked(&mp->stats.xid_busy);
36321 reject = FC_RJT_RX_ID;
36322 goto rel;
36323 }
36324 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36325 }
36326 xid = ep->xid; /* get our XID */
36327 } else if (!ep) {
36328 - atomic_inc(&mp->stats.xid_not_found);
36329 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36330 reject = FC_RJT_RX_ID; /* XID not found */
36331 goto out;
36332 }
36333 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36334 } else {
36335 sp = &ep->seq;
36336 if (sp->id != fh->fh_seq_id) {
36337 - atomic_inc(&mp->stats.seq_not_found);
36338 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36339 if (f_ctl & FC_FC_END_SEQ) {
36340 /*
36341 * Update sequence_id based on incoming last
36342 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36343
36344 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36345 if (!ep) {
36346 - atomic_inc(&mp->stats.xid_not_found);
36347 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36348 goto out;
36349 }
36350 if (ep->esb_stat & ESB_ST_COMPLETE) {
36351 - atomic_inc(&mp->stats.xid_not_found);
36352 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36353 goto rel;
36354 }
36355 if (ep->rxid == FC_XID_UNKNOWN)
36356 ep->rxid = ntohs(fh->fh_rx_id);
36357 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36358 - atomic_inc(&mp->stats.xid_not_found);
36359 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36360 goto rel;
36361 }
36362 if (ep->did != ntoh24(fh->fh_s_id) &&
36363 ep->did != FC_FID_FLOGI) {
36364 - atomic_inc(&mp->stats.xid_not_found);
36365 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36366 goto rel;
36367 }
36368 sof = fr_sof(fp);
36369 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36370 sp->ssb_stat |= SSB_ST_RESP;
36371 sp->id = fh->fh_seq_id;
36372 } else if (sp->id != fh->fh_seq_id) {
36373 - atomic_inc(&mp->stats.seq_not_found);
36374 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36375 goto rel;
36376 }
36377
36378 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36379 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36380
36381 if (!sp)
36382 - atomic_inc(&mp->stats.xid_not_found);
36383 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36384 else
36385 - atomic_inc(&mp->stats.non_bls_resp);
36386 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
36387
36388 fc_frame_free(fp);
36389 }
36390 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
36391 index 441d88a..689ad71 100644
36392 --- a/drivers/scsi/libsas/sas_ata.c
36393 +++ b/drivers/scsi/libsas/sas_ata.c
36394 @@ -529,7 +529,7 @@ static struct ata_port_operations sas_sata_ops = {
36395 .postreset = ata_std_postreset,
36396 .error_handler = ata_std_error_handler,
36397 .post_internal_cmd = sas_ata_post_internal,
36398 - .qc_defer = ata_std_qc_defer,
36399 + .qc_defer = ata_std_qc_defer,
36400 .qc_prep = ata_noop_qc_prep,
36401 .qc_issue = sas_ata_qc_issue,
36402 .qc_fill_rtf = sas_ata_qc_fill_rtf,
36403 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
36404 index 3a1ffdd..8eb7c71 100644
36405 --- a/drivers/scsi/lpfc/lpfc.h
36406 +++ b/drivers/scsi/lpfc/lpfc.h
36407 @@ -413,7 +413,7 @@ struct lpfc_vport {
36408 struct dentry *debug_nodelist;
36409 struct dentry *vport_debugfs_root;
36410 struct lpfc_debugfs_trc *disc_trc;
36411 - atomic_t disc_trc_cnt;
36412 + atomic_unchecked_t disc_trc_cnt;
36413 #endif
36414 uint8_t stat_data_enabled;
36415 uint8_t stat_data_blocked;
36416 @@ -826,8 +826,8 @@ struct lpfc_hba {
36417 struct timer_list fabric_block_timer;
36418 unsigned long bit_flags;
36419 #define FABRIC_COMANDS_BLOCKED 0
36420 - atomic_t num_rsrc_err;
36421 - atomic_t num_cmd_success;
36422 + atomic_unchecked_t num_rsrc_err;
36423 + atomic_unchecked_t num_cmd_success;
36424 unsigned long last_rsrc_error_time;
36425 unsigned long last_ramp_down_time;
36426 unsigned long last_ramp_up_time;
36427 @@ -863,7 +863,7 @@ struct lpfc_hba {
36428
36429 struct dentry *debug_slow_ring_trc;
36430 struct lpfc_debugfs_trc *slow_ring_trc;
36431 - atomic_t slow_ring_trc_cnt;
36432 + atomic_unchecked_t slow_ring_trc_cnt;
36433 /* iDiag debugfs sub-directory */
36434 struct dentry *idiag_root;
36435 struct dentry *idiag_pci_cfg;
36436 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36437 index af04b0d..8f1a97e 100644
36438 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
36439 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36440 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
36441
36442 #include <linux/debugfs.h>
36443
36444 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36445 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36446 static unsigned long lpfc_debugfs_start_time = 0L;
36447
36448 /* iDiag */
36449 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
36450 lpfc_debugfs_enable = 0;
36451
36452 len = 0;
36453 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36454 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36455 (lpfc_debugfs_max_disc_trc - 1);
36456 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36457 dtp = vport->disc_trc + i;
36458 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
36459 lpfc_debugfs_enable = 0;
36460
36461 len = 0;
36462 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36463 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36464 (lpfc_debugfs_max_slow_ring_trc - 1);
36465 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36466 dtp = phba->slow_ring_trc + i;
36467 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36468 !vport || !vport->disc_trc)
36469 return;
36470
36471 - index = atomic_inc_return(&vport->disc_trc_cnt) &
36472 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36473 (lpfc_debugfs_max_disc_trc - 1);
36474 dtp = vport->disc_trc + index;
36475 dtp->fmt = fmt;
36476 dtp->data1 = data1;
36477 dtp->data2 = data2;
36478 dtp->data3 = data3;
36479 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36480 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36481 dtp->jif = jiffies;
36482 #endif
36483 return;
36484 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
36485 !phba || !phba->slow_ring_trc)
36486 return;
36487
36488 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36489 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36490 (lpfc_debugfs_max_slow_ring_trc - 1);
36491 dtp = phba->slow_ring_trc + index;
36492 dtp->fmt = fmt;
36493 dtp->data1 = data1;
36494 dtp->data2 = data2;
36495 dtp->data3 = data3;
36496 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36497 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36498 dtp->jif = jiffies;
36499 #endif
36500 return;
36501 @@ -4090,7 +4090,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36502 "slow_ring buffer\n");
36503 goto debug_failed;
36504 }
36505 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36506 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36507 memset(phba->slow_ring_trc, 0,
36508 (sizeof(struct lpfc_debugfs_trc) *
36509 lpfc_debugfs_max_slow_ring_trc));
36510 @@ -4136,7 +4136,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36511 "buffer\n");
36512 goto debug_failed;
36513 }
36514 - atomic_set(&vport->disc_trc_cnt, 0);
36515 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36516
36517 snprintf(name, sizeof(name), "discovery_trace");
36518 vport->debug_disc_trc =
36519 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36520 index 9598fdc..7e9f3d9 100644
36521 --- a/drivers/scsi/lpfc/lpfc_init.c
36522 +++ b/drivers/scsi/lpfc/lpfc_init.c
36523 @@ -10266,8 +10266,10 @@ lpfc_init(void)
36524 "misc_register returned with status %d", error);
36525
36526 if (lpfc_enable_npiv) {
36527 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36528 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36529 + pax_open_kernel();
36530 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36531 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36532 + pax_close_kernel();
36533 }
36534 lpfc_transport_template =
36535 fc_attach_transport(&lpfc_transport_functions);
36536 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36537 index 88f3a83..686d3fa 100644
36538 --- a/drivers/scsi/lpfc/lpfc_scsi.c
36539 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
36540 @@ -311,7 +311,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36541 uint32_t evt_posted;
36542
36543 spin_lock_irqsave(&phba->hbalock, flags);
36544 - atomic_inc(&phba->num_rsrc_err);
36545 + atomic_inc_unchecked(&phba->num_rsrc_err);
36546 phba->last_rsrc_error_time = jiffies;
36547
36548 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36549 @@ -352,7 +352,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36550 unsigned long flags;
36551 struct lpfc_hba *phba = vport->phba;
36552 uint32_t evt_posted;
36553 - atomic_inc(&phba->num_cmd_success);
36554 + atomic_inc_unchecked(&phba->num_cmd_success);
36555
36556 if (vport->cfg_lun_queue_depth <= queue_depth)
36557 return;
36558 @@ -396,8 +396,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36559 unsigned long num_rsrc_err, num_cmd_success;
36560 int i;
36561
36562 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36563 - num_cmd_success = atomic_read(&phba->num_cmd_success);
36564 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36565 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36566
36567 vports = lpfc_create_vport_work_array(phba);
36568 if (vports != NULL)
36569 @@ -417,8 +417,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36570 }
36571 }
36572 lpfc_destroy_vport_work_array(phba, vports);
36573 - atomic_set(&phba->num_rsrc_err, 0);
36574 - atomic_set(&phba->num_cmd_success, 0);
36575 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36576 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36577 }
36578
36579 /**
36580 @@ -452,8 +452,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36581 }
36582 }
36583 lpfc_destroy_vport_work_array(phba, vports);
36584 - atomic_set(&phba->num_rsrc_err, 0);
36585 - atomic_set(&phba->num_cmd_success, 0);
36586 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36587 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36588 }
36589
36590 /**
36591 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36592 index ea8a0b4..812a124 100644
36593 --- a/drivers/scsi/pmcraid.c
36594 +++ b/drivers/scsi/pmcraid.c
36595 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36596 res->scsi_dev = scsi_dev;
36597 scsi_dev->hostdata = res;
36598 res->change_detected = 0;
36599 - atomic_set(&res->read_failures, 0);
36600 - atomic_set(&res->write_failures, 0);
36601 + atomic_set_unchecked(&res->read_failures, 0);
36602 + atomic_set_unchecked(&res->write_failures, 0);
36603 rc = 0;
36604 }
36605 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36606 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36607
36608 /* If this was a SCSI read/write command keep count of errors */
36609 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36610 - atomic_inc(&res->read_failures);
36611 + atomic_inc_unchecked(&res->read_failures);
36612 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36613 - atomic_inc(&res->write_failures);
36614 + atomic_inc_unchecked(&res->write_failures);
36615
36616 if (!RES_IS_GSCSI(res->cfg_entry) &&
36617 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36618 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
36619 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36620 * hrrq_id assigned here in queuecommand
36621 */
36622 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36623 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36624 pinstance->num_hrrq;
36625 cmd->cmd_done = pmcraid_io_done;
36626
36627 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
36628 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36629 * hrrq_id assigned here in queuecommand
36630 */
36631 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36632 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36633 pinstance->num_hrrq;
36634
36635 if (request_size) {
36636 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36637
36638 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36639 /* add resources only after host is added into system */
36640 - if (!atomic_read(&pinstance->expose_resources))
36641 + if (!atomic_read_unchecked(&pinstance->expose_resources))
36642 return;
36643
36644 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36645 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
36646 init_waitqueue_head(&pinstance->reset_wait_q);
36647
36648 atomic_set(&pinstance->outstanding_cmds, 0);
36649 - atomic_set(&pinstance->last_message_id, 0);
36650 - atomic_set(&pinstance->expose_resources, 0);
36651 + atomic_set_unchecked(&pinstance->last_message_id, 0);
36652 + atomic_set_unchecked(&pinstance->expose_resources, 0);
36653
36654 INIT_LIST_HEAD(&pinstance->free_res_q);
36655 INIT_LIST_HEAD(&pinstance->used_res_q);
36656 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
36657 /* Schedule worker thread to handle CCN and take care of adding and
36658 * removing devices to OS
36659 */
36660 - atomic_set(&pinstance->expose_resources, 1);
36661 + atomic_set_unchecked(&pinstance->expose_resources, 1);
36662 schedule_work(&pinstance->worker_q);
36663 return rc;
36664
36665 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36666 index e1d150f..6c6df44 100644
36667 --- a/drivers/scsi/pmcraid.h
36668 +++ b/drivers/scsi/pmcraid.h
36669 @@ -748,7 +748,7 @@ struct pmcraid_instance {
36670 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36671
36672 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
36673 - atomic_t last_message_id;
36674 + atomic_unchecked_t last_message_id;
36675
36676 /* configuration table */
36677 struct pmcraid_config_table *cfg_table;
36678 @@ -777,7 +777,7 @@ struct pmcraid_instance {
36679 atomic_t outstanding_cmds;
36680
36681 /* should add/delete resources to mid-layer now ?*/
36682 - atomic_t expose_resources;
36683 + atomic_unchecked_t expose_resources;
36684
36685
36686
36687 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
36688 struct pmcraid_config_table_entry_ext cfg_entry_ext;
36689 };
36690 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36691 - atomic_t read_failures; /* count of failed READ commands */
36692 - atomic_t write_failures; /* count of failed WRITE commands */
36693 + atomic_unchecked_t read_failures; /* count of failed READ commands */
36694 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36695
36696 /* To indicate add/delete/modify during CCN */
36697 u8 change_detected;
36698 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
36699 index a244303..6015eb7 100644
36700 --- a/drivers/scsi/qla2xxx/qla_def.h
36701 +++ b/drivers/scsi/qla2xxx/qla_def.h
36702 @@ -2264,7 +2264,7 @@ struct isp_operations {
36703 int (*start_scsi) (srb_t *);
36704 int (*abort_isp) (struct scsi_qla_host *);
36705 int (*iospace_config)(struct qla_hw_data*);
36706 -};
36707 +} __no_const;
36708
36709 /* MSI-X Support *************************************************************/
36710
36711 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
36712 index 7f2492e..5113877 100644
36713 --- a/drivers/scsi/qla4xxx/ql4_def.h
36714 +++ b/drivers/scsi/qla4xxx/ql4_def.h
36715 @@ -268,7 +268,7 @@ struct ddb_entry {
36716 * (4000 only) */
36717 atomic_t relogin_timer; /* Max Time to wait for
36718 * relogin to complete */
36719 - atomic_t relogin_retry_count; /* Num of times relogin has been
36720 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36721 * retried */
36722 uint32_t default_time2wait; /* Default Min time between
36723 * relogins (+aens) */
36724 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
36725 index ee47820..a83b1f4 100644
36726 --- a/drivers/scsi/qla4xxx/ql4_os.c
36727 +++ b/drivers/scsi/qla4xxx/ql4_os.c
36728 @@ -2551,12 +2551,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
36729 */
36730 if (!iscsi_is_session_online(cls_sess)) {
36731 /* Reset retry relogin timer */
36732 - atomic_inc(&ddb_entry->relogin_retry_count);
36733 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36734 DEBUG2(ql4_printk(KERN_INFO, ha,
36735 "%s: index[%d] relogin timed out-retrying"
36736 " relogin (%d), retry (%d)\n", __func__,
36737 ddb_entry->fw_ddb_index,
36738 - atomic_read(&ddb_entry->relogin_retry_count),
36739 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
36740 ddb_entry->default_time2wait + 4));
36741 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
36742 atomic_set(&ddb_entry->retry_relogin_timer,
36743 @@ -4453,7 +4453,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
36744
36745 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36746 atomic_set(&ddb_entry->relogin_timer, 0);
36747 - atomic_set(&ddb_entry->relogin_retry_count, 0);
36748 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36749 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
36750 ddb_entry->default_relogin_timeout =
36751 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
36752 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
36753 index 07322ec..91ccc23 100644
36754 --- a/drivers/scsi/scsi.c
36755 +++ b/drivers/scsi/scsi.c
36756 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
36757 unsigned long timeout;
36758 int rtn = 0;
36759
36760 - atomic_inc(&cmd->device->iorequest_cnt);
36761 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36762
36763 /* check if the device is still usable */
36764 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36765 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
36766 index 4037fd5..a19fcc7 100644
36767 --- a/drivers/scsi/scsi_lib.c
36768 +++ b/drivers/scsi/scsi_lib.c
36769 @@ -1415,7 +1415,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
36770 shost = sdev->host;
36771 scsi_init_cmd_errh(cmd);
36772 cmd->result = DID_NO_CONNECT << 16;
36773 - atomic_inc(&cmd->device->iorequest_cnt);
36774 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36775
36776 /*
36777 * SCSI request completion path will do scsi_device_unbusy(),
36778 @@ -1441,9 +1441,9 @@ static void scsi_softirq_done(struct request *rq)
36779
36780 INIT_LIST_HEAD(&cmd->eh_entry);
36781
36782 - atomic_inc(&cmd->device->iodone_cnt);
36783 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
36784 if (cmd->result)
36785 - atomic_inc(&cmd->device->ioerr_cnt);
36786 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36787
36788 disposition = scsi_decide_disposition(cmd);
36789 if (disposition != SUCCESS &&
36790 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
36791 index 04c2a27..9d8bd66 100644
36792 --- a/drivers/scsi/scsi_sysfs.c
36793 +++ b/drivers/scsi/scsi_sysfs.c
36794 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
36795 char *buf) \
36796 { \
36797 struct scsi_device *sdev = to_scsi_device(dev); \
36798 - unsigned long long count = atomic_read(&sdev->field); \
36799 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
36800 return snprintf(buf, 20, "0x%llx\n", count); \
36801 } \
36802 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36803 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
36804 index 84a1fdf..693b0d6 100644
36805 --- a/drivers/scsi/scsi_tgt_lib.c
36806 +++ b/drivers/scsi/scsi_tgt_lib.c
36807 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
36808 int err;
36809
36810 dprintk("%lx %u\n", uaddr, len);
36811 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
36812 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
36813 if (err) {
36814 /*
36815 * TODO: need to fixup sg_tablesize, max_segment_size,
36816 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
36817 index 80fbe2a..efa223b 100644
36818 --- a/drivers/scsi/scsi_transport_fc.c
36819 +++ b/drivers/scsi/scsi_transport_fc.c
36820 @@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
36821 * Netlink Infrastructure
36822 */
36823
36824 -static atomic_t fc_event_seq;
36825 +static atomic_unchecked_t fc_event_seq;
36826
36827 /**
36828 * fc_get_event_number - Obtain the next sequential FC event number
36829 @@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
36830 u32
36831 fc_get_event_number(void)
36832 {
36833 - return atomic_add_return(1, &fc_event_seq);
36834 + return atomic_add_return_unchecked(1, &fc_event_seq);
36835 }
36836 EXPORT_SYMBOL(fc_get_event_number);
36837
36838 @@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
36839 {
36840 int error;
36841
36842 - atomic_set(&fc_event_seq, 0);
36843 + atomic_set_unchecked(&fc_event_seq, 0);
36844
36845 error = transport_class_register(&fc_host_class);
36846 if (error)
36847 @@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
36848 char *cp;
36849
36850 *val = simple_strtoul(buf, &cp, 0);
36851 - if ((*cp && (*cp != '\n')) || (*val < 0))
36852 + if (*cp && (*cp != '\n'))
36853 return -EINVAL;
36854 /*
36855 * Check for overflow; dev_loss_tmo is u32
36856 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
36857 index 1cf640e..78e9014 100644
36858 --- a/drivers/scsi/scsi_transport_iscsi.c
36859 +++ b/drivers/scsi/scsi_transport_iscsi.c
36860 @@ -79,7 +79,7 @@ struct iscsi_internal {
36861 struct transport_container session_cont;
36862 };
36863
36864 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36865 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36866 static struct workqueue_struct *iscsi_eh_timer_workq;
36867
36868 static DEFINE_IDA(iscsi_sess_ida);
36869 @@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
36870 int err;
36871
36872 ihost = shost->shost_data;
36873 - session->sid = atomic_add_return(1, &iscsi_session_nr);
36874 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36875
36876 if (target_id == ISCSI_MAX_TARGET) {
36877 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
36878 @@ -2940,7 +2940,7 @@ static __init int iscsi_transport_init(void)
36879 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36880 ISCSI_TRANSPORT_VERSION);
36881
36882 - atomic_set(&iscsi_session_nr, 0);
36883 + atomic_set_unchecked(&iscsi_session_nr, 0);
36884
36885 err = class_register(&iscsi_transport_class);
36886 if (err)
36887 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
36888 index 21a045e..ec89e03 100644
36889 --- a/drivers/scsi/scsi_transport_srp.c
36890 +++ b/drivers/scsi/scsi_transport_srp.c
36891 @@ -33,7 +33,7 @@
36892 #include "scsi_transport_srp_internal.h"
36893
36894 struct srp_host_attrs {
36895 - atomic_t next_port_id;
36896 + atomic_unchecked_t next_port_id;
36897 };
36898 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36899
36900 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
36901 struct Scsi_Host *shost = dev_to_shost(dev);
36902 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36903
36904 - atomic_set(&srp_host->next_port_id, 0);
36905 + atomic_set_unchecked(&srp_host->next_port_id, 0);
36906 return 0;
36907 }
36908
36909 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
36910 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36911 rport->roles = ids->roles;
36912
36913 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36914 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36915 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36916
36917 transport_setup_device(&rport->dev);
36918 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
36919 index eacd46b..e3f4d62 100644
36920 --- a/drivers/scsi/sg.c
36921 +++ b/drivers/scsi/sg.c
36922 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
36923 sdp->disk->disk_name,
36924 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
36925 NULL,
36926 - (char *)arg);
36927 + (char __user *)arg);
36928 case BLKTRACESTART:
36929 return blk_trace_startstop(sdp->device->request_queue, 1);
36930 case BLKTRACESTOP:
36931 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
36932 const struct file_operations * fops;
36933 };
36934
36935 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36936 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36937 {"allow_dio", &adio_fops},
36938 {"debug", &debug_fops},
36939 {"def_reserved_size", &dressz_fops},
36940 @@ -2332,7 +2332,7 @@ sg_proc_init(void)
36941 if (!sg_proc_sgp)
36942 return 1;
36943 for (k = 0; k < num_leaves; ++k) {
36944 - struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
36945 + const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
36946 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
36947 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
36948 }
36949 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
36950 index 3d8f662..070f1a5 100644
36951 --- a/drivers/spi/spi.c
36952 +++ b/drivers/spi/spi.c
36953 @@ -1361,7 +1361,7 @@ int spi_bus_unlock(struct spi_master *master)
36954 EXPORT_SYMBOL_GPL(spi_bus_unlock);
36955
36956 /* portable code must never pass more than 32 bytes */
36957 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
36958 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
36959
36960 static u8 *buf;
36961
36962 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
36963 index d91751f..a3a9e36 100644
36964 --- a/drivers/staging/octeon/ethernet-rx.c
36965 +++ b/drivers/staging/octeon/ethernet-rx.c
36966 @@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36967 /* Increment RX stats for virtual ports */
36968 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
36969 #ifdef CONFIG_64BIT
36970 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
36971 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
36972 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
36973 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
36974 #else
36975 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
36976 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
36977 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
36978 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
36979 #endif
36980 }
36981 netif_receive_skb(skb);
36982 @@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36983 dev->name);
36984 */
36985 #ifdef CONFIG_64BIT
36986 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
36987 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36988 #else
36989 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
36990 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
36991 #endif
36992 dev_kfree_skb_irq(skb);
36993 }
36994 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
36995 index 60cba81..71eb239 100644
36996 --- a/drivers/staging/octeon/ethernet.c
36997 +++ b/drivers/staging/octeon/ethernet.c
36998 @@ -259,11 +259,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
36999 * since the RX tasklet also increments it.
37000 */
37001 #ifdef CONFIG_64BIT
37002 - atomic64_add(rx_status.dropped_packets,
37003 - (atomic64_t *)&priv->stats.rx_dropped);
37004 + atomic64_add_unchecked(rx_status.dropped_packets,
37005 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37006 #else
37007 - atomic_add(rx_status.dropped_packets,
37008 - (atomic_t *)&priv->stats.rx_dropped);
37009 + atomic_add_unchecked(rx_status.dropped_packets,
37010 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
37011 #endif
37012 }
37013
37014 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37015 index d3d8727..f9327bb8 100644
37016 --- a/drivers/staging/rtl8712/rtl871x_io.h
37017 +++ b/drivers/staging/rtl8712/rtl871x_io.h
37018 @@ -108,7 +108,7 @@ struct _io_ops {
37019 u8 *pmem);
37020 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37021 u8 *pmem);
37022 -};
37023 +} __no_const;
37024
37025 struct io_req {
37026 struct list_head list;
37027 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37028 index c7b5e8b..783d6cb 100644
37029 --- a/drivers/staging/sbe-2t3e3/netdev.c
37030 +++ b/drivers/staging/sbe-2t3e3/netdev.c
37031 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37032 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37033
37034 if (rlen)
37035 - if (copy_to_user(data, &resp, rlen))
37036 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37037 return -EFAULT;
37038
37039 return 0;
37040 diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
37041 index 42cdafe..2769103 100644
37042 --- a/drivers/staging/speakup/speakup_soft.c
37043 +++ b/drivers/staging/speakup/speakup_soft.c
37044 @@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
37045 break;
37046 } else if (!initialized) {
37047 if (*init) {
37048 - ch = *init;
37049 init++;
37050 } else {
37051 initialized = 1;
37052 }
37053 + ch = *init;
37054 } else {
37055 ch = synth_buffer_getc();
37056 }
37057 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37058 index c7b888c..c94be93 100644
37059 --- a/drivers/staging/usbip/usbip_common.h
37060 +++ b/drivers/staging/usbip/usbip_common.h
37061 @@ -289,7 +289,7 @@ struct usbip_device {
37062 void (*shutdown)(struct usbip_device *);
37063 void (*reset)(struct usbip_device *);
37064 void (*unusable)(struct usbip_device *);
37065 - } eh_ops;
37066 + } __no_const eh_ops;
37067 };
37068
37069 /* usbip_common.c */
37070 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37071 index 88b3298..3783eee 100644
37072 --- a/drivers/staging/usbip/vhci.h
37073 +++ b/drivers/staging/usbip/vhci.h
37074 @@ -88,7 +88,7 @@ struct vhci_hcd {
37075 unsigned resuming:1;
37076 unsigned long re_timeout;
37077
37078 - atomic_t seqnum;
37079 + atomic_unchecked_t seqnum;
37080
37081 /*
37082 * NOTE:
37083 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37084 index dca9bf1..80735c9 100644
37085 --- a/drivers/staging/usbip/vhci_hcd.c
37086 +++ b/drivers/staging/usbip/vhci_hcd.c
37087 @@ -488,7 +488,7 @@ static void vhci_tx_urb(struct urb *urb)
37088 return;
37089 }
37090
37091 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37092 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37093 if (priv->seqnum == 0xffff)
37094 dev_info(&urb->dev->dev, "seqnum max\n");
37095
37096 @@ -740,7 +740,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37097 return -ENOMEM;
37098 }
37099
37100 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37101 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37102 if (unlink->seqnum == 0xffff)
37103 pr_info("seqnum max\n");
37104
37105 @@ -928,7 +928,7 @@ static int vhci_start(struct usb_hcd *hcd)
37106 vdev->rhport = rhport;
37107 }
37108
37109 - atomic_set(&vhci->seqnum, 0);
37110 + atomic_set_unchecked(&vhci->seqnum, 0);
37111 spin_lock_init(&vhci->lock);
37112
37113 hcd->power_budget = 0; /* no limit */
37114 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37115 index f5fba732..210a16c 100644
37116 --- a/drivers/staging/usbip/vhci_rx.c
37117 +++ b/drivers/staging/usbip/vhci_rx.c
37118 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37119 if (!urb) {
37120 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37121 pr_info("max seqnum %d\n",
37122 - atomic_read(&the_controller->seqnum));
37123 + atomic_read_unchecked(&the_controller->seqnum));
37124 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37125 return;
37126 }
37127 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37128 index 7735027..30eed13 100644
37129 --- a/drivers/staging/vt6655/hostap.c
37130 +++ b/drivers/staging/vt6655/hostap.c
37131 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37132 *
37133 */
37134
37135 +static net_device_ops_no_const apdev_netdev_ops;
37136 +
37137 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37138 {
37139 PSDevice apdev_priv;
37140 struct net_device *dev = pDevice->dev;
37141 int ret;
37142 - const struct net_device_ops apdev_netdev_ops = {
37143 - .ndo_start_xmit = pDevice->tx_80211,
37144 - };
37145
37146 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37147
37148 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37149 *apdev_priv = *pDevice;
37150 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37151
37152 + /* only half broken now */
37153 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37154 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37155
37156 pDevice->apdev->type = ARPHRD_IEEE80211;
37157 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37158 index 51b5adf..098e320 100644
37159 --- a/drivers/staging/vt6656/hostap.c
37160 +++ b/drivers/staging/vt6656/hostap.c
37161 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37162 *
37163 */
37164
37165 +static net_device_ops_no_const apdev_netdev_ops;
37166 +
37167 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37168 {
37169 PSDevice apdev_priv;
37170 struct net_device *dev = pDevice->dev;
37171 int ret;
37172 - const struct net_device_ops apdev_netdev_ops = {
37173 - .ndo_start_xmit = pDevice->tx_80211,
37174 - };
37175
37176 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37177
37178 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37179 *apdev_priv = *pDevice;
37180 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37181
37182 + /* only half broken now */
37183 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37184 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37185
37186 pDevice->apdev->type = ARPHRD_IEEE80211;
37187 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37188 index 7843dfd..3db105f 100644
37189 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
37190 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37191 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37192
37193 struct usbctlx_completor {
37194 int (*complete) (struct usbctlx_completor *);
37195 -};
37196 +} __no_const;
37197
37198 static int
37199 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37200 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37201 index 1ca66ea..76f1343 100644
37202 --- a/drivers/staging/zcache/tmem.c
37203 +++ b/drivers/staging/zcache/tmem.c
37204 @@ -39,7 +39,7 @@
37205 * A tmem host implementation must use this function to register callbacks
37206 * for memory allocation.
37207 */
37208 -static struct tmem_hostops tmem_hostops;
37209 +static tmem_hostops_no_const tmem_hostops;
37210
37211 static void tmem_objnode_tree_init(void);
37212
37213 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37214 * A tmem host implementation must use this function to register
37215 * callbacks for a page-accessible memory (PAM) implementation
37216 */
37217 -static struct tmem_pamops tmem_pamops;
37218 +static tmem_pamops_no_const tmem_pamops;
37219
37220 void tmem_register_pamops(struct tmem_pamops *m)
37221 {
37222 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37223 index 0d4aa82..f7832d4 100644
37224 --- a/drivers/staging/zcache/tmem.h
37225 +++ b/drivers/staging/zcache/tmem.h
37226 @@ -180,6 +180,7 @@ struct tmem_pamops {
37227 void (*new_obj)(struct tmem_obj *);
37228 int (*replace_in_obj)(void *, struct tmem_obj *);
37229 };
37230 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37231 extern void tmem_register_pamops(struct tmem_pamops *m);
37232
37233 /* memory allocation methods provided by the host implementation */
37234 @@ -189,6 +190,7 @@ struct tmem_hostops {
37235 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37236 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37237 };
37238 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37239 extern void tmem_register_hostops(struct tmem_hostops *m);
37240
37241 /* core tmem accessor functions */
37242 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
37243 index f015839..b15dfc4 100644
37244 --- a/drivers/target/target_core_tmr.c
37245 +++ b/drivers/target/target_core_tmr.c
37246 @@ -327,7 +327,7 @@ static void core_tmr_drain_task_list(
37247 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
37248 cmd->t_task_list_num,
37249 atomic_read(&cmd->t_task_cdbs_left),
37250 - atomic_read(&cmd->t_task_cdbs_sent),
37251 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37252 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37253 (cmd->transport_state & CMD_T_STOP) != 0,
37254 (cmd->transport_state & CMD_T_SENT) != 0);
37255 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37256 index 443704f..92d3517 100644
37257 --- a/drivers/target/target_core_transport.c
37258 +++ b/drivers/target/target_core_transport.c
37259 @@ -1355,7 +1355,7 @@ struct se_device *transport_add_device_to_core_hba(
37260 spin_lock_init(&dev->se_port_lock);
37261 spin_lock_init(&dev->se_tmr_lock);
37262 spin_lock_init(&dev->qf_cmd_lock);
37263 - atomic_set(&dev->dev_ordered_id, 0);
37264 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
37265
37266 se_dev_set_default_attribs(dev, dev_limits);
37267
37268 @@ -1542,7 +1542,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37269 * Used to determine when ORDERED commands should go from
37270 * Dormant to Active status.
37271 */
37272 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37273 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37274 smp_mb__after_atomic_inc();
37275 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37276 cmd->se_ordered_id, cmd->sam_task_attr,
37277 @@ -1956,7 +1956,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
37278 " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
37279 cmd->t_task_list_num,
37280 atomic_read(&cmd->t_task_cdbs_left),
37281 - atomic_read(&cmd->t_task_cdbs_sent),
37282 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37283 atomic_read(&cmd->t_task_cdbs_ex_left),
37284 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37285 (cmd->transport_state & CMD_T_STOP) != 0,
37286 @@ -2216,9 +2216,9 @@ check_depth:
37287 cmd = task->task_se_cmd;
37288 spin_lock_irqsave(&cmd->t_state_lock, flags);
37289 task->task_flags |= (TF_ACTIVE | TF_SENT);
37290 - atomic_inc(&cmd->t_task_cdbs_sent);
37291 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
37292
37293 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
37294 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37295 cmd->t_task_list_num)
37296 cmd->transport_state |= CMD_T_SENT;
37297
37298 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37299 index 3436436..772237b 100644
37300 --- a/drivers/tty/hvc/hvcs.c
37301 +++ b/drivers/tty/hvc/hvcs.c
37302 @@ -83,6 +83,7 @@
37303 #include <asm/hvcserver.h>
37304 #include <asm/uaccess.h>
37305 #include <asm/vio.h>
37306 +#include <asm/local.h>
37307
37308 /*
37309 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37310 @@ -270,7 +271,7 @@ struct hvcs_struct {
37311 unsigned int index;
37312
37313 struct tty_struct *tty;
37314 - int open_count;
37315 + local_t open_count;
37316
37317 /*
37318 * Used to tell the driver kernel_thread what operations need to take
37319 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37320
37321 spin_lock_irqsave(&hvcsd->lock, flags);
37322
37323 - if (hvcsd->open_count > 0) {
37324 + if (local_read(&hvcsd->open_count) > 0) {
37325 spin_unlock_irqrestore(&hvcsd->lock, flags);
37326 printk(KERN_INFO "HVCS: vterm state unchanged. "
37327 "The hvcs device node is still in use.\n");
37328 @@ -1138,7 +1139,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37329 if ((retval = hvcs_partner_connect(hvcsd)))
37330 goto error_release;
37331
37332 - hvcsd->open_count = 1;
37333 + local_set(&hvcsd->open_count, 1);
37334 hvcsd->tty = tty;
37335 tty->driver_data = hvcsd;
37336
37337 @@ -1172,7 +1173,7 @@ fast_open:
37338
37339 spin_lock_irqsave(&hvcsd->lock, flags);
37340 kref_get(&hvcsd->kref);
37341 - hvcsd->open_count++;
37342 + local_inc(&hvcsd->open_count);
37343 hvcsd->todo_mask |= HVCS_SCHED_READ;
37344 spin_unlock_irqrestore(&hvcsd->lock, flags);
37345
37346 @@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37347 hvcsd = tty->driver_data;
37348
37349 spin_lock_irqsave(&hvcsd->lock, flags);
37350 - if (--hvcsd->open_count == 0) {
37351 + if (local_dec_and_test(&hvcsd->open_count)) {
37352
37353 vio_disable_interrupts(hvcsd->vdev);
37354
37355 @@ -1242,10 +1243,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37356 free_irq(irq, hvcsd);
37357 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37358 return;
37359 - } else if (hvcsd->open_count < 0) {
37360 + } else if (local_read(&hvcsd->open_count) < 0) {
37361 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37362 " is missmanaged.\n",
37363 - hvcsd->vdev->unit_address, hvcsd->open_count);
37364 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37365 }
37366
37367 spin_unlock_irqrestore(&hvcsd->lock, flags);
37368 @@ -1261,7 +1262,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37369
37370 spin_lock_irqsave(&hvcsd->lock, flags);
37371 /* Preserve this so that we know how many kref refs to put */
37372 - temp_open_count = hvcsd->open_count;
37373 + temp_open_count = local_read(&hvcsd->open_count);
37374
37375 /*
37376 * Don't kref put inside the spinlock because the destruction
37377 @@ -1276,7 +1277,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37378 hvcsd->tty->driver_data = NULL;
37379 hvcsd->tty = NULL;
37380
37381 - hvcsd->open_count = 0;
37382 + local_set(&hvcsd->open_count, 0);
37383
37384 /* This will drop any buffered data on the floor which is OK in a hangup
37385 * scenario. */
37386 @@ -1347,7 +1348,7 @@ static int hvcs_write(struct tty_struct *tty,
37387 * the middle of a write operation? This is a crummy place to do this
37388 * but we want to keep it all in the spinlock.
37389 */
37390 - if (hvcsd->open_count <= 0) {
37391 + if (local_read(&hvcsd->open_count) <= 0) {
37392 spin_unlock_irqrestore(&hvcsd->lock, flags);
37393 return -ENODEV;
37394 }
37395 @@ -1421,7 +1422,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37396 {
37397 struct hvcs_struct *hvcsd = tty->driver_data;
37398
37399 - if (!hvcsd || hvcsd->open_count <= 0)
37400 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37401 return 0;
37402
37403 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
37404 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37405 index 4daf962..b4a2281 100644
37406 --- a/drivers/tty/ipwireless/tty.c
37407 +++ b/drivers/tty/ipwireless/tty.c
37408 @@ -29,6 +29,7 @@
37409 #include <linux/tty_driver.h>
37410 #include <linux/tty_flip.h>
37411 #include <linux/uaccess.h>
37412 +#include <asm/local.h>
37413
37414 #include "tty.h"
37415 #include "network.h"
37416 @@ -51,7 +52,7 @@ struct ipw_tty {
37417 int tty_type;
37418 struct ipw_network *network;
37419 struct tty_struct *linux_tty;
37420 - int open_count;
37421 + local_t open_count;
37422 unsigned int control_lines;
37423 struct mutex ipw_tty_mutex;
37424 int tx_bytes_queued;
37425 @@ -117,10 +118,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37426 mutex_unlock(&tty->ipw_tty_mutex);
37427 return -ENODEV;
37428 }
37429 - if (tty->open_count == 0)
37430 + if (local_read(&tty->open_count) == 0)
37431 tty->tx_bytes_queued = 0;
37432
37433 - tty->open_count++;
37434 + local_inc(&tty->open_count);
37435
37436 tty->linux_tty = linux_tty;
37437 linux_tty->driver_data = tty;
37438 @@ -136,9 +137,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37439
37440 static void do_ipw_close(struct ipw_tty *tty)
37441 {
37442 - tty->open_count--;
37443 -
37444 - if (tty->open_count == 0) {
37445 + if (local_dec_return(&tty->open_count) == 0) {
37446 struct tty_struct *linux_tty = tty->linux_tty;
37447
37448 if (linux_tty != NULL) {
37449 @@ -159,7 +158,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37450 return;
37451
37452 mutex_lock(&tty->ipw_tty_mutex);
37453 - if (tty->open_count == 0) {
37454 + if (local_read(&tty->open_count) == 0) {
37455 mutex_unlock(&tty->ipw_tty_mutex);
37456 return;
37457 }
37458 @@ -188,7 +187,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37459 return;
37460 }
37461
37462 - if (!tty->open_count) {
37463 + if (!local_read(&tty->open_count)) {
37464 mutex_unlock(&tty->ipw_tty_mutex);
37465 return;
37466 }
37467 @@ -230,7 +229,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37468 return -ENODEV;
37469
37470 mutex_lock(&tty->ipw_tty_mutex);
37471 - if (!tty->open_count) {
37472 + if (!local_read(&tty->open_count)) {
37473 mutex_unlock(&tty->ipw_tty_mutex);
37474 return -EINVAL;
37475 }
37476 @@ -270,7 +269,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37477 if (!tty)
37478 return -ENODEV;
37479
37480 - if (!tty->open_count)
37481 + if (!local_read(&tty->open_count))
37482 return -EINVAL;
37483
37484 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37485 @@ -312,7 +311,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37486 if (!tty)
37487 return 0;
37488
37489 - if (!tty->open_count)
37490 + if (!local_read(&tty->open_count))
37491 return 0;
37492
37493 return tty->tx_bytes_queued;
37494 @@ -393,7 +392,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37495 if (!tty)
37496 return -ENODEV;
37497
37498 - if (!tty->open_count)
37499 + if (!local_read(&tty->open_count))
37500 return -EINVAL;
37501
37502 return get_control_lines(tty);
37503 @@ -409,7 +408,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37504 if (!tty)
37505 return -ENODEV;
37506
37507 - if (!tty->open_count)
37508 + if (!local_read(&tty->open_count))
37509 return -EINVAL;
37510
37511 return set_control_lines(tty, set, clear);
37512 @@ -423,7 +422,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37513 if (!tty)
37514 return -ENODEV;
37515
37516 - if (!tty->open_count)
37517 + if (!local_read(&tty->open_count))
37518 return -EINVAL;
37519
37520 /* FIXME: Exactly how is the tty object locked here .. */
37521 @@ -572,7 +571,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37522 against a parallel ioctl etc */
37523 mutex_lock(&ttyj->ipw_tty_mutex);
37524 }
37525 - while (ttyj->open_count)
37526 + while (local_read(&ttyj->open_count))
37527 do_ipw_close(ttyj);
37528 ipwireless_disassociate_network_ttys(network,
37529 ttyj->channel_idx);
37530 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37531 index c43b683..0a88f1c 100644
37532 --- a/drivers/tty/n_gsm.c
37533 +++ b/drivers/tty/n_gsm.c
37534 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37535 kref_init(&dlci->ref);
37536 mutex_init(&dlci->mutex);
37537 dlci->fifo = &dlci->_fifo;
37538 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37539 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37540 kfree(dlci);
37541 return NULL;
37542 }
37543 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37544 index 94b6eda..15f7cec 100644
37545 --- a/drivers/tty/n_tty.c
37546 +++ b/drivers/tty/n_tty.c
37547 @@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37548 {
37549 *ops = tty_ldisc_N_TTY;
37550 ops->owner = NULL;
37551 - ops->refcount = ops->flags = 0;
37552 + atomic_set(&ops->refcount, 0);
37553 + ops->flags = 0;
37554 }
37555 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37556 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37557 index eeae7fa..177a743 100644
37558 --- a/drivers/tty/pty.c
37559 +++ b/drivers/tty/pty.c
37560 @@ -707,8 +707,10 @@ static void __init unix98_pty_init(void)
37561 panic("Couldn't register Unix98 pts driver");
37562
37563 /* Now create the /dev/ptmx special device */
37564 + pax_open_kernel();
37565 tty_default_fops(&ptmx_fops);
37566 - ptmx_fops.open = ptmx_open;
37567 + *(void **)&ptmx_fops.open = ptmx_open;
37568 + pax_close_kernel();
37569
37570 cdev_init(&ptmx_cdev, &ptmx_fops);
37571 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
37572 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37573 index 2b42a01..32a2ed3 100644
37574 --- a/drivers/tty/serial/kgdboc.c
37575 +++ b/drivers/tty/serial/kgdboc.c
37576 @@ -24,8 +24,9 @@
37577 #define MAX_CONFIG_LEN 40
37578
37579 static struct kgdb_io kgdboc_io_ops;
37580 +static struct kgdb_io kgdboc_io_ops_console;
37581
37582 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37583 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37584 static int configured = -1;
37585
37586 static char config[MAX_CONFIG_LEN];
37587 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
37588 kgdboc_unregister_kbd();
37589 if (configured == 1)
37590 kgdb_unregister_io_module(&kgdboc_io_ops);
37591 + else if (configured == 2)
37592 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
37593 }
37594
37595 static int configure_kgdboc(void)
37596 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
37597 int err;
37598 char *cptr = config;
37599 struct console *cons;
37600 + int is_console = 0;
37601
37602 err = kgdboc_option_setup(config);
37603 if (err || !strlen(config) || isspace(config[0]))
37604 goto noconfig;
37605
37606 err = -ENODEV;
37607 - kgdboc_io_ops.is_console = 0;
37608 kgdb_tty_driver = NULL;
37609
37610 kgdboc_use_kms = 0;
37611 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
37612 int idx;
37613 if (cons->device && cons->device(cons, &idx) == p &&
37614 idx == tty_line) {
37615 - kgdboc_io_ops.is_console = 1;
37616 + is_console = 1;
37617 break;
37618 }
37619 cons = cons->next;
37620 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
37621 kgdb_tty_line = tty_line;
37622
37623 do_register:
37624 - err = kgdb_register_io_module(&kgdboc_io_ops);
37625 + if (is_console) {
37626 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
37627 + configured = 2;
37628 + } else {
37629 + err = kgdb_register_io_module(&kgdboc_io_ops);
37630 + configured = 1;
37631 + }
37632 if (err)
37633 goto noconfig;
37634
37635 - configured = 1;
37636 -
37637 return 0;
37638
37639 noconfig:
37640 @@ -213,7 +220,7 @@ noconfig:
37641 static int __init init_kgdboc(void)
37642 {
37643 /* Already configured? */
37644 - if (configured == 1)
37645 + if (configured >= 1)
37646 return 0;
37647
37648 return configure_kgdboc();
37649 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
37650 if (config[len - 1] == '\n')
37651 config[len - 1] = '\0';
37652
37653 - if (configured == 1)
37654 + if (configured >= 1)
37655 cleanup_kgdboc();
37656
37657 /* Go and configure with the new params. */
37658 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
37659 .post_exception = kgdboc_post_exp_handler,
37660 };
37661
37662 +static struct kgdb_io kgdboc_io_ops_console = {
37663 + .name = "kgdboc",
37664 + .read_char = kgdboc_get_char,
37665 + .write_char = kgdboc_put_char,
37666 + .pre_exception = kgdboc_pre_exp_handler,
37667 + .post_exception = kgdboc_post_exp_handler,
37668 + .is_console = 1
37669 +};
37670 +
37671 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
37672 /* This is only available if kgdboc is a built in for early debugging */
37673 static int __init kgdboc_early_init(char *opt)
37674 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
37675 index 05728894..b9d44c6 100644
37676 --- a/drivers/tty/sysrq.c
37677 +++ b/drivers/tty/sysrq.c
37678 @@ -865,7 +865,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
37679 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
37680 size_t count, loff_t *ppos)
37681 {
37682 - if (count) {
37683 + if (count && capable(CAP_SYS_ADMIN)) {
37684 char c;
37685
37686 if (get_user(c, buf))
37687 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
37688 index d939bd7..33d92cd 100644
37689 --- a/drivers/tty/tty_io.c
37690 +++ b/drivers/tty/tty_io.c
37691 @@ -3278,7 +3278,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
37692
37693 void tty_default_fops(struct file_operations *fops)
37694 {
37695 - *fops = tty_fops;
37696 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
37697 }
37698
37699 /*
37700 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
37701 index 24b95db..9c078d0 100644
37702 --- a/drivers/tty/tty_ldisc.c
37703 +++ b/drivers/tty/tty_ldisc.c
37704 @@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
37705 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
37706 struct tty_ldisc_ops *ldo = ld->ops;
37707
37708 - ldo->refcount--;
37709 + atomic_dec(&ldo->refcount);
37710 module_put(ldo->owner);
37711 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37712
37713 @@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
37714 spin_lock_irqsave(&tty_ldisc_lock, flags);
37715 tty_ldiscs[disc] = new_ldisc;
37716 new_ldisc->num = disc;
37717 - new_ldisc->refcount = 0;
37718 + atomic_set(&new_ldisc->refcount, 0);
37719 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37720
37721 return ret;
37722 @@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
37723 return -EINVAL;
37724
37725 spin_lock_irqsave(&tty_ldisc_lock, flags);
37726 - if (tty_ldiscs[disc]->refcount)
37727 + if (atomic_read(&tty_ldiscs[disc]->refcount))
37728 ret = -EBUSY;
37729 else
37730 tty_ldiscs[disc] = NULL;
37731 @@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
37732 if (ldops) {
37733 ret = ERR_PTR(-EAGAIN);
37734 if (try_module_get(ldops->owner)) {
37735 - ldops->refcount++;
37736 + atomic_inc(&ldops->refcount);
37737 ret = ldops;
37738 }
37739 }
37740 @@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
37741 unsigned long flags;
37742
37743 spin_lock_irqsave(&tty_ldisc_lock, flags);
37744 - ldops->refcount--;
37745 + atomic_dec(&ldops->refcount);
37746 module_put(ldops->owner);
37747 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37748 }
37749 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
37750 index 3b0c4e3..f98a992 100644
37751 --- a/drivers/tty/vt/keyboard.c
37752 +++ b/drivers/tty/vt/keyboard.c
37753 @@ -663,6 +663,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
37754 kbd->kbdmode == VC_OFF) &&
37755 value != KVAL(K_SAK))
37756 return; /* SAK is allowed even in raw mode */
37757 +
37758 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
37759 + {
37760 + void *func = fn_handler[value];
37761 + if (func == fn_show_state || func == fn_show_ptregs ||
37762 + func == fn_show_mem)
37763 + return;
37764 + }
37765 +#endif
37766 +
37767 fn_handler[value](vc);
37768 }
37769
37770 @@ -1812,9 +1822,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
37771 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
37772 return -EFAULT;
37773
37774 - if (!capable(CAP_SYS_TTY_CONFIG))
37775 - perm = 0;
37776 -
37777 switch (cmd) {
37778 case KDGKBENT:
37779 /* Ensure another thread doesn't free it under us */
37780 @@ -1829,6 +1836,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
37781 spin_unlock_irqrestore(&kbd_event_lock, flags);
37782 return put_user(val, &user_kbe->kb_value);
37783 case KDSKBENT:
37784 + if (!capable(CAP_SYS_TTY_CONFIG))
37785 + perm = 0;
37786 +
37787 if (!perm)
37788 return -EPERM;
37789 if (!i && v == K_NOSUCHMAP) {
37790 @@ -1919,9 +1929,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37791 int i, j, k;
37792 int ret;
37793
37794 - if (!capable(CAP_SYS_TTY_CONFIG))
37795 - perm = 0;
37796 -
37797 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
37798 if (!kbs) {
37799 ret = -ENOMEM;
37800 @@ -1955,6 +1962,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37801 kfree(kbs);
37802 return ((p && *p) ? -EOVERFLOW : 0);
37803 case KDSKBSENT:
37804 + if (!capable(CAP_SYS_TTY_CONFIG))
37805 + perm = 0;
37806 +
37807 if (!perm) {
37808 ret = -EPERM;
37809 goto reterr;
37810 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
37811 index a783d53..cb30d94 100644
37812 --- a/drivers/uio/uio.c
37813 +++ b/drivers/uio/uio.c
37814 @@ -25,6 +25,7 @@
37815 #include <linux/kobject.h>
37816 #include <linux/cdev.h>
37817 #include <linux/uio_driver.h>
37818 +#include <asm/local.h>
37819
37820 #define UIO_MAX_DEVICES (1U << MINORBITS)
37821
37822 @@ -32,10 +33,10 @@ struct uio_device {
37823 struct module *owner;
37824 struct device *dev;
37825 int minor;
37826 - atomic_t event;
37827 + atomic_unchecked_t event;
37828 struct fasync_struct *async_queue;
37829 wait_queue_head_t wait;
37830 - int vma_count;
37831 + local_t vma_count;
37832 struct uio_info *info;
37833 struct kobject *map_dir;
37834 struct kobject *portio_dir;
37835 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
37836 struct device_attribute *attr, char *buf)
37837 {
37838 struct uio_device *idev = dev_get_drvdata(dev);
37839 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
37840 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
37841 }
37842
37843 static struct device_attribute uio_class_attributes[] = {
37844 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
37845 {
37846 struct uio_device *idev = info->uio_dev;
37847
37848 - atomic_inc(&idev->event);
37849 + atomic_inc_unchecked(&idev->event);
37850 wake_up_interruptible(&idev->wait);
37851 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
37852 }
37853 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
37854 }
37855
37856 listener->dev = idev;
37857 - listener->event_count = atomic_read(&idev->event);
37858 + listener->event_count = atomic_read_unchecked(&idev->event);
37859 filep->private_data = listener;
37860
37861 if (idev->info->open) {
37862 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
37863 return -EIO;
37864
37865 poll_wait(filep, &idev->wait, wait);
37866 - if (listener->event_count != atomic_read(&idev->event))
37867 + if (listener->event_count != atomic_read_unchecked(&idev->event))
37868 return POLLIN | POLLRDNORM;
37869 return 0;
37870 }
37871 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
37872 do {
37873 set_current_state(TASK_INTERRUPTIBLE);
37874
37875 - event_count = atomic_read(&idev->event);
37876 + event_count = atomic_read_unchecked(&idev->event);
37877 if (event_count != listener->event_count) {
37878 if (copy_to_user(buf, &event_count, count))
37879 retval = -EFAULT;
37880 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
37881 static void uio_vma_open(struct vm_area_struct *vma)
37882 {
37883 struct uio_device *idev = vma->vm_private_data;
37884 - idev->vma_count++;
37885 + local_inc(&idev->vma_count);
37886 }
37887
37888 static void uio_vma_close(struct vm_area_struct *vma)
37889 {
37890 struct uio_device *idev = vma->vm_private_data;
37891 - idev->vma_count--;
37892 + local_dec(&idev->vma_count);
37893 }
37894
37895 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
37896 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
37897 idev->owner = owner;
37898 idev->info = info;
37899 init_waitqueue_head(&idev->wait);
37900 - atomic_set(&idev->event, 0);
37901 + atomic_set_unchecked(&idev->event, 0);
37902
37903 ret = uio_get_minor(idev);
37904 if (ret)
37905 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
37906 index 98b89fe..aff824e 100644
37907 --- a/drivers/usb/atm/cxacru.c
37908 +++ b/drivers/usb/atm/cxacru.c
37909 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
37910 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
37911 if (ret < 2)
37912 return -EINVAL;
37913 - if (index < 0 || index > 0x7f)
37914 + if (index > 0x7f)
37915 return -EINVAL;
37916 pos += tmp;
37917
37918 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
37919 index d3448ca..d2864ca 100644
37920 --- a/drivers/usb/atm/usbatm.c
37921 +++ b/drivers/usb/atm/usbatm.c
37922 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37923 if (printk_ratelimit())
37924 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
37925 __func__, vpi, vci);
37926 - atomic_inc(&vcc->stats->rx_err);
37927 + atomic_inc_unchecked(&vcc->stats->rx_err);
37928 return;
37929 }
37930
37931 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37932 if (length > ATM_MAX_AAL5_PDU) {
37933 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
37934 __func__, length, vcc);
37935 - atomic_inc(&vcc->stats->rx_err);
37936 + atomic_inc_unchecked(&vcc->stats->rx_err);
37937 goto out;
37938 }
37939
37940 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37941 if (sarb->len < pdu_length) {
37942 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
37943 __func__, pdu_length, sarb->len, vcc);
37944 - atomic_inc(&vcc->stats->rx_err);
37945 + atomic_inc_unchecked(&vcc->stats->rx_err);
37946 goto out;
37947 }
37948
37949 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
37950 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
37951 __func__, vcc);
37952 - atomic_inc(&vcc->stats->rx_err);
37953 + atomic_inc_unchecked(&vcc->stats->rx_err);
37954 goto out;
37955 }
37956
37957 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37958 if (printk_ratelimit())
37959 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
37960 __func__, length);
37961 - atomic_inc(&vcc->stats->rx_drop);
37962 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37963 goto out;
37964 }
37965
37966 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37967
37968 vcc->push(vcc, skb);
37969
37970 - atomic_inc(&vcc->stats->rx);
37971 + atomic_inc_unchecked(&vcc->stats->rx);
37972 out:
37973 skb_trim(sarb, 0);
37974 }
37975 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
37976 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
37977
37978 usbatm_pop(vcc, skb);
37979 - atomic_inc(&vcc->stats->tx);
37980 + atomic_inc_unchecked(&vcc->stats->tx);
37981
37982 skb = skb_dequeue(&instance->sndqueue);
37983 }
37984 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
37985 if (!left--)
37986 return sprintf(page,
37987 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
37988 - atomic_read(&atm_dev->stats.aal5.tx),
37989 - atomic_read(&atm_dev->stats.aal5.tx_err),
37990 - atomic_read(&atm_dev->stats.aal5.rx),
37991 - atomic_read(&atm_dev->stats.aal5.rx_err),
37992 - atomic_read(&atm_dev->stats.aal5.rx_drop));
37993 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
37994 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
37995 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
37996 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
37997 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
37998
37999 if (!left--) {
38000 if (instance->disconnected)
38001 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
38002 index d956965..4179a77 100644
38003 --- a/drivers/usb/core/devices.c
38004 +++ b/drivers/usb/core/devices.c
38005 @@ -126,7 +126,7 @@ static const char format_endpt[] =
38006 * time it gets called.
38007 */
38008 static struct device_connect_event {
38009 - atomic_t count;
38010 + atomic_unchecked_t count;
38011 wait_queue_head_t wait;
38012 } device_event = {
38013 .count = ATOMIC_INIT(1),
38014 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
38015
38016 void usbfs_conn_disc_event(void)
38017 {
38018 - atomic_add(2, &device_event.count);
38019 + atomic_add_unchecked(2, &device_event.count);
38020 wake_up(&device_event.wait);
38021 }
38022
38023 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
38024
38025 poll_wait(file, &device_event.wait, wait);
38026
38027 - event_count = atomic_read(&device_event.count);
38028 + event_count = atomic_read_unchecked(&device_event.count);
38029 if (file->f_version != event_count) {
38030 file->f_version = event_count;
38031 return POLLIN | POLLRDNORM;
38032 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38033 index 1fc8f12..20647c1 100644
38034 --- a/drivers/usb/early/ehci-dbgp.c
38035 +++ b/drivers/usb/early/ehci-dbgp.c
38036 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
38037
38038 #ifdef CONFIG_KGDB
38039 static struct kgdb_io kgdbdbgp_io_ops;
38040 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38041 +static struct kgdb_io kgdbdbgp_io_ops_console;
38042 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
38043 #else
38044 #define dbgp_kgdb_mode (0)
38045 #endif
38046 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
38047 .write_char = kgdbdbgp_write_char,
38048 };
38049
38050 +static struct kgdb_io kgdbdbgp_io_ops_console = {
38051 + .name = "kgdbdbgp",
38052 + .read_char = kgdbdbgp_read_char,
38053 + .write_char = kgdbdbgp_write_char,
38054 + .is_console = 1
38055 +};
38056 +
38057 static int kgdbdbgp_wait_time;
38058
38059 static int __init kgdbdbgp_parse_config(char *str)
38060 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
38061 ptr++;
38062 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38063 }
38064 - kgdb_register_io_module(&kgdbdbgp_io_ops);
38065 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38066 + if (early_dbgp_console.index != -1)
38067 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38068 + else
38069 + kgdb_register_io_module(&kgdbdbgp_io_ops);
38070
38071 return 0;
38072 }
38073 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
38074 index d6bea3e..60b250e 100644
38075 --- a/drivers/usb/wusbcore/wa-hc.h
38076 +++ b/drivers/usb/wusbcore/wa-hc.h
38077 @@ -192,7 +192,7 @@ struct wahc {
38078 struct list_head xfer_delayed_list;
38079 spinlock_t xfer_list_lock;
38080 struct work_struct xfer_work;
38081 - atomic_t xfer_id_count;
38082 + atomic_unchecked_t xfer_id_count;
38083 };
38084
38085
38086 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
38087 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38088 spin_lock_init(&wa->xfer_list_lock);
38089 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38090 - atomic_set(&wa->xfer_id_count, 1);
38091 + atomic_set_unchecked(&wa->xfer_id_count, 1);
38092 }
38093
38094 /**
38095 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
38096 index 57c01ab..8a05959 100644
38097 --- a/drivers/usb/wusbcore/wa-xfer.c
38098 +++ b/drivers/usb/wusbcore/wa-xfer.c
38099 @@ -296,7 +296,7 @@ out:
38100 */
38101 static void wa_xfer_id_init(struct wa_xfer *xfer)
38102 {
38103 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38104 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38105 }
38106
38107 /*
38108 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
38109 index 51e4c1e..9d87e2a 100644
38110 --- a/drivers/vhost/vhost.c
38111 +++ b/drivers/vhost/vhost.c
38112 @@ -632,7 +632,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
38113 return 0;
38114 }
38115
38116 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
38117 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
38118 {
38119 struct file *eventfp, *filep = NULL,
38120 *pollstart = NULL, *pollstop = NULL;
38121 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
38122 index b0b2ac3..89a4399 100644
38123 --- a/drivers/video/aty/aty128fb.c
38124 +++ b/drivers/video/aty/aty128fb.c
38125 @@ -148,7 +148,7 @@ enum {
38126 };
38127
38128 /* Must match above enum */
38129 -static const char *r128_family[] __devinitdata = {
38130 +static const char *r128_family[] __devinitconst = {
38131 "AGP",
38132 "PCI",
38133 "PRO AGP",
38134 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
38135 index 5c3960d..15cf8fc 100644
38136 --- a/drivers/video/fbcmap.c
38137 +++ b/drivers/video/fbcmap.c
38138 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
38139 rc = -ENODEV;
38140 goto out;
38141 }
38142 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38143 - !info->fbops->fb_setcmap)) {
38144 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38145 rc = -EINVAL;
38146 goto out1;
38147 }
38148 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
38149 index c6ce416..3b9b642 100644
38150 --- a/drivers/video/fbmem.c
38151 +++ b/drivers/video/fbmem.c
38152 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38153 image->dx += image->width + 8;
38154 }
38155 } else if (rotate == FB_ROTATE_UD) {
38156 - for (x = 0; x < num && image->dx >= 0; x++) {
38157 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38158 info->fbops->fb_imageblit(info, image);
38159 image->dx -= image->width + 8;
38160 }
38161 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38162 image->dy += image->height + 8;
38163 }
38164 } else if (rotate == FB_ROTATE_CCW) {
38165 - for (x = 0; x < num && image->dy >= 0; x++) {
38166 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38167 info->fbops->fb_imageblit(info, image);
38168 image->dy -= image->height + 8;
38169 }
38170 @@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38171 return -EFAULT;
38172 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38173 return -EINVAL;
38174 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38175 + if (con2fb.framebuffer >= FB_MAX)
38176 return -EINVAL;
38177 if (!registered_fb[con2fb.framebuffer])
38178 request_module("fb%d", con2fb.framebuffer);
38179 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
38180 index 5a5d092..265c5ed 100644
38181 --- a/drivers/video/geode/gx1fb_core.c
38182 +++ b/drivers/video/geode/gx1fb_core.c
38183 @@ -29,7 +29,7 @@ static int crt_option = 1;
38184 static char panel_option[32] = "";
38185
38186 /* Modes relevant to the GX1 (taken from modedb.c) */
38187 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
38188 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
38189 /* 640x480-60 VESA */
38190 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
38191 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
38192 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
38193 index 0fad23f..0e9afa4 100644
38194 --- a/drivers/video/gxt4500.c
38195 +++ b/drivers/video/gxt4500.c
38196 @@ -156,7 +156,7 @@ struct gxt4500_par {
38197 static char *mode_option;
38198
38199 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
38200 -static const struct fb_videomode defaultmode __devinitdata = {
38201 +static const struct fb_videomode defaultmode __devinitconst = {
38202 .refresh = 60,
38203 .xres = 1280,
38204 .yres = 1024,
38205 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
38206 return 0;
38207 }
38208
38209 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
38210 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
38211 .id = "IBM GXT4500P",
38212 .type = FB_TYPE_PACKED_PIXELS,
38213 .visual = FB_VISUAL_PSEUDOCOLOR,
38214 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
38215 index 7672d2e..b56437f 100644
38216 --- a/drivers/video/i810/i810_accel.c
38217 +++ b/drivers/video/i810/i810_accel.c
38218 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
38219 }
38220 }
38221 printk("ringbuffer lockup!!!\n");
38222 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38223 i810_report_error(mmio);
38224 par->dev_flags |= LOCKUP;
38225 info->pixmap.scan_align = 1;
38226 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
38227 index b83f361..2b05a91 100644
38228 --- a/drivers/video/i810/i810_main.c
38229 +++ b/drivers/video/i810/i810_main.c
38230 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
38231 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38232
38233 /* PCI */
38234 -static const char *i810_pci_list[] __devinitdata = {
38235 +static const char *i810_pci_list[] __devinitconst = {
38236 "Intel(R) 810 Framebuffer Device" ,
38237 "Intel(R) 810-DC100 Framebuffer Device" ,
38238 "Intel(R) 810E Framebuffer Device" ,
38239 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38240 index de36693..3c63fc2 100644
38241 --- a/drivers/video/jz4740_fb.c
38242 +++ b/drivers/video/jz4740_fb.c
38243 @@ -136,7 +136,7 @@ struct jzfb {
38244 uint32_t pseudo_palette[16];
38245 };
38246
38247 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38248 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38249 .id = "JZ4740 FB",
38250 .type = FB_TYPE_PACKED_PIXELS,
38251 .visual = FB_VISUAL_TRUECOLOR,
38252 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38253 index 3c14e43..eafa544 100644
38254 --- a/drivers/video/logo/logo_linux_clut224.ppm
38255 +++ b/drivers/video/logo/logo_linux_clut224.ppm
38256 @@ -1,1604 +1,1123 @@
38257 P3
38258 -# Standard 224-color Linux logo
38259 80 80
38260 255
38261 - 0 0 0 0 0 0 0 0 0 0 0 0
38262 - 0 0 0 0 0 0 0 0 0 0 0 0
38263 - 0 0 0 0 0 0 0 0 0 0 0 0
38264 - 0 0 0 0 0 0 0 0 0 0 0 0
38265 - 0 0 0 0 0 0 0 0 0 0 0 0
38266 - 0 0 0 0 0 0 0 0 0 0 0 0
38267 - 0 0 0 0 0 0 0 0 0 0 0 0
38268 - 0 0 0 0 0 0 0 0 0 0 0 0
38269 - 0 0 0 0 0 0 0 0 0 0 0 0
38270 - 6 6 6 6 6 6 10 10 10 10 10 10
38271 - 10 10 10 6 6 6 6 6 6 6 6 6
38272 - 0 0 0 0 0 0 0 0 0 0 0 0
38273 - 0 0 0 0 0 0 0 0 0 0 0 0
38274 - 0 0 0 0 0 0 0 0 0 0 0 0
38275 - 0 0 0 0 0 0 0 0 0 0 0 0
38276 - 0 0 0 0 0 0 0 0 0 0 0 0
38277 - 0 0 0 0 0 0 0 0 0 0 0 0
38278 - 0 0 0 0 0 0 0 0 0 0 0 0
38279 - 0 0 0 0 0 0 0 0 0 0 0 0
38280 - 0 0 0 0 0 0 0 0 0 0 0 0
38281 - 0 0 0 0 0 0 0 0 0 0 0 0
38282 - 0 0 0 0 0 0 0 0 0 0 0 0
38283 - 0 0 0 0 0 0 0 0 0 0 0 0
38284 - 0 0 0 0 0 0 0 0 0 0 0 0
38285 - 0 0 0 0 0 0 0 0 0 0 0 0
38286 - 0 0 0 0 0 0 0 0 0 0 0 0
38287 - 0 0 0 0 0 0 0 0 0 0 0 0
38288 - 0 0 0 0 0 0 0 0 0 0 0 0
38289 - 0 0 0 6 6 6 10 10 10 14 14 14
38290 - 22 22 22 26 26 26 30 30 30 34 34 34
38291 - 30 30 30 30 30 30 26 26 26 18 18 18
38292 - 14 14 14 10 10 10 6 6 6 0 0 0
38293 - 0 0 0 0 0 0 0 0 0 0 0 0
38294 - 0 0 0 0 0 0 0 0 0 0 0 0
38295 - 0 0 0 0 0 0 0 0 0 0 0 0
38296 - 0 0 0 0 0 0 0 0 0 0 0 0
38297 - 0 0 0 0 0 0 0 0 0 0 0 0
38298 - 0 0 0 0 0 0 0 0 0 0 0 0
38299 - 0 0 0 0 0 0 0 0 0 0 0 0
38300 - 0 0 0 0 0 0 0 0 0 0 0 0
38301 - 0 0 0 0 0 0 0 0 0 0 0 0
38302 - 0 0 0 0 0 1 0 0 1 0 0 0
38303 - 0 0 0 0 0 0 0 0 0 0 0 0
38304 - 0 0 0 0 0 0 0 0 0 0 0 0
38305 - 0 0 0 0 0 0 0 0 0 0 0 0
38306 - 0 0 0 0 0 0 0 0 0 0 0 0
38307 - 0 0 0 0 0 0 0 0 0 0 0 0
38308 - 0 0 0 0 0 0 0 0 0 0 0 0
38309 - 6 6 6 14 14 14 26 26 26 42 42 42
38310 - 54 54 54 66 66 66 78 78 78 78 78 78
38311 - 78 78 78 74 74 74 66 66 66 54 54 54
38312 - 42 42 42 26 26 26 18 18 18 10 10 10
38313 - 6 6 6 0 0 0 0 0 0 0 0 0
38314 - 0 0 0 0 0 0 0 0 0 0 0 0
38315 - 0 0 0 0 0 0 0 0 0 0 0 0
38316 - 0 0 0 0 0 0 0 0 0 0 0 0
38317 - 0 0 0 0 0 0 0 0 0 0 0 0
38318 - 0 0 0 0 0 0 0 0 0 0 0 0
38319 - 0 0 0 0 0 0 0 0 0 0 0 0
38320 - 0 0 0 0 0 0 0 0 0 0 0 0
38321 - 0 0 0 0 0 0 0 0 0 0 0 0
38322 - 0 0 1 0 0 0 0 0 0 0 0 0
38323 - 0 0 0 0 0 0 0 0 0 0 0 0
38324 - 0 0 0 0 0 0 0 0 0 0 0 0
38325 - 0 0 0 0 0 0 0 0 0 0 0 0
38326 - 0 0 0 0 0 0 0 0 0 0 0 0
38327 - 0 0 0 0 0 0 0 0 0 0 0 0
38328 - 0 0 0 0 0 0 0 0 0 10 10 10
38329 - 22 22 22 42 42 42 66 66 66 86 86 86
38330 - 66 66 66 38 38 38 38 38 38 22 22 22
38331 - 26 26 26 34 34 34 54 54 54 66 66 66
38332 - 86 86 86 70 70 70 46 46 46 26 26 26
38333 - 14 14 14 6 6 6 0 0 0 0 0 0
38334 - 0 0 0 0 0 0 0 0 0 0 0 0
38335 - 0 0 0 0 0 0 0 0 0 0 0 0
38336 - 0 0 0 0 0 0 0 0 0 0 0 0
38337 - 0 0 0 0 0 0 0 0 0 0 0 0
38338 - 0 0 0 0 0 0 0 0 0 0 0 0
38339 - 0 0 0 0 0 0 0 0 0 0 0 0
38340 - 0 0 0 0 0 0 0 0 0 0 0 0
38341 - 0 0 0 0 0 0 0 0 0 0 0 0
38342 - 0 0 1 0 0 1 0 0 1 0 0 0
38343 - 0 0 0 0 0 0 0 0 0 0 0 0
38344 - 0 0 0 0 0 0 0 0 0 0 0 0
38345 - 0 0 0 0 0 0 0 0 0 0 0 0
38346 - 0 0 0 0 0 0 0 0 0 0 0 0
38347 - 0 0 0 0 0 0 0 0 0 0 0 0
38348 - 0 0 0 0 0 0 10 10 10 26 26 26
38349 - 50 50 50 82 82 82 58 58 58 6 6 6
38350 - 2 2 6 2 2 6 2 2 6 2 2 6
38351 - 2 2 6 2 2 6 2 2 6 2 2 6
38352 - 6 6 6 54 54 54 86 86 86 66 66 66
38353 - 38 38 38 18 18 18 6 6 6 0 0 0
38354 - 0 0 0 0 0 0 0 0 0 0 0 0
38355 - 0 0 0 0 0 0 0 0 0 0 0 0
38356 - 0 0 0 0 0 0 0 0 0 0 0 0
38357 - 0 0 0 0 0 0 0 0 0 0 0 0
38358 - 0 0 0 0 0 0 0 0 0 0 0 0
38359 - 0 0 0 0 0 0 0 0 0 0 0 0
38360 - 0 0 0 0 0 0 0 0 0 0 0 0
38361 - 0 0 0 0 0 0 0 0 0 0 0 0
38362 - 0 0 0 0 0 0 0 0 0 0 0 0
38363 - 0 0 0 0 0 0 0 0 0 0 0 0
38364 - 0 0 0 0 0 0 0 0 0 0 0 0
38365 - 0 0 0 0 0 0 0 0 0 0 0 0
38366 - 0 0 0 0 0 0 0 0 0 0 0 0
38367 - 0 0 0 0 0 0 0 0 0 0 0 0
38368 - 0 0 0 6 6 6 22 22 22 50 50 50
38369 - 78 78 78 34 34 34 2 2 6 2 2 6
38370 - 2 2 6 2 2 6 2 2 6 2 2 6
38371 - 2 2 6 2 2 6 2 2 6 2 2 6
38372 - 2 2 6 2 2 6 6 6 6 70 70 70
38373 - 78 78 78 46 46 46 22 22 22 6 6 6
38374 - 0 0 0 0 0 0 0 0 0 0 0 0
38375 - 0 0 0 0 0 0 0 0 0 0 0 0
38376 - 0 0 0 0 0 0 0 0 0 0 0 0
38377 - 0 0 0 0 0 0 0 0 0 0 0 0
38378 - 0 0 0 0 0 0 0 0 0 0 0 0
38379 - 0 0 0 0 0 0 0 0 0 0 0 0
38380 - 0 0 0 0 0 0 0 0 0 0 0 0
38381 - 0 0 0 0 0 0 0 0 0 0 0 0
38382 - 0 0 1 0 0 1 0 0 1 0 0 0
38383 - 0 0 0 0 0 0 0 0 0 0 0 0
38384 - 0 0 0 0 0 0 0 0 0 0 0 0
38385 - 0 0 0 0 0 0 0 0 0 0 0 0
38386 - 0 0 0 0 0 0 0 0 0 0 0 0
38387 - 0 0 0 0 0 0 0 0 0 0 0 0
38388 - 6 6 6 18 18 18 42 42 42 82 82 82
38389 - 26 26 26 2 2 6 2 2 6 2 2 6
38390 - 2 2 6 2 2 6 2 2 6 2 2 6
38391 - 2 2 6 2 2 6 2 2 6 14 14 14
38392 - 46 46 46 34 34 34 6 6 6 2 2 6
38393 - 42 42 42 78 78 78 42 42 42 18 18 18
38394 - 6 6 6 0 0 0 0 0 0 0 0 0
38395 - 0 0 0 0 0 0 0 0 0 0 0 0
38396 - 0 0 0 0 0 0 0 0 0 0 0 0
38397 - 0 0 0 0 0 0 0 0 0 0 0 0
38398 - 0 0 0 0 0 0 0 0 0 0 0 0
38399 - 0 0 0 0 0 0 0 0 0 0 0 0
38400 - 0 0 0 0 0 0 0 0 0 0 0 0
38401 - 0 0 0 0 0 0 0 0 0 0 0 0
38402 - 0 0 1 0 0 0 0 0 1 0 0 0
38403 - 0 0 0 0 0 0 0 0 0 0 0 0
38404 - 0 0 0 0 0 0 0 0 0 0 0 0
38405 - 0 0 0 0 0 0 0 0 0 0 0 0
38406 - 0 0 0 0 0 0 0 0 0 0 0 0
38407 - 0 0 0 0 0 0 0 0 0 0 0 0
38408 - 10 10 10 30 30 30 66 66 66 58 58 58
38409 - 2 2 6 2 2 6 2 2 6 2 2 6
38410 - 2 2 6 2 2 6 2 2 6 2 2 6
38411 - 2 2 6 2 2 6 2 2 6 26 26 26
38412 - 86 86 86 101 101 101 46 46 46 10 10 10
38413 - 2 2 6 58 58 58 70 70 70 34 34 34
38414 - 10 10 10 0 0 0 0 0 0 0 0 0
38415 - 0 0 0 0 0 0 0 0 0 0 0 0
38416 - 0 0 0 0 0 0 0 0 0 0 0 0
38417 - 0 0 0 0 0 0 0 0 0 0 0 0
38418 - 0 0 0 0 0 0 0 0 0 0 0 0
38419 - 0 0 0 0 0 0 0 0 0 0 0 0
38420 - 0 0 0 0 0 0 0 0 0 0 0 0
38421 - 0 0 0 0 0 0 0 0 0 0 0 0
38422 - 0 0 1 0 0 1 0 0 1 0 0 0
38423 - 0 0 0 0 0 0 0 0 0 0 0 0
38424 - 0 0 0 0 0 0 0 0 0 0 0 0
38425 - 0 0 0 0 0 0 0 0 0 0 0 0
38426 - 0 0 0 0 0 0 0 0 0 0 0 0
38427 - 0 0 0 0 0 0 0 0 0 0 0 0
38428 - 14 14 14 42 42 42 86 86 86 10 10 10
38429 - 2 2 6 2 2 6 2 2 6 2 2 6
38430 - 2 2 6 2 2 6 2 2 6 2 2 6
38431 - 2 2 6 2 2 6 2 2 6 30 30 30
38432 - 94 94 94 94 94 94 58 58 58 26 26 26
38433 - 2 2 6 6 6 6 78 78 78 54 54 54
38434 - 22 22 22 6 6 6 0 0 0 0 0 0
38435 - 0 0 0 0 0 0 0 0 0 0 0 0
38436 - 0 0 0 0 0 0 0 0 0 0 0 0
38437 - 0 0 0 0 0 0 0 0 0 0 0 0
38438 - 0 0 0 0 0 0 0 0 0 0 0 0
38439 - 0 0 0 0 0 0 0 0 0 0 0 0
38440 - 0 0 0 0 0 0 0 0 0 0 0 0
38441 - 0 0 0 0 0 0 0 0 0 0 0 0
38442 - 0 0 0 0 0 0 0 0 0 0 0 0
38443 - 0 0 0 0 0 0 0 0 0 0 0 0
38444 - 0 0 0 0 0 0 0 0 0 0 0 0
38445 - 0 0 0 0 0 0 0 0 0 0 0 0
38446 - 0 0 0 0 0 0 0 0 0 0 0 0
38447 - 0 0 0 0 0 0 0 0 0 6 6 6
38448 - 22 22 22 62 62 62 62 62 62 2 2 6
38449 - 2 2 6 2 2 6 2 2 6 2 2 6
38450 - 2 2 6 2 2 6 2 2 6 2 2 6
38451 - 2 2 6 2 2 6 2 2 6 26 26 26
38452 - 54 54 54 38 38 38 18 18 18 10 10 10
38453 - 2 2 6 2 2 6 34 34 34 82 82 82
38454 - 38 38 38 14 14 14 0 0 0 0 0 0
38455 - 0 0 0 0 0 0 0 0 0 0 0 0
38456 - 0 0 0 0 0 0 0 0 0 0 0 0
38457 - 0 0 0 0 0 0 0 0 0 0 0 0
38458 - 0 0 0 0 0 0 0 0 0 0 0 0
38459 - 0 0 0 0 0 0 0 0 0 0 0 0
38460 - 0 0 0 0 0 0 0 0 0 0 0 0
38461 - 0 0 0 0 0 0 0 0 0 0 0 0
38462 - 0 0 0 0 0 1 0 0 1 0 0 0
38463 - 0 0 0 0 0 0 0 0 0 0 0 0
38464 - 0 0 0 0 0 0 0 0 0 0 0 0
38465 - 0 0 0 0 0 0 0 0 0 0 0 0
38466 - 0 0 0 0 0 0 0 0 0 0 0 0
38467 - 0 0 0 0 0 0 0 0 0 6 6 6
38468 - 30 30 30 78 78 78 30 30 30 2 2 6
38469 - 2 2 6 2 2 6 2 2 6 2 2 6
38470 - 2 2 6 2 2 6 2 2 6 2 2 6
38471 - 2 2 6 2 2 6 2 2 6 10 10 10
38472 - 10 10 10 2 2 6 2 2 6 2 2 6
38473 - 2 2 6 2 2 6 2 2 6 78 78 78
38474 - 50 50 50 18 18 18 6 6 6 0 0 0
38475 - 0 0 0 0 0 0 0 0 0 0 0 0
38476 - 0 0 0 0 0 0 0 0 0 0 0 0
38477 - 0 0 0 0 0 0 0 0 0 0 0 0
38478 - 0 0 0 0 0 0 0 0 0 0 0 0
38479 - 0 0 0 0 0 0 0 0 0 0 0 0
38480 - 0 0 0 0 0 0 0 0 0 0 0 0
38481 - 0 0 0 0 0 0 0 0 0 0 0 0
38482 - 0 0 1 0 0 0 0 0 0 0 0 0
38483 - 0 0 0 0 0 0 0 0 0 0 0 0
38484 - 0 0 0 0 0 0 0 0 0 0 0 0
38485 - 0 0 0 0 0 0 0 0 0 0 0 0
38486 - 0 0 0 0 0 0 0 0 0 0 0 0
38487 - 0 0 0 0 0 0 0 0 0 10 10 10
38488 - 38 38 38 86 86 86 14 14 14 2 2 6
38489 - 2 2 6 2 2 6 2 2 6 2 2 6
38490 - 2 2 6 2 2 6 2 2 6 2 2 6
38491 - 2 2 6 2 2 6 2 2 6 2 2 6
38492 - 2 2 6 2 2 6 2 2 6 2 2 6
38493 - 2 2 6 2 2 6 2 2 6 54 54 54
38494 - 66 66 66 26 26 26 6 6 6 0 0 0
38495 - 0 0 0 0 0 0 0 0 0 0 0 0
38496 - 0 0 0 0 0 0 0 0 0 0 0 0
38497 - 0 0 0 0 0 0 0 0 0 0 0 0
38498 - 0 0 0 0 0 0 0 0 0 0 0 0
38499 - 0 0 0 0 0 0 0 0 0 0 0 0
38500 - 0 0 0 0 0 0 0 0 0 0 0 0
38501 - 0 0 0 0 0 0 0 0 0 0 0 0
38502 - 0 0 0 0 0 1 0 0 1 0 0 0
38503 - 0 0 0 0 0 0 0 0 0 0 0 0
38504 - 0 0 0 0 0 0 0 0 0 0 0 0
38505 - 0 0 0 0 0 0 0 0 0 0 0 0
38506 - 0 0 0 0 0 0 0 0 0 0 0 0
38507 - 0 0 0 0 0 0 0 0 0 14 14 14
38508 - 42 42 42 82 82 82 2 2 6 2 2 6
38509 - 2 2 6 6 6 6 10 10 10 2 2 6
38510 - 2 2 6 2 2 6 2 2 6 2 2 6
38511 - 2 2 6 2 2 6 2 2 6 6 6 6
38512 - 14 14 14 10 10 10 2 2 6 2 2 6
38513 - 2 2 6 2 2 6 2 2 6 18 18 18
38514 - 82 82 82 34 34 34 10 10 10 0 0 0
38515 - 0 0 0 0 0 0 0 0 0 0 0 0
38516 - 0 0 0 0 0 0 0 0 0 0 0 0
38517 - 0 0 0 0 0 0 0 0 0 0 0 0
38518 - 0 0 0 0 0 0 0 0 0 0 0 0
38519 - 0 0 0 0 0 0 0 0 0 0 0 0
38520 - 0 0 0 0 0 0 0 0 0 0 0 0
38521 - 0 0 0 0 0 0 0 0 0 0 0 0
38522 - 0 0 1 0 0 0 0 0 0 0 0 0
38523 - 0 0 0 0 0 0 0 0 0 0 0 0
38524 - 0 0 0 0 0 0 0 0 0 0 0 0
38525 - 0 0 0 0 0 0 0 0 0 0 0 0
38526 - 0 0 0 0 0 0 0 0 0 0 0 0
38527 - 0 0 0 0 0 0 0 0 0 14 14 14
38528 - 46 46 46 86 86 86 2 2 6 2 2 6
38529 - 6 6 6 6 6 6 22 22 22 34 34 34
38530 - 6 6 6 2 2 6 2 2 6 2 2 6
38531 - 2 2 6 2 2 6 18 18 18 34 34 34
38532 - 10 10 10 50 50 50 22 22 22 2 2 6
38533 - 2 2 6 2 2 6 2 2 6 10 10 10
38534 - 86 86 86 42 42 42 14 14 14 0 0 0
38535 - 0 0 0 0 0 0 0 0 0 0 0 0
38536 - 0 0 0 0 0 0 0 0 0 0 0 0
38537 - 0 0 0 0 0 0 0 0 0 0 0 0
38538 - 0 0 0 0 0 0 0 0 0 0 0 0
38539 - 0 0 0 0 0 0 0 0 0 0 0 0
38540 - 0 0 0 0 0 0 0 0 0 0 0 0
38541 - 0 0 0 0 0 0 0 0 0 0 0 0
38542 - 0 0 1 0 0 1 0 0 1 0 0 0
38543 - 0 0 0 0 0 0 0 0 0 0 0 0
38544 - 0 0 0 0 0 0 0 0 0 0 0 0
38545 - 0 0 0 0 0 0 0 0 0 0 0 0
38546 - 0 0 0 0 0 0 0 0 0 0 0 0
38547 - 0 0 0 0 0 0 0 0 0 14 14 14
38548 - 46 46 46 86 86 86 2 2 6 2 2 6
38549 - 38 38 38 116 116 116 94 94 94 22 22 22
38550 - 22 22 22 2 2 6 2 2 6 2 2 6
38551 - 14 14 14 86 86 86 138 138 138 162 162 162
38552 -154 154 154 38 38 38 26 26 26 6 6 6
38553 - 2 2 6 2 2 6 2 2 6 2 2 6
38554 - 86 86 86 46 46 46 14 14 14 0 0 0
38555 - 0 0 0 0 0 0 0 0 0 0 0 0
38556 - 0 0 0 0 0 0 0 0 0 0 0 0
38557 - 0 0 0 0 0 0 0 0 0 0 0 0
38558 - 0 0 0 0 0 0 0 0 0 0 0 0
38559 - 0 0 0 0 0 0 0 0 0 0 0 0
38560 - 0 0 0 0 0 0 0 0 0 0 0 0
38561 - 0 0 0 0 0 0 0 0 0 0 0 0
38562 - 0 0 0 0 0 0 0 0 0 0 0 0
38563 - 0 0 0 0 0 0 0 0 0 0 0 0
38564 - 0 0 0 0 0 0 0 0 0 0 0 0
38565 - 0 0 0 0 0 0 0 0 0 0 0 0
38566 - 0 0 0 0 0 0 0 0 0 0 0 0
38567 - 0 0 0 0 0 0 0 0 0 14 14 14
38568 - 46 46 46 86 86 86 2 2 6 14 14 14
38569 -134 134 134 198 198 198 195 195 195 116 116 116
38570 - 10 10 10 2 2 6 2 2 6 6 6 6
38571 -101 98 89 187 187 187 210 210 210 218 218 218
38572 -214 214 214 134 134 134 14 14 14 6 6 6
38573 - 2 2 6 2 2 6 2 2 6 2 2 6
38574 - 86 86 86 50 50 50 18 18 18 6 6 6
38575 - 0 0 0 0 0 0 0 0 0 0 0 0
38576 - 0 0 0 0 0 0 0 0 0 0 0 0
38577 - 0 0 0 0 0 0 0 0 0 0 0 0
38578 - 0 0 0 0 0 0 0 0 0 0 0 0
38579 - 0 0 0 0 0 0 0 0 0 0 0 0
38580 - 0 0 0 0 0 0 0 0 0 0 0 0
38581 - 0 0 0 0 0 0 0 0 1 0 0 0
38582 - 0 0 1 0 0 1 0 0 1 0 0 0
38583 - 0 0 0 0 0 0 0 0 0 0 0 0
38584 - 0 0 0 0 0 0 0 0 0 0 0 0
38585 - 0 0 0 0 0 0 0 0 0 0 0 0
38586 - 0 0 0 0 0 0 0 0 0 0 0 0
38587 - 0 0 0 0 0 0 0 0 0 14 14 14
38588 - 46 46 46 86 86 86 2 2 6 54 54 54
38589 -218 218 218 195 195 195 226 226 226 246 246 246
38590 - 58 58 58 2 2 6 2 2 6 30 30 30
38591 -210 210 210 253 253 253 174 174 174 123 123 123
38592 -221 221 221 234 234 234 74 74 74 2 2 6
38593 - 2 2 6 2 2 6 2 2 6 2 2 6
38594 - 70 70 70 58 58 58 22 22 22 6 6 6
38595 - 0 0 0 0 0 0 0 0 0 0 0 0
38596 - 0 0 0 0 0 0 0 0 0 0 0 0
38597 - 0 0 0 0 0 0 0 0 0 0 0 0
38598 - 0 0 0 0 0 0 0 0 0 0 0 0
38599 - 0 0 0 0 0 0 0 0 0 0 0 0
38600 - 0 0 0 0 0 0 0 0 0 0 0 0
38601 - 0 0 0 0 0 0 0 0 0 0 0 0
38602 - 0 0 0 0 0 0 0 0 0 0 0 0
38603 - 0 0 0 0 0 0 0 0 0 0 0 0
38604 - 0 0 0 0 0 0 0 0 0 0 0 0
38605 - 0 0 0 0 0 0 0 0 0 0 0 0
38606 - 0 0 0 0 0 0 0 0 0 0 0 0
38607 - 0 0 0 0 0 0 0 0 0 14 14 14
38608 - 46 46 46 82 82 82 2 2 6 106 106 106
38609 -170 170 170 26 26 26 86 86 86 226 226 226
38610 -123 123 123 10 10 10 14 14 14 46 46 46
38611 -231 231 231 190 190 190 6 6 6 70 70 70
38612 - 90 90 90 238 238 238 158 158 158 2 2 6
38613 - 2 2 6 2 2 6 2 2 6 2 2 6
38614 - 70 70 70 58 58 58 22 22 22 6 6 6
38615 - 0 0 0 0 0 0 0 0 0 0 0 0
38616 - 0 0 0 0 0 0 0 0 0 0 0 0
38617 - 0 0 0 0 0 0 0 0 0 0 0 0
38618 - 0 0 0 0 0 0 0 0 0 0 0 0
38619 - 0 0 0 0 0 0 0 0 0 0 0 0
38620 - 0 0 0 0 0 0 0 0 0 0 0 0
38621 - 0 0 0 0 0 0 0 0 1 0 0 0
38622 - 0 0 1 0 0 1 0 0 1 0 0 0
38623 - 0 0 0 0 0 0 0 0 0 0 0 0
38624 - 0 0 0 0 0 0 0 0 0 0 0 0
38625 - 0 0 0 0 0 0 0 0 0 0 0 0
38626 - 0 0 0 0 0 0 0 0 0 0 0 0
38627 - 0 0 0 0 0 0 0 0 0 14 14 14
38628 - 42 42 42 86 86 86 6 6 6 116 116 116
38629 -106 106 106 6 6 6 70 70 70 149 149 149
38630 -128 128 128 18 18 18 38 38 38 54 54 54
38631 -221 221 221 106 106 106 2 2 6 14 14 14
38632 - 46 46 46 190 190 190 198 198 198 2 2 6
38633 - 2 2 6 2 2 6 2 2 6 2 2 6
38634 - 74 74 74 62 62 62 22 22 22 6 6 6
38635 - 0 0 0 0 0 0 0 0 0 0 0 0
38636 - 0 0 0 0 0 0 0 0 0 0 0 0
38637 - 0 0 0 0 0 0 0 0 0 0 0 0
38638 - 0 0 0 0 0 0 0 0 0 0 0 0
38639 - 0 0 0 0 0 0 0 0 0 0 0 0
38640 - 0 0 0 0 0 0 0 0 0 0 0 0
38641 - 0 0 0 0 0 0 0 0 1 0 0 0
38642 - 0 0 1 0 0 0 0 0 1 0 0 0
38643 - 0 0 0 0 0 0 0 0 0 0 0 0
38644 - 0 0 0 0 0 0 0 0 0 0 0 0
38645 - 0 0 0 0 0 0 0 0 0 0 0 0
38646 - 0 0 0 0 0 0 0 0 0 0 0 0
38647 - 0 0 0 0 0 0 0 0 0 14 14 14
38648 - 42 42 42 94 94 94 14 14 14 101 101 101
38649 -128 128 128 2 2 6 18 18 18 116 116 116
38650 -118 98 46 121 92 8 121 92 8 98 78 10
38651 -162 162 162 106 106 106 2 2 6 2 2 6
38652 - 2 2 6 195 195 195 195 195 195 6 6 6
38653 - 2 2 6 2 2 6 2 2 6 2 2 6
38654 - 74 74 74 62 62 62 22 22 22 6 6 6
38655 - 0 0 0 0 0 0 0 0 0 0 0 0
38656 - 0 0 0 0 0 0 0 0 0 0 0 0
38657 - 0 0 0 0 0 0 0 0 0 0 0 0
38658 - 0 0 0 0 0 0 0 0 0 0 0 0
38659 - 0 0 0 0 0 0 0 0 0 0 0 0
38660 - 0 0 0 0 0 0 0 0 0 0 0 0
38661 - 0 0 0 0 0 0 0 0 1 0 0 1
38662 - 0 0 1 0 0 0 0 0 1 0 0 0
38663 - 0 0 0 0 0 0 0 0 0 0 0 0
38664 - 0 0 0 0 0 0 0 0 0 0 0 0
38665 - 0 0 0 0 0 0 0 0 0 0 0 0
38666 - 0 0 0 0 0 0 0 0 0 0 0 0
38667 - 0 0 0 0 0 0 0 0 0 10 10 10
38668 - 38 38 38 90 90 90 14 14 14 58 58 58
38669 -210 210 210 26 26 26 54 38 6 154 114 10
38670 -226 170 11 236 186 11 225 175 15 184 144 12
38671 -215 174 15 175 146 61 37 26 9 2 2 6
38672 - 70 70 70 246 246 246 138 138 138 2 2 6
38673 - 2 2 6 2 2 6 2 2 6 2 2 6
38674 - 70 70 70 66 66 66 26 26 26 6 6 6
38675 - 0 0 0 0 0 0 0 0 0 0 0 0
38676 - 0 0 0 0 0 0 0 0 0 0 0 0
38677 - 0 0 0 0 0 0 0 0 0 0 0 0
38678 - 0 0 0 0 0 0 0 0 0 0 0 0
38679 - 0 0 0 0 0 0 0 0 0 0 0 0
38680 - 0 0 0 0 0 0 0 0 0 0 0 0
38681 - 0 0 0 0 0 0 0 0 0 0 0 0
38682 - 0 0 0 0 0 0 0 0 0 0 0 0
38683 - 0 0 0 0 0 0 0 0 0 0 0 0
38684 - 0 0 0 0 0 0 0 0 0 0 0 0
38685 - 0 0 0 0 0 0 0 0 0 0 0 0
38686 - 0 0 0 0 0 0 0 0 0 0 0 0
38687 - 0 0 0 0 0 0 0 0 0 10 10 10
38688 - 38 38 38 86 86 86 14 14 14 10 10 10
38689 -195 195 195 188 164 115 192 133 9 225 175 15
38690 -239 182 13 234 190 10 232 195 16 232 200 30
38691 -245 207 45 241 208 19 232 195 16 184 144 12
38692 -218 194 134 211 206 186 42 42 42 2 2 6
38693 - 2 2 6 2 2 6 2 2 6 2 2 6
38694 - 50 50 50 74 74 74 30 30 30 6 6 6
38695 - 0 0 0 0 0 0 0 0 0 0 0 0
38696 - 0 0 0 0 0 0 0 0 0 0 0 0
38697 - 0 0 0 0 0 0 0 0 0 0 0 0
38698 - 0 0 0 0 0 0 0 0 0 0 0 0
38699 - 0 0 0 0 0 0 0 0 0 0 0 0
38700 - 0 0 0 0 0 0 0 0 0 0 0 0
38701 - 0 0 0 0 0 0 0 0 0 0 0 0
38702 - 0 0 0 0 0 0 0 0 0 0 0 0
38703 - 0 0 0 0 0 0 0 0 0 0 0 0
38704 - 0 0 0 0 0 0 0 0 0 0 0 0
38705 - 0 0 0 0 0 0 0 0 0 0 0 0
38706 - 0 0 0 0 0 0 0 0 0 0 0 0
38707 - 0 0 0 0 0 0 0 0 0 10 10 10
38708 - 34 34 34 86 86 86 14 14 14 2 2 6
38709 -121 87 25 192 133 9 219 162 10 239 182 13
38710 -236 186 11 232 195 16 241 208 19 244 214 54
38711 -246 218 60 246 218 38 246 215 20 241 208 19
38712 -241 208 19 226 184 13 121 87 25 2 2 6
38713 - 2 2 6 2 2 6 2 2 6 2 2 6
38714 - 50 50 50 82 82 82 34 34 34 10 10 10
38715 - 0 0 0 0 0 0 0 0 0 0 0 0
38716 - 0 0 0 0 0 0 0 0 0 0 0 0
38717 - 0 0 0 0 0 0 0 0 0 0 0 0
38718 - 0 0 0 0 0 0 0 0 0 0 0 0
38719 - 0 0 0 0 0 0 0 0 0 0 0 0
38720 - 0 0 0 0 0 0 0 0 0 0 0 0
38721 - 0 0 0 0 0 0 0 0 0 0 0 0
38722 - 0 0 0 0 0 0 0 0 0 0 0 0
38723 - 0 0 0 0 0 0 0 0 0 0 0 0
38724 - 0 0 0 0 0 0 0 0 0 0 0 0
38725 - 0 0 0 0 0 0 0 0 0 0 0 0
38726 - 0 0 0 0 0 0 0 0 0 0 0 0
38727 - 0 0 0 0 0 0 0 0 0 10 10 10
38728 - 34 34 34 82 82 82 30 30 30 61 42 6
38729 -180 123 7 206 145 10 230 174 11 239 182 13
38730 -234 190 10 238 202 15 241 208 19 246 218 74
38731 -246 218 38 246 215 20 246 215 20 246 215 20
38732 -226 184 13 215 174 15 184 144 12 6 6 6
38733 - 2 2 6 2 2 6 2 2 6 2 2 6
38734 - 26 26 26 94 94 94 42 42 42 14 14 14
38735 - 0 0 0 0 0 0 0 0 0 0 0 0
38736 - 0 0 0 0 0 0 0 0 0 0 0 0
38737 - 0 0 0 0 0 0 0 0 0 0 0 0
38738 - 0 0 0 0 0 0 0 0 0 0 0 0
38739 - 0 0 0 0 0 0 0 0 0 0 0 0
38740 - 0 0 0 0 0 0 0 0 0 0 0 0
38741 - 0 0 0 0 0 0 0 0 0 0 0 0
38742 - 0 0 0 0 0 0 0 0 0 0 0 0
38743 - 0 0 0 0 0 0 0 0 0 0 0 0
38744 - 0 0 0 0 0 0 0 0 0 0 0 0
38745 - 0 0 0 0 0 0 0 0 0 0 0 0
38746 - 0 0 0 0 0 0 0 0 0 0 0 0
38747 - 0 0 0 0 0 0 0 0 0 10 10 10
38748 - 30 30 30 78 78 78 50 50 50 104 69 6
38749 -192 133 9 216 158 10 236 178 12 236 186 11
38750 -232 195 16 241 208 19 244 214 54 245 215 43
38751 -246 215 20 246 215 20 241 208 19 198 155 10
38752 -200 144 11 216 158 10 156 118 10 2 2 6
38753 - 2 2 6 2 2 6 2 2 6 2 2 6
38754 - 6 6 6 90 90 90 54 54 54 18 18 18
38755 - 6 6 6 0 0 0 0 0 0 0 0 0
38756 - 0 0 0 0 0 0 0 0 0 0 0 0
38757 - 0 0 0 0 0 0 0 0 0 0 0 0
38758 - 0 0 0 0 0 0 0 0 0 0 0 0
38759 - 0 0 0 0 0 0 0 0 0 0 0 0
38760 - 0 0 0 0 0 0 0 0 0 0 0 0
38761 - 0 0 0 0 0 0 0 0 0 0 0 0
38762 - 0 0 0 0 0 0 0 0 0 0 0 0
38763 - 0 0 0 0 0 0 0 0 0 0 0 0
38764 - 0 0 0 0 0 0 0 0 0 0 0 0
38765 - 0 0 0 0 0 0 0 0 0 0 0 0
38766 - 0 0 0 0 0 0 0 0 0 0 0 0
38767 - 0 0 0 0 0 0 0 0 0 10 10 10
38768 - 30 30 30 78 78 78 46 46 46 22 22 22
38769 -137 92 6 210 162 10 239 182 13 238 190 10
38770 -238 202 15 241 208 19 246 215 20 246 215 20
38771 -241 208 19 203 166 17 185 133 11 210 150 10
38772 -216 158 10 210 150 10 102 78 10 2 2 6
38773 - 6 6 6 54 54 54 14 14 14 2 2 6
38774 - 2 2 6 62 62 62 74 74 74 30 30 30
38775 - 10 10 10 0 0 0 0 0 0 0 0 0
38776 - 0 0 0 0 0 0 0 0 0 0 0 0
38777 - 0 0 0 0 0 0 0 0 0 0 0 0
38778 - 0 0 0 0 0 0 0 0 0 0 0 0
38779 - 0 0 0 0 0 0 0 0 0 0 0 0
38780 - 0 0 0 0 0 0 0 0 0 0 0 0
38781 - 0 0 0 0 0 0 0 0 0 0 0 0
38782 - 0 0 0 0 0 0 0 0 0 0 0 0
38783 - 0 0 0 0 0 0 0 0 0 0 0 0
38784 - 0 0 0 0 0 0 0 0 0 0 0 0
38785 - 0 0 0 0 0 0 0 0 0 0 0 0
38786 - 0 0 0 0 0 0 0 0 0 0 0 0
38787 - 0 0 0 0 0 0 0 0 0 10 10 10
38788 - 34 34 34 78 78 78 50 50 50 6 6 6
38789 - 94 70 30 139 102 15 190 146 13 226 184 13
38790 -232 200 30 232 195 16 215 174 15 190 146 13
38791 -168 122 10 192 133 9 210 150 10 213 154 11
38792 -202 150 34 182 157 106 101 98 89 2 2 6
38793 - 2 2 6 78 78 78 116 116 116 58 58 58
38794 - 2 2 6 22 22 22 90 90 90 46 46 46
38795 - 18 18 18 6 6 6 0 0 0 0 0 0
38796 - 0 0 0 0 0 0 0 0 0 0 0 0
38797 - 0 0 0 0 0 0 0 0 0 0 0 0
38798 - 0 0 0 0 0 0 0 0 0 0 0 0
38799 - 0 0 0 0 0 0 0 0 0 0 0 0
38800 - 0 0 0 0 0 0 0 0 0 0 0 0
38801 - 0 0 0 0 0 0 0 0 0 0 0 0
38802 - 0 0 0 0 0 0 0 0 0 0 0 0
38803 - 0 0 0 0 0 0 0 0 0 0 0 0
38804 - 0 0 0 0 0 0 0 0 0 0 0 0
38805 - 0 0 0 0 0 0 0 0 0 0 0 0
38806 - 0 0 0 0 0 0 0 0 0 0 0 0
38807 - 0 0 0 0 0 0 0 0 0 10 10 10
38808 - 38 38 38 86 86 86 50 50 50 6 6 6
38809 -128 128 128 174 154 114 156 107 11 168 122 10
38810 -198 155 10 184 144 12 197 138 11 200 144 11
38811 -206 145 10 206 145 10 197 138 11 188 164 115
38812 -195 195 195 198 198 198 174 174 174 14 14 14
38813 - 2 2 6 22 22 22 116 116 116 116 116 116
38814 - 22 22 22 2 2 6 74 74 74 70 70 70
38815 - 30 30 30 10 10 10 0 0 0 0 0 0
38816 - 0 0 0 0 0 0 0 0 0 0 0 0
38817 - 0 0 0 0 0 0 0 0 0 0 0 0
38818 - 0 0 0 0 0 0 0 0 0 0 0 0
38819 - 0 0 0 0 0 0 0 0 0 0 0 0
38820 - 0 0 0 0 0 0 0 0 0 0 0 0
38821 - 0 0 0 0 0 0 0 0 0 0 0 0
38822 - 0 0 0 0 0 0 0 0 0 0 0 0
38823 - 0 0 0 0 0 0 0 0 0 0 0 0
38824 - 0 0 0 0 0 0 0 0 0 0 0 0
38825 - 0 0 0 0 0 0 0 0 0 0 0 0
38826 - 0 0 0 0 0 0 0 0 0 0 0 0
38827 - 0 0 0 0 0 0 6 6 6 18 18 18
38828 - 50 50 50 101 101 101 26 26 26 10 10 10
38829 -138 138 138 190 190 190 174 154 114 156 107 11
38830 -197 138 11 200 144 11 197 138 11 192 133 9
38831 -180 123 7 190 142 34 190 178 144 187 187 187
38832 -202 202 202 221 221 221 214 214 214 66 66 66
38833 - 2 2 6 2 2 6 50 50 50 62 62 62
38834 - 6 6 6 2 2 6 10 10 10 90 90 90
38835 - 50 50 50 18 18 18 6 6 6 0 0 0
38836 - 0 0 0 0 0 0 0 0 0 0 0 0
38837 - 0 0 0 0 0 0 0 0 0 0 0 0
38838 - 0 0 0 0 0 0 0 0 0 0 0 0
38839 - 0 0 0 0 0 0 0 0 0 0 0 0
38840 - 0 0 0 0 0 0 0 0 0 0 0 0
38841 - 0 0 0 0 0 0 0 0 0 0 0 0
38842 - 0 0 0 0 0 0 0 0 0 0 0 0
38843 - 0 0 0 0 0 0 0 0 0 0 0 0
38844 - 0 0 0 0 0 0 0 0 0 0 0 0
38845 - 0 0 0 0 0 0 0 0 0 0 0 0
38846 - 0 0 0 0 0 0 0 0 0 0 0 0
38847 - 0 0 0 0 0 0 10 10 10 34 34 34
38848 - 74 74 74 74 74 74 2 2 6 6 6 6
38849 -144 144 144 198 198 198 190 190 190 178 166 146
38850 -154 121 60 156 107 11 156 107 11 168 124 44
38851 -174 154 114 187 187 187 190 190 190 210 210 210
38852 -246 246 246 253 253 253 253 253 253 182 182 182
38853 - 6 6 6 2 2 6 2 2 6 2 2 6
38854 - 2 2 6 2 2 6 2 2 6 62 62 62
38855 - 74 74 74 34 34 34 14 14 14 0 0 0
38856 - 0 0 0 0 0 0 0 0 0 0 0 0
38857 - 0 0 0 0 0 0 0 0 0 0 0 0
38858 - 0 0 0 0 0 0 0 0 0 0 0 0
38859 - 0 0 0 0 0 0 0 0 0 0 0 0
38860 - 0 0 0 0 0 0 0 0 0 0 0 0
38861 - 0 0 0 0 0 0 0 0 0 0 0 0
38862 - 0 0 0 0 0 0 0 0 0 0 0 0
38863 - 0 0 0 0 0 0 0 0 0 0 0 0
38864 - 0 0 0 0 0 0 0 0 0 0 0 0
38865 - 0 0 0 0 0 0 0 0 0 0 0 0
38866 - 0 0 0 0 0 0 0 0 0 0 0 0
38867 - 0 0 0 10 10 10 22 22 22 54 54 54
38868 - 94 94 94 18 18 18 2 2 6 46 46 46
38869 -234 234 234 221 221 221 190 190 190 190 190 190
38870 -190 190 190 187 187 187 187 187 187 190 190 190
38871 -190 190 190 195 195 195 214 214 214 242 242 242
38872 -253 253 253 253 253 253 253 253 253 253 253 253
38873 - 82 82 82 2 2 6 2 2 6 2 2 6
38874 - 2 2 6 2 2 6 2 2 6 14 14 14
38875 - 86 86 86 54 54 54 22 22 22 6 6 6
38876 - 0 0 0 0 0 0 0 0 0 0 0 0
38877 - 0 0 0 0 0 0 0 0 0 0 0 0
38878 - 0 0 0 0 0 0 0 0 0 0 0 0
38879 - 0 0 0 0 0 0 0 0 0 0 0 0
38880 - 0 0 0 0 0 0 0 0 0 0 0 0
38881 - 0 0 0 0 0 0 0 0 0 0 0 0
38882 - 0 0 0 0 0 0 0 0 0 0 0 0
38883 - 0 0 0 0 0 0 0 0 0 0 0 0
38884 - 0 0 0 0 0 0 0 0 0 0 0 0
38885 - 0 0 0 0 0 0 0 0 0 0 0 0
38886 - 0 0 0 0 0 0 0 0 0 0 0 0
38887 - 6 6 6 18 18 18 46 46 46 90 90 90
38888 - 46 46 46 18 18 18 6 6 6 182 182 182
38889 -253 253 253 246 246 246 206 206 206 190 190 190
38890 -190 190 190 190 190 190 190 190 190 190 190 190
38891 -206 206 206 231 231 231 250 250 250 253 253 253
38892 -253 253 253 253 253 253 253 253 253 253 253 253
38893 -202 202 202 14 14 14 2 2 6 2 2 6
38894 - 2 2 6 2 2 6 2 2 6 2 2 6
38895 - 42 42 42 86 86 86 42 42 42 18 18 18
38896 - 6 6 6 0 0 0 0 0 0 0 0 0
38897 - 0 0 0 0 0 0 0 0 0 0 0 0
38898 - 0 0 0 0 0 0 0 0 0 0 0 0
38899 - 0 0 0 0 0 0 0 0 0 0 0 0
38900 - 0 0 0 0 0 0 0 0 0 0 0 0
38901 - 0 0 0 0 0 0 0 0 0 0 0 0
38902 - 0 0 0 0 0 0 0 0 0 0 0 0
38903 - 0 0 0 0 0 0 0 0 0 0 0 0
38904 - 0 0 0 0 0 0 0 0 0 0 0 0
38905 - 0 0 0 0 0 0 0 0 0 0 0 0
38906 - 0 0 0 0 0 0 0 0 0 6 6 6
38907 - 14 14 14 38 38 38 74 74 74 66 66 66
38908 - 2 2 6 6 6 6 90 90 90 250 250 250
38909 -253 253 253 253 253 253 238 238 238 198 198 198
38910 -190 190 190 190 190 190 195 195 195 221 221 221
38911 -246 246 246 253 253 253 253 253 253 253 253 253
38912 -253 253 253 253 253 253 253 253 253 253 253 253
38913 -253 253 253 82 82 82 2 2 6 2 2 6
38914 - 2 2 6 2 2 6 2 2 6 2 2 6
38915 - 2 2 6 78 78 78 70 70 70 34 34 34
38916 - 14 14 14 6 6 6 0 0 0 0 0 0
38917 - 0 0 0 0 0 0 0 0 0 0 0 0
38918 - 0 0 0 0 0 0 0 0 0 0 0 0
38919 - 0 0 0 0 0 0 0 0 0 0 0 0
38920 - 0 0 0 0 0 0 0 0 0 0 0 0
38921 - 0 0 0 0 0 0 0 0 0 0 0 0
38922 - 0 0 0 0 0 0 0 0 0 0 0 0
38923 - 0 0 0 0 0 0 0 0 0 0 0 0
38924 - 0 0 0 0 0 0 0 0 0 0 0 0
38925 - 0 0 0 0 0 0 0 0 0 0 0 0
38926 - 0 0 0 0 0 0 0 0 0 14 14 14
38927 - 34 34 34 66 66 66 78 78 78 6 6 6
38928 - 2 2 6 18 18 18 218 218 218 253 253 253
38929 -253 253 253 253 253 253 253 253 253 246 246 246
38930 -226 226 226 231 231 231 246 246 246 253 253 253
38931 -253 253 253 253 253 253 253 253 253 253 253 253
38932 -253 253 253 253 253 253 253 253 253 253 253 253
38933 -253 253 253 178 178 178 2 2 6 2 2 6
38934 - 2 2 6 2 2 6 2 2 6 2 2 6
38935 - 2 2 6 18 18 18 90 90 90 62 62 62
38936 - 30 30 30 10 10 10 0 0 0 0 0 0
38937 - 0 0 0 0 0 0 0 0 0 0 0 0
38938 - 0 0 0 0 0 0 0 0 0 0 0 0
38939 - 0 0 0 0 0 0 0 0 0 0 0 0
38940 - 0 0 0 0 0 0 0 0 0 0 0 0
38941 - 0 0 0 0 0 0 0 0 0 0 0 0
38942 - 0 0 0 0 0 0 0 0 0 0 0 0
38943 - 0 0 0 0 0 0 0 0 0 0 0 0
38944 - 0 0 0 0 0 0 0 0 0 0 0 0
38945 - 0 0 0 0 0 0 0 0 0 0 0 0
38946 - 0 0 0 0 0 0 10 10 10 26 26 26
38947 - 58 58 58 90 90 90 18 18 18 2 2 6
38948 - 2 2 6 110 110 110 253 253 253 253 253 253
38949 -253 253 253 253 253 253 253 253 253 253 253 253
38950 -250 250 250 253 253 253 253 253 253 253 253 253
38951 -253 253 253 253 253 253 253 253 253 253 253 253
38952 -253 253 253 253 253 253 253 253 253 253 253 253
38953 -253 253 253 231 231 231 18 18 18 2 2 6
38954 - 2 2 6 2 2 6 2 2 6 2 2 6
38955 - 2 2 6 2 2 6 18 18 18 94 94 94
38956 - 54 54 54 26 26 26 10 10 10 0 0 0
38957 - 0 0 0 0 0 0 0 0 0 0 0 0
38958 - 0 0 0 0 0 0 0 0 0 0 0 0
38959 - 0 0 0 0 0 0 0 0 0 0 0 0
38960 - 0 0 0 0 0 0 0 0 0 0 0 0
38961 - 0 0 0 0 0 0 0 0 0 0 0 0
38962 - 0 0 0 0 0 0 0 0 0 0 0 0
38963 - 0 0 0 0 0 0 0 0 0 0 0 0
38964 - 0 0 0 0 0 0 0 0 0 0 0 0
38965 - 0 0 0 0 0 0 0 0 0 0 0 0
38966 - 0 0 0 6 6 6 22 22 22 50 50 50
38967 - 90 90 90 26 26 26 2 2 6 2 2 6
38968 - 14 14 14 195 195 195 250 250 250 253 253 253
38969 -253 253 253 253 253 253 253 253 253 253 253 253
38970 -253 253 253 253 253 253 253 253 253 253 253 253
38971 -253 253 253 253 253 253 253 253 253 253 253 253
38972 -253 253 253 253 253 253 253 253 253 253 253 253
38973 -250 250 250 242 242 242 54 54 54 2 2 6
38974 - 2 2 6 2 2 6 2 2 6 2 2 6
38975 - 2 2 6 2 2 6 2 2 6 38 38 38
38976 - 86 86 86 50 50 50 22 22 22 6 6 6
38977 - 0 0 0 0 0 0 0 0 0 0 0 0
38978 - 0 0 0 0 0 0 0 0 0 0 0 0
38979 - 0 0 0 0 0 0 0 0 0 0 0 0
38980 - 0 0 0 0 0 0 0 0 0 0 0 0
38981 - 0 0 0 0 0 0 0 0 0 0 0 0
38982 - 0 0 0 0 0 0 0 0 0 0 0 0
38983 - 0 0 0 0 0 0 0 0 0 0 0 0
38984 - 0 0 0 0 0 0 0 0 0 0 0 0
38985 - 0 0 0 0 0 0 0 0 0 0 0 0
38986 - 6 6 6 14 14 14 38 38 38 82 82 82
38987 - 34 34 34 2 2 6 2 2 6 2 2 6
38988 - 42 42 42 195 195 195 246 246 246 253 253 253
38989 -253 253 253 253 253 253 253 253 253 250 250 250
38990 -242 242 242 242 242 242 250 250 250 253 253 253
38991 -253 253 253 253 253 253 253 253 253 253 253 253
38992 -253 253 253 250 250 250 246 246 246 238 238 238
38993 -226 226 226 231 231 231 101 101 101 6 6 6
38994 - 2 2 6 2 2 6 2 2 6 2 2 6
38995 - 2 2 6 2 2 6 2 2 6 2 2 6
38996 - 38 38 38 82 82 82 42 42 42 14 14 14
38997 - 6 6 6 0 0 0 0 0 0 0 0 0
38998 - 0 0 0 0 0 0 0 0 0 0 0 0
38999 - 0 0 0 0 0 0 0 0 0 0 0 0
39000 - 0 0 0 0 0 0 0 0 0 0 0 0
39001 - 0 0 0 0 0 0 0 0 0 0 0 0
39002 - 0 0 0 0 0 0 0 0 0 0 0 0
39003 - 0 0 0 0 0 0 0 0 0 0 0 0
39004 - 0 0 0 0 0 0 0 0 0 0 0 0
39005 - 0 0 0 0 0 0 0 0 0 0 0 0
39006 - 10 10 10 26 26 26 62 62 62 66 66 66
39007 - 2 2 6 2 2 6 2 2 6 6 6 6
39008 - 70 70 70 170 170 170 206 206 206 234 234 234
39009 -246 246 246 250 250 250 250 250 250 238 238 238
39010 -226 226 226 231 231 231 238 238 238 250 250 250
39011 -250 250 250 250 250 250 246 246 246 231 231 231
39012 -214 214 214 206 206 206 202 202 202 202 202 202
39013 -198 198 198 202 202 202 182 182 182 18 18 18
39014 - 2 2 6 2 2 6 2 2 6 2 2 6
39015 - 2 2 6 2 2 6 2 2 6 2 2 6
39016 - 2 2 6 62 62 62 66 66 66 30 30 30
39017 - 10 10 10 0 0 0 0 0 0 0 0 0
39018 - 0 0 0 0 0 0 0 0 0 0 0 0
39019 - 0 0 0 0 0 0 0 0 0 0 0 0
39020 - 0 0 0 0 0 0 0 0 0 0 0 0
39021 - 0 0 0 0 0 0 0 0 0 0 0 0
39022 - 0 0 0 0 0 0 0 0 0 0 0 0
39023 - 0 0 0 0 0 0 0 0 0 0 0 0
39024 - 0 0 0 0 0 0 0 0 0 0 0 0
39025 - 0 0 0 0 0 0 0 0 0 0 0 0
39026 - 14 14 14 42 42 42 82 82 82 18 18 18
39027 - 2 2 6 2 2 6 2 2 6 10 10 10
39028 - 94 94 94 182 182 182 218 218 218 242 242 242
39029 -250 250 250 253 253 253 253 253 253 250 250 250
39030 -234 234 234 253 253 253 253 253 253 253 253 253
39031 -253 253 253 253 253 253 253 253 253 246 246 246
39032 -238 238 238 226 226 226 210 210 210 202 202 202
39033 -195 195 195 195 195 195 210 210 210 158 158 158
39034 - 6 6 6 14 14 14 50 50 50 14 14 14
39035 - 2 2 6 2 2 6 2 2 6 2 2 6
39036 - 2 2 6 6 6 6 86 86 86 46 46 46
39037 - 18 18 18 6 6 6 0 0 0 0 0 0
39038 - 0 0 0 0 0 0 0 0 0 0 0 0
39039 - 0 0 0 0 0 0 0 0 0 0 0 0
39040 - 0 0 0 0 0 0 0 0 0 0 0 0
39041 - 0 0 0 0 0 0 0 0 0 0 0 0
39042 - 0 0 0 0 0 0 0 0 0 0 0 0
39043 - 0 0 0 0 0 0 0 0 0 0 0 0
39044 - 0 0 0 0 0 0 0 0 0 0 0 0
39045 - 0 0 0 0 0 0 0 0 0 6 6 6
39046 - 22 22 22 54 54 54 70 70 70 2 2 6
39047 - 2 2 6 10 10 10 2 2 6 22 22 22
39048 -166 166 166 231 231 231 250 250 250 253 253 253
39049 -253 253 253 253 253 253 253 253 253 250 250 250
39050 -242 242 242 253 253 253 253 253 253 253 253 253
39051 -253 253 253 253 253 253 253 253 253 253 253 253
39052 -253 253 253 253 253 253 253 253 253 246 246 246
39053 -231 231 231 206 206 206 198 198 198 226 226 226
39054 - 94 94 94 2 2 6 6 6 6 38 38 38
39055 - 30 30 30 2 2 6 2 2 6 2 2 6
39056 - 2 2 6 2 2 6 62 62 62 66 66 66
39057 - 26 26 26 10 10 10 0 0 0 0 0 0
39058 - 0 0 0 0 0 0 0 0 0 0 0 0
39059 - 0 0 0 0 0 0 0 0 0 0 0 0
39060 - 0 0 0 0 0 0 0 0 0 0 0 0
39061 - 0 0 0 0 0 0 0 0 0 0 0 0
39062 - 0 0 0 0 0 0 0 0 0 0 0 0
39063 - 0 0 0 0 0 0 0 0 0 0 0 0
39064 - 0 0 0 0 0 0 0 0 0 0 0 0
39065 - 0 0 0 0 0 0 0 0 0 10 10 10
39066 - 30 30 30 74 74 74 50 50 50 2 2 6
39067 - 26 26 26 26 26 26 2 2 6 106 106 106
39068 -238 238 238 253 253 253 253 253 253 253 253 253
39069 -253 253 253 253 253 253 253 253 253 253 253 253
39070 -253 253 253 253 253 253 253 253 253 253 253 253
39071 -253 253 253 253 253 253 253 253 253 253 253 253
39072 -253 253 253 253 253 253 253 253 253 253 253 253
39073 -253 253 253 246 246 246 218 218 218 202 202 202
39074 -210 210 210 14 14 14 2 2 6 2 2 6
39075 - 30 30 30 22 22 22 2 2 6 2 2 6
39076 - 2 2 6 2 2 6 18 18 18 86 86 86
39077 - 42 42 42 14 14 14 0 0 0 0 0 0
39078 - 0 0 0 0 0 0 0 0 0 0 0 0
39079 - 0 0 0 0 0 0 0 0 0 0 0 0
39080 - 0 0 0 0 0 0 0 0 0 0 0 0
39081 - 0 0 0 0 0 0 0 0 0 0 0 0
39082 - 0 0 0 0 0 0 0 0 0 0 0 0
39083 - 0 0 0 0 0 0 0 0 0 0 0 0
39084 - 0 0 0 0 0 0 0 0 0 0 0 0
39085 - 0 0 0 0 0 0 0 0 0 14 14 14
39086 - 42 42 42 90 90 90 22 22 22 2 2 6
39087 - 42 42 42 2 2 6 18 18 18 218 218 218
39088 -253 253 253 253 253 253 253 253 253 253 253 253
39089 -253 253 253 253 253 253 253 253 253 253 253 253
39090 -253 253 253 253 253 253 253 253 253 253 253 253
39091 -253 253 253 253 253 253 253 253 253 253 253 253
39092 -253 253 253 253 253 253 253 253 253 253 253 253
39093 -253 253 253 253 253 253 250 250 250 221 221 221
39094 -218 218 218 101 101 101 2 2 6 14 14 14
39095 - 18 18 18 38 38 38 10 10 10 2 2 6
39096 - 2 2 6 2 2 6 2 2 6 78 78 78
39097 - 58 58 58 22 22 22 6 6 6 0 0 0
39098 - 0 0 0 0 0 0 0 0 0 0 0 0
39099 - 0 0 0 0 0 0 0 0 0 0 0 0
39100 - 0 0 0 0 0 0 0 0 0 0 0 0
39101 - 0 0 0 0 0 0 0 0 0 0 0 0
39102 - 0 0 0 0 0 0 0 0 0 0 0 0
39103 - 0 0 0 0 0 0 0 0 0 0 0 0
39104 - 0 0 0 0 0 0 0 0 0 0 0 0
39105 - 0 0 0 0 0 0 6 6 6 18 18 18
39106 - 54 54 54 82 82 82 2 2 6 26 26 26
39107 - 22 22 22 2 2 6 123 123 123 253 253 253
39108 -253 253 253 253 253 253 253 253 253 253 253 253
39109 -253 253 253 253 253 253 253 253 253 253 253 253
39110 -253 253 253 253 253 253 253 253 253 253 253 253
39111 -253 253 253 253 253 253 253 253 253 253 253 253
39112 -253 253 253 253 253 253 253 253 253 253 253 253
39113 -253 253 253 253 253 253 253 253 253 250 250 250
39114 -238 238 238 198 198 198 6 6 6 38 38 38
39115 - 58 58 58 26 26 26 38 38 38 2 2 6
39116 - 2 2 6 2 2 6 2 2 6 46 46 46
39117 - 78 78 78 30 30 30 10 10 10 0 0 0
39118 - 0 0 0 0 0 0 0 0 0 0 0 0
39119 - 0 0 0 0 0 0 0 0 0 0 0 0
39120 - 0 0 0 0 0 0 0 0 0 0 0 0
39121 - 0 0 0 0 0 0 0 0 0 0 0 0
39122 - 0 0 0 0 0 0 0 0 0 0 0 0
39123 - 0 0 0 0 0 0 0 0 0 0 0 0
39124 - 0 0 0 0 0 0 0 0 0 0 0 0
39125 - 0 0 0 0 0 0 10 10 10 30 30 30
39126 - 74 74 74 58 58 58 2 2 6 42 42 42
39127 - 2 2 6 22 22 22 231 231 231 253 253 253
39128 -253 253 253 253 253 253 253 253 253 253 253 253
39129 -253 253 253 253 253 253 253 253 253 250 250 250
39130 -253 253 253 253 253 253 253 253 253 253 253 253
39131 -253 253 253 253 253 253 253 253 253 253 253 253
39132 -253 253 253 253 253 253 253 253 253 253 253 253
39133 -253 253 253 253 253 253 253 253 253 253 253 253
39134 -253 253 253 246 246 246 46 46 46 38 38 38
39135 - 42 42 42 14 14 14 38 38 38 14 14 14
39136 - 2 2 6 2 2 6 2 2 6 6 6 6
39137 - 86 86 86 46 46 46 14 14 14 0 0 0
39138 - 0 0 0 0 0 0 0 0 0 0 0 0
39139 - 0 0 0 0 0 0 0 0 0 0 0 0
39140 - 0 0 0 0 0 0 0 0 0 0 0 0
39141 - 0 0 0 0 0 0 0 0 0 0 0 0
39142 - 0 0 0 0 0 0 0 0 0 0 0 0
39143 - 0 0 0 0 0 0 0 0 0 0 0 0
39144 - 0 0 0 0 0 0 0 0 0 0 0 0
39145 - 0 0 0 6 6 6 14 14 14 42 42 42
39146 - 90 90 90 18 18 18 18 18 18 26 26 26
39147 - 2 2 6 116 116 116 253 253 253 253 253 253
39148 -253 253 253 253 253 253 253 253 253 253 253 253
39149 -253 253 253 253 253 253 250 250 250 238 238 238
39150 -253 253 253 253 253 253 253 253 253 253 253 253
39151 -253 253 253 253 253 253 253 253 253 253 253 253
39152 -253 253 253 253 253 253 253 253 253 253 253 253
39153 -253 253 253 253 253 253 253 253 253 253 253 253
39154 -253 253 253 253 253 253 94 94 94 6 6 6
39155 - 2 2 6 2 2 6 10 10 10 34 34 34
39156 - 2 2 6 2 2 6 2 2 6 2 2 6
39157 - 74 74 74 58 58 58 22 22 22 6 6 6
39158 - 0 0 0 0 0 0 0 0 0 0 0 0
39159 - 0 0 0 0 0 0 0 0 0 0 0 0
39160 - 0 0 0 0 0 0 0 0 0 0 0 0
39161 - 0 0 0 0 0 0 0 0 0 0 0 0
39162 - 0 0 0 0 0 0 0 0 0 0 0 0
39163 - 0 0 0 0 0 0 0 0 0 0 0 0
39164 - 0 0 0 0 0 0 0 0 0 0 0 0
39165 - 0 0 0 10 10 10 26 26 26 66 66 66
39166 - 82 82 82 2 2 6 38 38 38 6 6 6
39167 - 14 14 14 210 210 210 253 253 253 253 253 253
39168 -253 253 253 253 253 253 253 253 253 253 253 253
39169 -253 253 253 253 253 253 246 246 246 242 242 242
39170 -253 253 253 253 253 253 253 253 253 253 253 253
39171 -253 253 253 253 253 253 253 253 253 253 253 253
39172 -253 253 253 253 253 253 253 253 253 253 253 253
39173 -253 253 253 253 253 253 253 253 253 253 253 253
39174 -253 253 253 253 253 253 144 144 144 2 2 6
39175 - 2 2 6 2 2 6 2 2 6 46 46 46
39176 - 2 2 6 2 2 6 2 2 6 2 2 6
39177 - 42 42 42 74 74 74 30 30 30 10 10 10
39178 - 0 0 0 0 0 0 0 0 0 0 0 0
39179 - 0 0 0 0 0 0 0 0 0 0 0 0
39180 - 0 0 0 0 0 0 0 0 0 0 0 0
39181 - 0 0 0 0 0 0 0 0 0 0 0 0
39182 - 0 0 0 0 0 0 0 0 0 0 0 0
39183 - 0 0 0 0 0 0 0 0 0 0 0 0
39184 - 0 0 0 0 0 0 0 0 0 0 0 0
39185 - 6 6 6 14 14 14 42 42 42 90 90 90
39186 - 26 26 26 6 6 6 42 42 42 2 2 6
39187 - 74 74 74 250 250 250 253 253 253 253 253 253
39188 -253 253 253 253 253 253 253 253 253 253 253 253
39189 -253 253 253 253 253 253 242 242 242 242 242 242
39190 -253 253 253 253 253 253 253 253 253 253 253 253
39191 -253 253 253 253 253 253 253 253 253 253 253 253
39192 -253 253 253 253 253 253 253 253 253 253 253 253
39193 -253 253 253 253 253 253 253 253 253 253 253 253
39194 -253 253 253 253 253 253 182 182 182 2 2 6
39195 - 2 2 6 2 2 6 2 2 6 46 46 46
39196 - 2 2 6 2 2 6 2 2 6 2 2 6
39197 - 10 10 10 86 86 86 38 38 38 10 10 10
39198 - 0 0 0 0 0 0 0 0 0 0 0 0
39199 - 0 0 0 0 0 0 0 0 0 0 0 0
39200 - 0 0 0 0 0 0 0 0 0 0 0 0
39201 - 0 0 0 0 0 0 0 0 0 0 0 0
39202 - 0 0 0 0 0 0 0 0 0 0 0 0
39203 - 0 0 0 0 0 0 0 0 0 0 0 0
39204 - 0 0 0 0 0 0 0 0 0 0 0 0
39205 - 10 10 10 26 26 26 66 66 66 82 82 82
39206 - 2 2 6 22 22 22 18 18 18 2 2 6
39207 -149 149 149 253 253 253 253 253 253 253 253 253
39208 -253 253 253 253 253 253 253 253 253 253 253 253
39209 -253 253 253 253 253 253 234 234 234 242 242 242
39210 -253 253 253 253 253 253 253 253 253 253 253 253
39211 -253 253 253 253 253 253 253 253 253 253 253 253
39212 -253 253 253 253 253 253 253 253 253 253 253 253
39213 -253 253 253 253 253 253 253 253 253 253 253 253
39214 -253 253 253 253 253 253 206 206 206 2 2 6
39215 - 2 2 6 2 2 6 2 2 6 38 38 38
39216 - 2 2 6 2 2 6 2 2 6 2 2 6
39217 - 6 6 6 86 86 86 46 46 46 14 14 14
39218 - 0 0 0 0 0 0 0 0 0 0 0 0
39219 - 0 0 0 0 0 0 0 0 0 0 0 0
39220 - 0 0 0 0 0 0 0 0 0 0 0 0
39221 - 0 0 0 0 0 0 0 0 0 0 0 0
39222 - 0 0 0 0 0 0 0 0 0 0 0 0
39223 - 0 0 0 0 0 0 0 0 0 0 0 0
39224 - 0 0 0 0 0 0 0 0 0 6 6 6
39225 - 18 18 18 46 46 46 86 86 86 18 18 18
39226 - 2 2 6 34 34 34 10 10 10 6 6 6
39227 -210 210 210 253 253 253 253 253 253 253 253 253
39228 -253 253 253 253 253 253 253 253 253 253 253 253
39229 -253 253 253 253 253 253 234 234 234 242 242 242
39230 -253 253 253 253 253 253 253 253 253 253 253 253
39231 -253 253 253 253 253 253 253 253 253 253 253 253
39232 -253 253 253 253 253 253 253 253 253 253 253 253
39233 -253 253 253 253 253 253 253 253 253 253 253 253
39234 -253 253 253 253 253 253 221 221 221 6 6 6
39235 - 2 2 6 2 2 6 6 6 6 30 30 30
39236 - 2 2 6 2 2 6 2 2 6 2 2 6
39237 - 2 2 6 82 82 82 54 54 54 18 18 18
39238 - 6 6 6 0 0 0 0 0 0 0 0 0
39239 - 0 0 0 0 0 0 0 0 0 0 0 0
39240 - 0 0 0 0 0 0 0 0 0 0 0 0
39241 - 0 0 0 0 0 0 0 0 0 0 0 0
39242 - 0 0 0 0 0 0 0 0 0 0 0 0
39243 - 0 0 0 0 0 0 0 0 0 0 0 0
39244 - 0 0 0 0 0 0 0 0 0 10 10 10
39245 - 26 26 26 66 66 66 62 62 62 2 2 6
39246 - 2 2 6 38 38 38 10 10 10 26 26 26
39247 -238 238 238 253 253 253 253 253 253 253 253 253
39248 -253 253 253 253 253 253 253 253 253 253 253 253
39249 -253 253 253 253 253 253 231 231 231 238 238 238
39250 -253 253 253 253 253 253 253 253 253 253 253 253
39251 -253 253 253 253 253 253 253 253 253 253 253 253
39252 -253 253 253 253 253 253 253 253 253 253 253 253
39253 -253 253 253 253 253 253 253 253 253 253 253 253
39254 -253 253 253 253 253 253 231 231 231 6 6 6
39255 - 2 2 6 2 2 6 10 10 10 30 30 30
39256 - 2 2 6 2 2 6 2 2 6 2 2 6
39257 - 2 2 6 66 66 66 58 58 58 22 22 22
39258 - 6 6 6 0 0 0 0 0 0 0 0 0
39259 - 0 0 0 0 0 0 0 0 0 0 0 0
39260 - 0 0 0 0 0 0 0 0 0 0 0 0
39261 - 0 0 0 0 0 0 0 0 0 0 0 0
39262 - 0 0 0 0 0 0 0 0 0 0 0 0
39263 - 0 0 0 0 0 0 0 0 0 0 0 0
39264 - 0 0 0 0 0 0 0 0 0 10 10 10
39265 - 38 38 38 78 78 78 6 6 6 2 2 6
39266 - 2 2 6 46 46 46 14 14 14 42 42 42
39267 -246 246 246 253 253 253 253 253 253 253 253 253
39268 -253 253 253 253 253 253 253 253 253 253 253 253
39269 -253 253 253 253 253 253 231 231 231 242 242 242
39270 -253 253 253 253 253 253 253 253 253 253 253 253
39271 -253 253 253 253 253 253 253 253 253 253 253 253
39272 -253 253 253 253 253 253 253 253 253 253 253 253
39273 -253 253 253 253 253 253 253 253 253 253 253 253
39274 -253 253 253 253 253 253 234 234 234 10 10 10
39275 - 2 2 6 2 2 6 22 22 22 14 14 14
39276 - 2 2 6 2 2 6 2 2 6 2 2 6
39277 - 2 2 6 66 66 66 62 62 62 22 22 22
39278 - 6 6 6 0 0 0 0 0 0 0 0 0
39279 - 0 0 0 0 0 0 0 0 0 0 0 0
39280 - 0 0 0 0 0 0 0 0 0 0 0 0
39281 - 0 0 0 0 0 0 0 0 0 0 0 0
39282 - 0 0 0 0 0 0 0 0 0 0 0 0
39283 - 0 0 0 0 0 0 0 0 0 0 0 0
39284 - 0 0 0 0 0 0 6 6 6 18 18 18
39285 - 50 50 50 74 74 74 2 2 6 2 2 6
39286 - 14 14 14 70 70 70 34 34 34 62 62 62
39287 -250 250 250 253 253 253 253 253 253 253 253 253
39288 -253 253 253 253 253 253 253 253 253 253 253 253
39289 -253 253 253 253 253 253 231 231 231 246 246 246
39290 -253 253 253 253 253 253 253 253 253 253 253 253
39291 -253 253 253 253 253 253 253 253 253 253 253 253
39292 -253 253 253 253 253 253 253 253 253 253 253 253
39293 -253 253 253 253 253 253 253 253 253 253 253 253
39294 -253 253 253 253 253 253 234 234 234 14 14 14
39295 - 2 2 6 2 2 6 30 30 30 2 2 6
39296 - 2 2 6 2 2 6 2 2 6 2 2 6
39297 - 2 2 6 66 66 66 62 62 62 22 22 22
39298 - 6 6 6 0 0 0 0 0 0 0 0 0
39299 - 0 0 0 0 0 0 0 0 0 0 0 0
39300 - 0 0 0 0 0 0 0 0 0 0 0 0
39301 - 0 0 0 0 0 0 0 0 0 0 0 0
39302 - 0 0 0 0 0 0 0 0 0 0 0 0
39303 - 0 0 0 0 0 0 0 0 0 0 0 0
39304 - 0 0 0 0 0 0 6 6 6 18 18 18
39305 - 54 54 54 62 62 62 2 2 6 2 2 6
39306 - 2 2 6 30 30 30 46 46 46 70 70 70
39307 -250 250 250 253 253 253 253 253 253 253 253 253
39308 -253 253 253 253 253 253 253 253 253 253 253 253
39309 -253 253 253 253 253 253 231 231 231 246 246 246
39310 -253 253 253 253 253 253 253 253 253 253 253 253
39311 -253 253 253 253 253 253 253 253 253 253 253 253
39312 -253 253 253 253 253 253 253 253 253 253 253 253
39313 -253 253 253 253 253 253 253 253 253 253 253 253
39314 -253 253 253 253 253 253 226 226 226 10 10 10
39315 - 2 2 6 6 6 6 30 30 30 2 2 6
39316 - 2 2 6 2 2 6 2 2 6 2 2 6
39317 - 2 2 6 66 66 66 58 58 58 22 22 22
39318 - 6 6 6 0 0 0 0 0 0 0 0 0
39319 - 0 0 0 0 0 0 0 0 0 0 0 0
39320 - 0 0 0 0 0 0 0 0 0 0 0 0
39321 - 0 0 0 0 0 0 0 0 0 0 0 0
39322 - 0 0 0 0 0 0 0 0 0 0 0 0
39323 - 0 0 0 0 0 0 0 0 0 0 0 0
39324 - 0 0 0 0 0 0 6 6 6 22 22 22
39325 - 58 58 58 62 62 62 2 2 6 2 2 6
39326 - 2 2 6 2 2 6 30 30 30 78 78 78
39327 -250 250 250 253 253 253 253 253 253 253 253 253
39328 -253 253 253 253 253 253 253 253 253 253 253 253
39329 -253 253 253 253 253 253 231 231 231 246 246 246
39330 -253 253 253 253 253 253 253 253 253 253 253 253
39331 -253 253 253 253 253 253 253 253 253 253 253 253
39332 -253 253 253 253 253 253 253 253 253 253 253 253
39333 -253 253 253 253 253 253 253 253 253 253 253 253
39334 -253 253 253 253 253 253 206 206 206 2 2 6
39335 - 22 22 22 34 34 34 18 14 6 22 22 22
39336 - 26 26 26 18 18 18 6 6 6 2 2 6
39337 - 2 2 6 82 82 82 54 54 54 18 18 18
39338 - 6 6 6 0 0 0 0 0 0 0 0 0
39339 - 0 0 0 0 0 0 0 0 0 0 0 0
39340 - 0 0 0 0 0 0 0 0 0 0 0 0
39341 - 0 0 0 0 0 0 0 0 0 0 0 0
39342 - 0 0 0 0 0 0 0 0 0 0 0 0
39343 - 0 0 0 0 0 0 0 0 0 0 0 0
39344 - 0 0 0 0 0 0 6 6 6 26 26 26
39345 - 62 62 62 106 106 106 74 54 14 185 133 11
39346 -210 162 10 121 92 8 6 6 6 62 62 62
39347 -238 238 238 253 253 253 253 253 253 253 253 253
39348 -253 253 253 253 253 253 253 253 253 253 253 253
39349 -253 253 253 253 253 253 231 231 231 246 246 246
39350 -253 253 253 253 253 253 253 253 253 253 253 253
39351 -253 253 253 253 253 253 253 253 253 253 253 253
39352 -253 253 253 253 253 253 253 253 253 253 253 253
39353 -253 253 253 253 253 253 253 253 253 253 253 253
39354 -253 253 253 253 253 253 158 158 158 18 18 18
39355 - 14 14 14 2 2 6 2 2 6 2 2 6
39356 - 6 6 6 18 18 18 66 66 66 38 38 38
39357 - 6 6 6 94 94 94 50 50 50 18 18 18
39358 - 6 6 6 0 0 0 0 0 0 0 0 0
39359 - 0 0 0 0 0 0 0 0 0 0 0 0
39360 - 0 0 0 0 0 0 0 0 0 0 0 0
39361 - 0 0 0 0 0 0 0 0 0 0 0 0
39362 - 0 0 0 0 0 0 0 0 0 0 0 0
39363 - 0 0 0 0 0 0 0 0 0 6 6 6
39364 - 10 10 10 10 10 10 18 18 18 38 38 38
39365 - 78 78 78 142 134 106 216 158 10 242 186 14
39366 -246 190 14 246 190 14 156 118 10 10 10 10
39367 - 90 90 90 238 238 238 253 253 253 253 253 253
39368 -253 253 253 253 253 253 253 253 253 253 253 253
39369 -253 253 253 253 253 253 231 231 231 250 250 250
39370 -253 253 253 253 253 253 253 253 253 253 253 253
39371 -253 253 253 253 253 253 253 253 253 253 253 253
39372 -253 253 253 253 253 253 253 253 253 253 253 253
39373 -253 253 253 253 253 253 253 253 253 246 230 190
39374 -238 204 91 238 204 91 181 142 44 37 26 9
39375 - 2 2 6 2 2 6 2 2 6 2 2 6
39376 - 2 2 6 2 2 6 38 38 38 46 46 46
39377 - 26 26 26 106 106 106 54 54 54 18 18 18
39378 - 6 6 6 0 0 0 0 0 0 0 0 0
39379 - 0 0 0 0 0 0 0 0 0 0 0 0
39380 - 0 0 0 0 0 0 0 0 0 0 0 0
39381 - 0 0 0 0 0 0 0 0 0 0 0 0
39382 - 0 0 0 0 0 0 0 0 0 0 0 0
39383 - 0 0 0 6 6 6 14 14 14 22 22 22
39384 - 30 30 30 38 38 38 50 50 50 70 70 70
39385 -106 106 106 190 142 34 226 170 11 242 186 14
39386 -246 190 14 246 190 14 246 190 14 154 114 10
39387 - 6 6 6 74 74 74 226 226 226 253 253 253
39388 -253 253 253 253 253 253 253 253 253 253 253 253
39389 -253 253 253 253 253 253 231 231 231 250 250 250
39390 -253 253 253 253 253 253 253 253 253 253 253 253
39391 -253 253 253 253 253 253 253 253 253 253 253 253
39392 -253 253 253 253 253 253 253 253 253 253 253 253
39393 -253 253 253 253 253 253 253 253 253 228 184 62
39394 -241 196 14 241 208 19 232 195 16 38 30 10
39395 - 2 2 6 2 2 6 2 2 6 2 2 6
39396 - 2 2 6 6 6 6 30 30 30 26 26 26
39397 -203 166 17 154 142 90 66 66 66 26 26 26
39398 - 6 6 6 0 0 0 0 0 0 0 0 0
39399 - 0 0 0 0 0 0 0 0 0 0 0 0
39400 - 0 0 0 0 0 0 0 0 0 0 0 0
39401 - 0 0 0 0 0 0 0 0 0 0 0 0
39402 - 0 0 0 0 0 0 0 0 0 0 0 0
39403 - 6 6 6 18 18 18 38 38 38 58 58 58
39404 - 78 78 78 86 86 86 101 101 101 123 123 123
39405 -175 146 61 210 150 10 234 174 13 246 186 14
39406 -246 190 14 246 190 14 246 190 14 238 190 10
39407 -102 78 10 2 2 6 46 46 46 198 198 198
39408 -253 253 253 253 253 253 253 253 253 253 253 253
39409 -253 253 253 253 253 253 234 234 234 242 242 242
39410 -253 253 253 253 253 253 253 253 253 253 253 253
39411 -253 253 253 253 253 253 253 253 253 253 253 253
39412 -253 253 253 253 253 253 253 253 253 253 253 253
39413 -253 253 253 253 253 253 253 253 253 224 178 62
39414 -242 186 14 241 196 14 210 166 10 22 18 6
39415 - 2 2 6 2 2 6 2 2 6 2 2 6
39416 - 2 2 6 2 2 6 6 6 6 121 92 8
39417 -238 202 15 232 195 16 82 82 82 34 34 34
39418 - 10 10 10 0 0 0 0 0 0 0 0 0
39419 - 0 0 0 0 0 0 0 0 0 0 0 0
39420 - 0 0 0 0 0 0 0 0 0 0 0 0
39421 - 0 0 0 0 0 0 0 0 0 0 0 0
39422 - 0 0 0 0 0 0 0 0 0 0 0 0
39423 - 14 14 14 38 38 38 70 70 70 154 122 46
39424 -190 142 34 200 144 11 197 138 11 197 138 11
39425 -213 154 11 226 170 11 242 186 14 246 190 14
39426 -246 190 14 246 190 14 246 190 14 246 190 14
39427 -225 175 15 46 32 6 2 2 6 22 22 22
39428 -158 158 158 250 250 250 253 253 253 253 253 253
39429 -253 253 253 253 253 253 253 253 253 253 253 253
39430 -253 253 253 253 253 253 253 253 253 253 253 253
39431 -253 253 253 253 253 253 253 253 253 253 253 253
39432 -253 253 253 253 253 253 253 253 253 253 253 253
39433 -253 253 253 250 250 250 242 242 242 224 178 62
39434 -239 182 13 236 186 11 213 154 11 46 32 6
39435 - 2 2 6 2 2 6 2 2 6 2 2 6
39436 - 2 2 6 2 2 6 61 42 6 225 175 15
39437 -238 190 10 236 186 11 112 100 78 42 42 42
39438 - 14 14 14 0 0 0 0 0 0 0 0 0
39439 - 0 0 0 0 0 0 0 0 0 0 0 0
39440 - 0 0 0 0 0 0 0 0 0 0 0 0
39441 - 0 0 0 0 0 0 0 0 0 0 0 0
39442 - 0 0 0 0 0 0 0 0 0 6 6 6
39443 - 22 22 22 54 54 54 154 122 46 213 154 11
39444 -226 170 11 230 174 11 226 170 11 226 170 11
39445 -236 178 12 242 186 14 246 190 14 246 190 14
39446 -246 190 14 246 190 14 246 190 14 246 190 14
39447 -241 196 14 184 144 12 10 10 10 2 2 6
39448 - 6 6 6 116 116 116 242 242 242 253 253 253
39449 -253 253 253 253 253 253 253 253 253 253 253 253
39450 -253 253 253 253 253 253 253 253 253 253 253 253
39451 -253 253 253 253 253 253 253 253 253 253 253 253
39452 -253 253 253 253 253 253 253 253 253 253 253 253
39453 -253 253 253 231 231 231 198 198 198 214 170 54
39454 -236 178 12 236 178 12 210 150 10 137 92 6
39455 - 18 14 6 2 2 6 2 2 6 2 2 6
39456 - 6 6 6 70 47 6 200 144 11 236 178 12
39457 -239 182 13 239 182 13 124 112 88 58 58 58
39458 - 22 22 22 6 6 6 0 0 0 0 0 0
39459 - 0 0 0 0 0 0 0 0 0 0 0 0
39460 - 0 0 0 0 0 0 0 0 0 0 0 0
39461 - 0 0 0 0 0 0 0 0 0 0 0 0
39462 - 0 0 0 0 0 0 0 0 0 10 10 10
39463 - 30 30 30 70 70 70 180 133 36 226 170 11
39464 -239 182 13 242 186 14 242 186 14 246 186 14
39465 -246 190 14 246 190 14 246 190 14 246 190 14
39466 -246 190 14 246 190 14 246 190 14 246 190 14
39467 -246 190 14 232 195 16 98 70 6 2 2 6
39468 - 2 2 6 2 2 6 66 66 66 221 221 221
39469 -253 253 253 253 253 253 253 253 253 253 253 253
39470 -253 253 253 253 253 253 253 253 253 253 253 253
39471 -253 253 253 253 253 253 253 253 253 253 253 253
39472 -253 253 253 253 253 253 253 253 253 253 253 253
39473 -253 253 253 206 206 206 198 198 198 214 166 58
39474 -230 174 11 230 174 11 216 158 10 192 133 9
39475 -163 110 8 116 81 8 102 78 10 116 81 8
39476 -167 114 7 197 138 11 226 170 11 239 182 13
39477 -242 186 14 242 186 14 162 146 94 78 78 78
39478 - 34 34 34 14 14 14 6 6 6 0 0 0
39479 - 0 0 0 0 0 0 0 0 0 0 0 0
39480 - 0 0 0 0 0 0 0 0 0 0 0 0
39481 - 0 0 0 0 0 0 0 0 0 0 0 0
39482 - 0 0 0 0 0 0 0 0 0 6 6 6
39483 - 30 30 30 78 78 78 190 142 34 226 170 11
39484 -239 182 13 246 190 14 246 190 14 246 190 14
39485 -246 190 14 246 190 14 246 190 14 246 190 14
39486 -246 190 14 246 190 14 246 190 14 246 190 14
39487 -246 190 14 241 196 14 203 166 17 22 18 6
39488 - 2 2 6 2 2 6 2 2 6 38 38 38
39489 -218 218 218 253 253 253 253 253 253 253 253 253
39490 -253 253 253 253 253 253 253 253 253 253 253 253
39491 -253 253 253 253 253 253 253 253 253 253 253 253
39492 -253 253 253 253 253 253 253 253 253 253 253 253
39493 -250 250 250 206 206 206 198 198 198 202 162 69
39494 -226 170 11 236 178 12 224 166 10 210 150 10
39495 -200 144 11 197 138 11 192 133 9 197 138 11
39496 -210 150 10 226 170 11 242 186 14 246 190 14
39497 -246 190 14 246 186 14 225 175 15 124 112 88
39498 - 62 62 62 30 30 30 14 14 14 6 6 6
39499 - 0 0 0 0 0 0 0 0 0 0 0 0
39500 - 0 0 0 0 0 0 0 0 0 0 0 0
39501 - 0 0 0 0 0 0 0 0 0 0 0 0
39502 - 0 0 0 0 0 0 0 0 0 10 10 10
39503 - 30 30 30 78 78 78 174 135 50 224 166 10
39504 -239 182 13 246 190 14 246 190 14 246 190 14
39505 -246 190 14 246 190 14 246 190 14 246 190 14
39506 -246 190 14 246 190 14 246 190 14 246 190 14
39507 -246 190 14 246 190 14 241 196 14 139 102 15
39508 - 2 2 6 2 2 6 2 2 6 2 2 6
39509 - 78 78 78 250 250 250 253 253 253 253 253 253
39510 -253 253 253 253 253 253 253 253 253 253 253 253
39511 -253 253 253 253 253 253 253 253 253 253 253 253
39512 -253 253 253 253 253 253 253 253 253 253 253 253
39513 -250 250 250 214 214 214 198 198 198 190 150 46
39514 -219 162 10 236 178 12 234 174 13 224 166 10
39515 -216 158 10 213 154 11 213 154 11 216 158 10
39516 -226 170 11 239 182 13 246 190 14 246 190 14
39517 -246 190 14 246 190 14 242 186 14 206 162 42
39518 -101 101 101 58 58 58 30 30 30 14 14 14
39519 - 6 6 6 0 0 0 0 0 0 0 0 0
39520 - 0 0 0 0 0 0 0 0 0 0 0 0
39521 - 0 0 0 0 0 0 0 0 0 0 0 0
39522 - 0 0 0 0 0 0 0 0 0 10 10 10
39523 - 30 30 30 74 74 74 174 135 50 216 158 10
39524 -236 178 12 246 190 14 246 190 14 246 190 14
39525 -246 190 14 246 190 14 246 190 14 246 190 14
39526 -246 190 14 246 190 14 246 190 14 246 190 14
39527 -246 190 14 246 190 14 241 196 14 226 184 13
39528 - 61 42 6 2 2 6 2 2 6 2 2 6
39529 - 22 22 22 238 238 238 253 253 253 253 253 253
39530 -253 253 253 253 253 253 253 253 253 253 253 253
39531 -253 253 253 253 253 253 253 253 253 253 253 253
39532 -253 253 253 253 253 253 253 253 253 253 253 253
39533 -253 253 253 226 226 226 187 187 187 180 133 36
39534 -216 158 10 236 178 12 239 182 13 236 178 12
39535 -230 174 11 226 170 11 226 170 11 230 174 11
39536 -236 178 12 242 186 14 246 190 14 246 190 14
39537 -246 190 14 246 190 14 246 186 14 239 182 13
39538 -206 162 42 106 106 106 66 66 66 34 34 34
39539 - 14 14 14 6 6 6 0 0 0 0 0 0
39540 - 0 0 0 0 0 0 0 0 0 0 0 0
39541 - 0 0 0 0 0 0 0 0 0 0 0 0
39542 - 0 0 0 0 0 0 0 0 0 6 6 6
39543 - 26 26 26 70 70 70 163 133 67 213 154 11
39544 -236 178 12 246 190 14 246 190 14 246 190 14
39545 -246 190 14 246 190 14 246 190 14 246 190 14
39546 -246 190 14 246 190 14 246 190 14 246 190 14
39547 -246 190 14 246 190 14 246 190 14 241 196 14
39548 -190 146 13 18 14 6 2 2 6 2 2 6
39549 - 46 46 46 246 246 246 253 253 253 253 253 253
39550 -253 253 253 253 253 253 253 253 253 253 253 253
39551 -253 253 253 253 253 253 253 253 253 253 253 253
39552 -253 253 253 253 253 253 253 253 253 253 253 253
39553 -253 253 253 221 221 221 86 86 86 156 107 11
39554 -216 158 10 236 178 12 242 186 14 246 186 14
39555 -242 186 14 239 182 13 239 182 13 242 186 14
39556 -242 186 14 246 186 14 246 190 14 246 190 14
39557 -246 190 14 246 190 14 246 190 14 246 190 14
39558 -242 186 14 225 175 15 142 122 72 66 66 66
39559 - 30 30 30 10 10 10 0 0 0 0 0 0
39560 - 0 0 0 0 0 0 0 0 0 0 0 0
39561 - 0 0 0 0 0 0 0 0 0 0 0 0
39562 - 0 0 0 0 0 0 0 0 0 6 6 6
39563 - 26 26 26 70 70 70 163 133 67 210 150 10
39564 -236 178 12 246 190 14 246 190 14 246 190 14
39565 -246 190 14 246 190 14 246 190 14 246 190 14
39566 -246 190 14 246 190 14 246 190 14 246 190 14
39567 -246 190 14 246 190 14 246 190 14 246 190 14
39568 -232 195 16 121 92 8 34 34 34 106 106 106
39569 -221 221 221 253 253 253 253 253 253 253 253 253
39570 -253 253 253 253 253 253 253 253 253 253 253 253
39571 -253 253 253 253 253 253 253 253 253 253 253 253
39572 -253 253 253 253 253 253 253 253 253 253 253 253
39573 -242 242 242 82 82 82 18 14 6 163 110 8
39574 -216 158 10 236 178 12 242 186 14 246 190 14
39575 -246 190 14 246 190 14 246 190 14 246 190 14
39576 -246 190 14 246 190 14 246 190 14 246 190 14
39577 -246 190 14 246 190 14 246 190 14 246 190 14
39578 -246 190 14 246 190 14 242 186 14 163 133 67
39579 - 46 46 46 18 18 18 6 6 6 0 0 0
39580 - 0 0 0 0 0 0 0 0 0 0 0 0
39581 - 0 0 0 0 0 0 0 0 0 0 0 0
39582 - 0 0 0 0 0 0 0 0 0 10 10 10
39583 - 30 30 30 78 78 78 163 133 67 210 150 10
39584 -236 178 12 246 186 14 246 190 14 246 190 14
39585 -246 190 14 246 190 14 246 190 14 246 190 14
39586 -246 190 14 246 190 14 246 190 14 246 190 14
39587 -246 190 14 246 190 14 246 190 14 246 190 14
39588 -241 196 14 215 174 15 190 178 144 253 253 253
39589 -253 253 253 253 253 253 253 253 253 253 253 253
39590 -253 253 253 253 253 253 253 253 253 253 253 253
39591 -253 253 253 253 253 253 253 253 253 253 253 253
39592 -253 253 253 253 253 253 253 253 253 218 218 218
39593 - 58 58 58 2 2 6 22 18 6 167 114 7
39594 -216 158 10 236 178 12 246 186 14 246 190 14
39595 -246 190 14 246 190 14 246 190 14 246 190 14
39596 -246 190 14 246 190 14 246 190 14 246 190 14
39597 -246 190 14 246 190 14 246 190 14 246 190 14
39598 -246 190 14 246 186 14 242 186 14 190 150 46
39599 - 54 54 54 22 22 22 6 6 6 0 0 0
39600 - 0 0 0 0 0 0 0 0 0 0 0 0
39601 - 0 0 0 0 0 0 0 0 0 0 0 0
39602 - 0 0 0 0 0 0 0 0 0 14 14 14
39603 - 38 38 38 86 86 86 180 133 36 213 154 11
39604 -236 178 12 246 186 14 246 190 14 246 190 14
39605 -246 190 14 246 190 14 246 190 14 246 190 14
39606 -246 190 14 246 190 14 246 190 14 246 190 14
39607 -246 190 14 246 190 14 246 190 14 246 190 14
39608 -246 190 14 232 195 16 190 146 13 214 214 214
39609 -253 253 253 253 253 253 253 253 253 253 253 253
39610 -253 253 253 253 253 253 253 253 253 253 253 253
39611 -253 253 253 253 253 253 253 253 253 253 253 253
39612 -253 253 253 250 250 250 170 170 170 26 26 26
39613 - 2 2 6 2 2 6 37 26 9 163 110 8
39614 -219 162 10 239 182 13 246 186 14 246 190 14
39615 -246 190 14 246 190 14 246 190 14 246 190 14
39616 -246 190 14 246 190 14 246 190 14 246 190 14
39617 -246 190 14 246 190 14 246 190 14 246 190 14
39618 -246 186 14 236 178 12 224 166 10 142 122 72
39619 - 46 46 46 18 18 18 6 6 6 0 0 0
39620 - 0 0 0 0 0 0 0 0 0 0 0 0
39621 - 0 0 0 0 0 0 0 0 0 0 0 0
39622 - 0 0 0 0 0 0 6 6 6 18 18 18
39623 - 50 50 50 109 106 95 192 133 9 224 166 10
39624 -242 186 14 246 190 14 246 190 14 246 190 14
39625 -246 190 14 246 190 14 246 190 14 246 190 14
39626 -246 190 14 246 190 14 246 190 14 246 190 14
39627 -246 190 14 246 190 14 246 190 14 246 190 14
39628 -242 186 14 226 184 13 210 162 10 142 110 46
39629 -226 226 226 253 253 253 253 253 253 253 253 253
39630 -253 253 253 253 253 253 253 253 253 253 253 253
39631 -253 253 253 253 253 253 253 253 253 253 253 253
39632 -198 198 198 66 66 66 2 2 6 2 2 6
39633 - 2 2 6 2 2 6 50 34 6 156 107 11
39634 -219 162 10 239 182 13 246 186 14 246 190 14
39635 -246 190 14 246 190 14 246 190 14 246 190 14
39636 -246 190 14 246 190 14 246 190 14 246 190 14
39637 -246 190 14 246 190 14 246 190 14 242 186 14
39638 -234 174 13 213 154 11 154 122 46 66 66 66
39639 - 30 30 30 10 10 10 0 0 0 0 0 0
39640 - 0 0 0 0 0 0 0 0 0 0 0 0
39641 - 0 0 0 0 0 0 0 0 0 0 0 0
39642 - 0 0 0 0 0 0 6 6 6 22 22 22
39643 - 58 58 58 154 121 60 206 145 10 234 174 13
39644 -242 186 14 246 186 14 246 190 14 246 190 14
39645 -246 190 14 246 190 14 246 190 14 246 190 14
39646 -246 190 14 246 190 14 246 190 14 246 190 14
39647 -246 190 14 246 190 14 246 190 14 246 190 14
39648 -246 186 14 236 178 12 210 162 10 163 110 8
39649 - 61 42 6 138 138 138 218 218 218 250 250 250
39650 -253 253 253 253 253 253 253 253 253 250 250 250
39651 -242 242 242 210 210 210 144 144 144 66 66 66
39652 - 6 6 6 2 2 6 2 2 6 2 2 6
39653 - 2 2 6 2 2 6 61 42 6 163 110 8
39654 -216 158 10 236 178 12 246 190 14 246 190 14
39655 -246 190 14 246 190 14 246 190 14 246 190 14
39656 -246 190 14 246 190 14 246 190 14 246 190 14
39657 -246 190 14 239 182 13 230 174 11 216 158 10
39658 -190 142 34 124 112 88 70 70 70 38 38 38
39659 - 18 18 18 6 6 6 0 0 0 0 0 0
39660 - 0 0 0 0 0 0 0 0 0 0 0 0
39661 - 0 0 0 0 0 0 0 0 0 0 0 0
39662 - 0 0 0 0 0 0 6 6 6 22 22 22
39663 - 62 62 62 168 124 44 206 145 10 224 166 10
39664 -236 178 12 239 182 13 242 186 14 242 186 14
39665 -246 186 14 246 190 14 246 190 14 246 190 14
39666 -246 190 14 246 190 14 246 190 14 246 190 14
39667 -246 190 14 246 190 14 246 190 14 246 190 14
39668 -246 190 14 236 178 12 216 158 10 175 118 6
39669 - 80 54 7 2 2 6 6 6 6 30 30 30
39670 - 54 54 54 62 62 62 50 50 50 38 38 38
39671 - 14 14 14 2 2 6 2 2 6 2 2 6
39672 - 2 2 6 2 2 6 2 2 6 2 2 6
39673 - 2 2 6 6 6 6 80 54 7 167 114 7
39674 -213 154 11 236 178 12 246 190 14 246 190 14
39675 -246 190 14 246 190 14 246 190 14 246 190 14
39676 -246 190 14 242 186 14 239 182 13 239 182 13
39677 -230 174 11 210 150 10 174 135 50 124 112 88
39678 - 82 82 82 54 54 54 34 34 34 18 18 18
39679 - 6 6 6 0 0 0 0 0 0 0 0 0
39680 - 0 0 0 0 0 0 0 0 0 0 0 0
39681 - 0 0 0 0 0 0 0 0 0 0 0 0
39682 - 0 0 0 0 0 0 6 6 6 18 18 18
39683 - 50 50 50 158 118 36 192 133 9 200 144 11
39684 -216 158 10 219 162 10 224 166 10 226 170 11
39685 -230 174 11 236 178 12 239 182 13 239 182 13
39686 -242 186 14 246 186 14 246 190 14 246 190 14
39687 -246 190 14 246 190 14 246 190 14 246 190 14
39688 -246 186 14 230 174 11 210 150 10 163 110 8
39689 -104 69 6 10 10 10 2 2 6 2 2 6
39690 - 2 2 6 2 2 6 2 2 6 2 2 6
39691 - 2 2 6 2 2 6 2 2 6 2 2 6
39692 - 2 2 6 2 2 6 2 2 6 2 2 6
39693 - 2 2 6 6 6 6 91 60 6 167 114 7
39694 -206 145 10 230 174 11 242 186 14 246 190 14
39695 -246 190 14 246 190 14 246 186 14 242 186 14
39696 -239 182 13 230 174 11 224 166 10 213 154 11
39697 -180 133 36 124 112 88 86 86 86 58 58 58
39698 - 38 38 38 22 22 22 10 10 10 6 6 6
39699 - 0 0 0 0 0 0 0 0 0 0 0 0
39700 - 0 0 0 0 0 0 0 0 0 0 0 0
39701 - 0 0 0 0 0 0 0 0 0 0 0 0
39702 - 0 0 0 0 0 0 0 0 0 14 14 14
39703 - 34 34 34 70 70 70 138 110 50 158 118 36
39704 -167 114 7 180 123 7 192 133 9 197 138 11
39705 -200 144 11 206 145 10 213 154 11 219 162 10
39706 -224 166 10 230 174 11 239 182 13 242 186 14
39707 -246 186 14 246 186 14 246 186 14 246 186 14
39708 -239 182 13 216 158 10 185 133 11 152 99 6
39709 -104 69 6 18 14 6 2 2 6 2 2 6
39710 - 2 2 6 2 2 6 2 2 6 2 2 6
39711 - 2 2 6 2 2 6 2 2 6 2 2 6
39712 - 2 2 6 2 2 6 2 2 6 2 2 6
39713 - 2 2 6 6 6 6 80 54 7 152 99 6
39714 -192 133 9 219 162 10 236 178 12 239 182 13
39715 -246 186 14 242 186 14 239 182 13 236 178 12
39716 -224 166 10 206 145 10 192 133 9 154 121 60
39717 - 94 94 94 62 62 62 42 42 42 22 22 22
39718 - 14 14 14 6 6 6 0 0 0 0 0 0
39719 - 0 0 0 0 0 0 0 0 0 0 0 0
39720 - 0 0 0 0 0 0 0 0 0 0 0 0
39721 - 0 0 0 0 0 0 0 0 0 0 0 0
39722 - 0 0 0 0 0 0 0 0 0 6 6 6
39723 - 18 18 18 34 34 34 58 58 58 78 78 78
39724 -101 98 89 124 112 88 142 110 46 156 107 11
39725 -163 110 8 167 114 7 175 118 6 180 123 7
39726 -185 133 11 197 138 11 210 150 10 219 162 10
39727 -226 170 11 236 178 12 236 178 12 234 174 13
39728 -219 162 10 197 138 11 163 110 8 130 83 6
39729 - 91 60 6 10 10 10 2 2 6 2 2 6
39730 - 18 18 18 38 38 38 38 38 38 38 38 38
39731 - 38 38 38 38 38 38 38 38 38 38 38 38
39732 - 38 38 38 38 38 38 26 26 26 2 2 6
39733 - 2 2 6 6 6 6 70 47 6 137 92 6
39734 -175 118 6 200 144 11 219 162 10 230 174 11
39735 -234 174 13 230 174 11 219 162 10 210 150 10
39736 -192 133 9 163 110 8 124 112 88 82 82 82
39737 - 50 50 50 30 30 30 14 14 14 6 6 6
39738 - 0 0 0 0 0 0 0 0 0 0 0 0
39739 - 0 0 0 0 0 0 0 0 0 0 0 0
39740 - 0 0 0 0 0 0 0 0 0 0 0 0
39741 - 0 0 0 0 0 0 0 0 0 0 0 0
39742 - 0 0 0 0 0 0 0 0 0 0 0 0
39743 - 6 6 6 14 14 14 22 22 22 34 34 34
39744 - 42 42 42 58 58 58 74 74 74 86 86 86
39745 -101 98 89 122 102 70 130 98 46 121 87 25
39746 -137 92 6 152 99 6 163 110 8 180 123 7
39747 -185 133 11 197 138 11 206 145 10 200 144 11
39748 -180 123 7 156 107 11 130 83 6 104 69 6
39749 - 50 34 6 54 54 54 110 110 110 101 98 89
39750 - 86 86 86 82 82 82 78 78 78 78 78 78
39751 - 78 78 78 78 78 78 78 78 78 78 78 78
39752 - 78 78 78 82 82 82 86 86 86 94 94 94
39753 -106 106 106 101 101 101 86 66 34 124 80 6
39754 -156 107 11 180 123 7 192 133 9 200 144 11
39755 -206 145 10 200 144 11 192 133 9 175 118 6
39756 -139 102 15 109 106 95 70 70 70 42 42 42
39757 - 22 22 22 10 10 10 0 0 0 0 0 0
39758 - 0 0 0 0 0 0 0 0 0 0 0 0
39759 - 0 0 0 0 0 0 0 0 0 0 0 0
39760 - 0 0 0 0 0 0 0 0 0 0 0 0
39761 - 0 0 0 0 0 0 0 0 0 0 0 0
39762 - 0 0 0 0 0 0 0 0 0 0 0 0
39763 - 0 0 0 0 0 0 6 6 6 10 10 10
39764 - 14 14 14 22 22 22 30 30 30 38 38 38
39765 - 50 50 50 62 62 62 74 74 74 90 90 90
39766 -101 98 89 112 100 78 121 87 25 124 80 6
39767 -137 92 6 152 99 6 152 99 6 152 99 6
39768 -138 86 6 124 80 6 98 70 6 86 66 30
39769 -101 98 89 82 82 82 58 58 58 46 46 46
39770 - 38 38 38 34 34 34 34 34 34 34 34 34
39771 - 34 34 34 34 34 34 34 34 34 34 34 34
39772 - 34 34 34 34 34 34 38 38 38 42 42 42
39773 - 54 54 54 82 82 82 94 86 76 91 60 6
39774 -134 86 6 156 107 11 167 114 7 175 118 6
39775 -175 118 6 167 114 7 152 99 6 121 87 25
39776 -101 98 89 62 62 62 34 34 34 18 18 18
39777 - 6 6 6 0 0 0 0 0 0 0 0 0
39778 - 0 0 0 0 0 0 0 0 0 0 0 0
39779 - 0 0 0 0 0 0 0 0 0 0 0 0
39780 - 0 0 0 0 0 0 0 0 0 0 0 0
39781 - 0 0 0 0 0 0 0 0 0 0 0 0
39782 - 0 0 0 0 0 0 0 0 0 0 0 0
39783 - 0 0 0 0 0 0 0 0 0 0 0 0
39784 - 0 0 0 6 6 6 6 6 6 10 10 10
39785 - 18 18 18 22 22 22 30 30 30 42 42 42
39786 - 50 50 50 66 66 66 86 86 86 101 98 89
39787 -106 86 58 98 70 6 104 69 6 104 69 6
39788 -104 69 6 91 60 6 82 62 34 90 90 90
39789 - 62 62 62 38 38 38 22 22 22 14 14 14
39790 - 10 10 10 10 10 10 10 10 10 10 10 10
39791 - 10 10 10 10 10 10 6 6 6 10 10 10
39792 - 10 10 10 10 10 10 10 10 10 14 14 14
39793 - 22 22 22 42 42 42 70 70 70 89 81 66
39794 - 80 54 7 104 69 6 124 80 6 137 92 6
39795 -134 86 6 116 81 8 100 82 52 86 86 86
39796 - 58 58 58 30 30 30 14 14 14 6 6 6
39797 - 0 0 0 0 0 0 0 0 0 0 0 0
39798 - 0 0 0 0 0 0 0 0 0 0 0 0
39799 - 0 0 0 0 0 0 0 0 0 0 0 0
39800 - 0 0 0 0 0 0 0 0 0 0 0 0
39801 - 0 0 0 0 0 0 0 0 0 0 0 0
39802 - 0 0 0 0 0 0 0 0 0 0 0 0
39803 - 0 0 0 0 0 0 0 0 0 0 0 0
39804 - 0 0 0 0 0 0 0 0 0 0 0 0
39805 - 0 0 0 6 6 6 10 10 10 14 14 14
39806 - 18 18 18 26 26 26 38 38 38 54 54 54
39807 - 70 70 70 86 86 86 94 86 76 89 81 66
39808 - 89 81 66 86 86 86 74 74 74 50 50 50
39809 - 30 30 30 14 14 14 6 6 6 0 0 0
39810 - 0 0 0 0 0 0 0 0 0 0 0 0
39811 - 0 0 0 0 0 0 0 0 0 0 0 0
39812 - 0 0 0 0 0 0 0 0 0 0 0 0
39813 - 6 6 6 18 18 18 34 34 34 58 58 58
39814 - 82 82 82 89 81 66 89 81 66 89 81 66
39815 - 94 86 66 94 86 76 74 74 74 50 50 50
39816 - 26 26 26 14 14 14 6 6 6 0 0 0
39817 - 0 0 0 0 0 0 0 0 0 0 0 0
39818 - 0 0 0 0 0 0 0 0 0 0 0 0
39819 - 0 0 0 0 0 0 0 0 0 0 0 0
39820 - 0 0 0 0 0 0 0 0 0 0 0 0
39821 - 0 0 0 0 0 0 0 0 0 0 0 0
39822 - 0 0 0 0 0 0 0 0 0 0 0 0
39823 - 0 0 0 0 0 0 0 0 0 0 0 0
39824 - 0 0 0 0 0 0 0 0 0 0 0 0
39825 - 0 0 0 0 0 0 0 0 0 0 0 0
39826 - 6 6 6 6 6 6 14 14 14 18 18 18
39827 - 30 30 30 38 38 38 46 46 46 54 54 54
39828 - 50 50 50 42 42 42 30 30 30 18 18 18
39829 - 10 10 10 0 0 0 0 0 0 0 0 0
39830 - 0 0 0 0 0 0 0 0 0 0 0 0
39831 - 0 0 0 0 0 0 0 0 0 0 0 0
39832 - 0 0 0 0 0 0 0 0 0 0 0 0
39833 - 0 0 0 6 6 6 14 14 14 26 26 26
39834 - 38 38 38 50 50 50 58 58 58 58 58 58
39835 - 54 54 54 42 42 42 30 30 30 18 18 18
39836 - 10 10 10 0 0 0 0 0 0 0 0 0
39837 - 0 0 0 0 0 0 0 0 0 0 0 0
39838 - 0 0 0 0 0 0 0 0 0 0 0 0
39839 - 0 0 0 0 0 0 0 0 0 0 0 0
39840 - 0 0 0 0 0 0 0 0 0 0 0 0
39841 - 0 0 0 0 0 0 0 0 0 0 0 0
39842 - 0 0 0 0 0 0 0 0 0 0 0 0
39843 - 0 0 0 0 0 0 0 0 0 0 0 0
39844 - 0 0 0 0 0 0 0 0 0 0 0 0
39845 - 0 0 0 0 0 0 0 0 0 0 0 0
39846 - 0 0 0 0 0 0 0 0 0 6 6 6
39847 - 6 6 6 10 10 10 14 14 14 18 18 18
39848 - 18 18 18 14 14 14 10 10 10 6 6 6
39849 - 0 0 0 0 0 0 0 0 0 0 0 0
39850 - 0 0 0 0 0 0 0 0 0 0 0 0
39851 - 0 0 0 0 0 0 0 0 0 0 0 0
39852 - 0 0 0 0 0 0 0 0 0 0 0 0
39853 - 0 0 0 0 0 0 0 0 0 6 6 6
39854 - 14 14 14 18 18 18 22 22 22 22 22 22
39855 - 18 18 18 14 14 14 10 10 10 6 6 6
39856 - 0 0 0 0 0 0 0 0 0 0 0 0
39857 - 0 0 0 0 0 0 0 0 0 0 0 0
39858 - 0 0 0 0 0 0 0 0 0 0 0 0
39859 - 0 0 0 0 0 0 0 0 0 0 0 0
39860 - 0 0 0 0 0 0 0 0 0 0 0 0
39861 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39862 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39863 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39864 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39865 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39866 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39867 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39868 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39869 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39870 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39871 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39872 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39873 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39874 +4 4 4 4 4 4
39875 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39876 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39877 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39878 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39879 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39880 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39881 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39882 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39883 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39884 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39885 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39886 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39887 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39888 +4 4 4 4 4 4
39889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39890 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39891 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39892 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39893 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39894 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39895 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39896 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39897 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39898 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39900 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39901 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39902 +4 4 4 4 4 4
39903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39904 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39906 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39907 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39908 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39909 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39910 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39911 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39912 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39914 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39915 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39916 +4 4 4 4 4 4
39917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39918 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39919 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39920 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39921 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39922 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39923 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39924 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39925 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39926 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39928 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39929 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39930 +4 4 4 4 4 4
39931 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39932 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39933 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39934 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39935 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39936 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39937 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39940 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39942 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39943 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39944 +4 4 4 4 4 4
39945 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39946 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39948 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39949 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
39950 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
39951 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39952 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39953 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39954 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
39955 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39956 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
39957 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39958 +4 4 4 4 4 4
39959 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39960 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39961 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39962 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39963 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
39964 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
39965 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39966 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39967 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39968 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
39969 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
39970 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
39971 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39972 +4 4 4 4 4 4
39973 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39974 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39975 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39976 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39977 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
39978 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
39979 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39980 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39981 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39982 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
39983 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
39984 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
39985 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
39986 +4 4 4 4 4 4
39987 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39988 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39989 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39990 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
39991 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
39992 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
39993 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
39994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39995 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
39996 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
39997 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
39998 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
39999 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
40000 +4 4 4 4 4 4
40001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40002 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40003 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40004 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
40005 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
40006 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
40007 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
40008 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40009 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
40010 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
40011 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
40012 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
40013 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
40014 +4 4 4 4 4 4
40015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40016 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40017 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40018 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
40019 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
40020 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
40021 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
40022 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40023 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
40024 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
40025 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
40026 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
40027 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
40028 +4 4 4 4 4 4
40029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40030 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40031 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
40032 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
40033 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40034 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40035 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
40036 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
40037 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
40038 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
40039 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
40040 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
40041 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
40042 +4 4 4 4 4 4
40043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40044 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40045 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
40046 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
40047 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
40048 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
40049 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
40050 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
40051 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
40052 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
40053 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
40054 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
40055 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
40056 +4 4 4 4 4 4
40057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40059 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
40060 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
40061 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
40062 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
40063 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
40064 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
40065 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
40066 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
40067 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
40068 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
40069 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40070 +4 4 4 4 4 4
40071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40073 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
40074 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
40075 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
40076 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
40077 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
40078 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
40079 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
40080 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
40081 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
40082 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
40083 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
40084 +4 4 4 4 4 4
40085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40086 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
40087 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
40088 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
40089 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
40090 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
40091 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
40092 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
40093 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
40094 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
40095 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
40096 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
40097 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
40098 +4 4 4 4 4 4
40099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40100 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
40101 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
40102 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
40103 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40104 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
40105 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
40106 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
40107 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
40108 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
40109 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
40110 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
40111 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
40112 +0 0 0 4 4 4
40113 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40114 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
40115 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
40116 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
40117 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
40118 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
40119 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
40120 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
40121 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
40122 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
40123 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
40124 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
40125 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
40126 +2 0 0 0 0 0
40127 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
40128 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
40129 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
40130 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
40131 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
40132 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
40133 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
40134 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
40135 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
40136 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
40137 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
40138 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
40139 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
40140 +37 38 37 0 0 0
40141 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40142 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
40143 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
40144 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
40145 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
40146 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
40147 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
40148 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
40149 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
40150 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
40151 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
40152 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
40153 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
40154 +85 115 134 4 0 0
40155 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
40156 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
40157 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
40158 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
40159 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
40160 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
40161 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
40162 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
40163 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
40164 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
40165 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
40166 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
40167 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
40168 +60 73 81 4 0 0
40169 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
40170 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
40171 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
40172 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
40173 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
40174 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
40175 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
40176 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
40177 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
40178 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
40179 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
40180 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
40181 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
40182 +16 19 21 4 0 0
40183 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
40184 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
40185 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
40186 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
40187 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
40188 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
40189 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
40190 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
40191 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
40192 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
40193 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
40194 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
40195 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
40196 +4 0 0 4 3 3
40197 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
40198 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
40199 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
40200 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
40201 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
40202 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
40203 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
40204 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
40205 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
40206 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
40207 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
40208 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
40209 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
40210 +3 2 2 4 4 4
40211 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
40212 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
40213 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
40214 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40215 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
40216 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
40217 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
40218 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
40219 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
40220 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
40221 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
40222 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
40223 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
40224 +4 4 4 4 4 4
40225 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
40226 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
40227 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
40228 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
40229 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
40230 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
40231 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
40232 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
40233 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
40234 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
40235 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
40236 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
40237 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
40238 +4 4 4 4 4 4
40239 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
40240 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
40241 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
40242 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
40243 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
40244 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40245 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
40246 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
40247 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
40248 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
40249 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
40250 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
40251 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
40252 +5 5 5 5 5 5
40253 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
40254 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
40255 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
40256 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
40257 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
40258 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40259 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
40260 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
40261 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
40262 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
40263 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
40264 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
40265 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40266 +5 5 5 4 4 4
40267 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
40268 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
40269 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
40270 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
40271 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40272 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
40273 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
40274 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
40275 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
40276 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
40277 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
40278 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40279 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40280 +4 4 4 4 4 4
40281 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
40282 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
40283 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
40284 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
40285 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
40286 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40287 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40288 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
40289 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
40290 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
40291 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
40292 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
40293 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40294 +4 4 4 4 4 4
40295 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
40296 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
40297 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
40298 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
40299 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40300 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
40301 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
40302 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
40303 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
40304 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
40305 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
40306 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40307 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40308 +4 4 4 4 4 4
40309 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
40310 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
40311 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
40312 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
40313 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40314 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40315 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40316 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
40317 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
40318 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
40319 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
40320 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40321 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40322 +4 4 4 4 4 4
40323 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
40324 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
40325 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
40326 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
40327 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40328 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
40329 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40330 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
40331 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
40332 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
40333 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40334 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40335 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40336 +4 4 4 4 4 4
40337 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
40338 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
40339 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
40340 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
40341 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40342 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
40343 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
40344 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
40345 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
40346 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
40347 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
40348 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40349 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40350 +4 4 4 4 4 4
40351 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
40352 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
40353 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
40354 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
40355 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40356 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
40357 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
40358 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
40359 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
40360 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
40361 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
40362 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40363 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40364 +4 4 4 4 4 4
40365 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
40366 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
40367 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
40368 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40369 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
40370 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
40371 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
40372 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
40373 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
40374 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
40375 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40376 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40377 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40378 +4 4 4 4 4 4
40379 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
40380 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
40381 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
40382 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40383 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40384 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
40385 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
40386 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
40387 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
40388 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
40389 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40390 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40391 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40392 +4 4 4 4 4 4
40393 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
40394 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
40395 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40396 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40397 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40398 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
40399 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
40400 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
40401 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
40402 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
40403 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40404 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40405 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40406 +4 4 4 4 4 4
40407 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
40408 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
40409 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40410 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40411 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40412 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
40413 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
40414 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
40415 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40416 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40417 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40418 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40419 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40420 +4 4 4 4 4 4
40421 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40422 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
40423 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40424 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
40425 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
40426 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
40427 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
40428 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
40429 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40430 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40431 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40432 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40433 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40434 +4 4 4 4 4 4
40435 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40436 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40437 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40438 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40439 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40440 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40441 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40442 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40443 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40444 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40445 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40446 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40447 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40448 +4 4 4 4 4 4
40449 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
40450 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
40451 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40452 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
40453 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
40454 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
40455 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
40456 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
40457 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40458 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40459 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40460 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40461 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40462 +4 4 4 4 4 4
40463 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40464 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40465 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40466 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40467 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40468 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40469 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40470 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40471 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40472 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40473 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40474 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40475 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40476 +4 4 4 4 4 4
40477 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40478 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40479 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40480 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40481 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40482 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40483 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40484 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40485 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40486 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40487 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40488 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40489 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40490 +4 4 4 4 4 4
40491 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
40492 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
40493 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40494 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
40495 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
40496 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
40497 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
40498 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
40499 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
40500 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40501 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40502 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40503 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40504 +4 4 4 4 4 4
40505 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40506 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
40507 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
40508 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
40509 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
40510 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
40511 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
40512 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
40513 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40514 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40515 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40516 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40517 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40518 +4 4 4 4 4 4
40519 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40520 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
40521 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40522 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
40523 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
40524 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
40525 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
40526 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
40527 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40528 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40529 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40530 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40531 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40532 +4 4 4 4 4 4
40533 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40534 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
40535 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
40536 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
40537 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
40538 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
40539 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40540 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
40541 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40542 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40543 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40544 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40545 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40546 +4 4 4 4 4 4
40547 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40548 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
40549 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
40550 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40551 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
40552 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
40553 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40554 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
40555 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40556 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40557 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40558 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40559 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40560 +4 4 4 4 4 4
40561 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40562 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
40563 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
40564 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
40565 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
40566 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
40567 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
40568 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
40569 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
40570 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40571 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40572 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40573 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40574 +4 4 4 4 4 4
40575 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40576 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
40577 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
40578 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
40579 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
40580 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
40581 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
40582 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
40583 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
40584 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40585 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40586 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40587 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40588 +4 4 4 4 4 4
40589 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
40590 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
40591 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
40592 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
40593 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40594 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
40595 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
40596 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
40597 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
40598 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40599 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40600 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40601 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40602 +4 4 4 4 4 4
40603 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40604 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
40605 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
40606 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
40607 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
40608 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
40609 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
40610 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
40611 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
40612 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40613 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40614 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40615 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40616 +4 4 4 4 4 4
40617 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
40618 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
40619 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
40620 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
40621 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
40622 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
40623 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
40624 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
40625 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
40626 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
40627 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40628 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40629 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40630 +4 4 4 4 4 4
40631 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
40632 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40633 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
40634 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
40635 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
40636 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
40637 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
40638 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
40639 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
40640 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
40641 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40642 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40643 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40644 +4 4 4 4 4 4
40645 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
40646 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40647 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
40648 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
40649 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
40650 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
40651 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40652 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
40653 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
40654 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
40655 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40656 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40657 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40658 +4 4 4 4 4 4
40659 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
40660 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
40661 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
40662 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
40663 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
40664 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
40665 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
40666 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
40667 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
40668 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
40669 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40670 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40671 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40672 +4 4 4 4 4 4
40673 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
40674 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
40675 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40676 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
40677 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
40678 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
40679 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
40680 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
40681 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
40682 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
40683 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40684 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40685 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40686 +4 4 4 4 4 4
40687 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40688 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
40689 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
40690 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
40691 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
40692 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
40693 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
40694 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
40695 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
40696 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40697 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40698 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40699 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40700 +4 4 4 4 4 4
40701 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
40702 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
40703 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
40704 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
40705 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
40706 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
40707 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
40708 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
40709 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
40710 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40711 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40712 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40713 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40714 +4 4 4 4 4 4
40715 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
40716 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
40717 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
40718 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
40719 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
40720 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
40721 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
40722 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
40723 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40724 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40725 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40726 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40727 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40728 +4 4 4 4 4 4
40729 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
40730 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40731 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
40732 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40733 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
40734 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
40735 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
40736 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
40737 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
40738 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40739 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40740 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40741 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40742 +4 4 4 4 4 4
40743 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
40744 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
40745 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
40746 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
40747 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
40748 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
40749 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
40750 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
40751 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
40752 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40753 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40754 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40755 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40756 +4 4 4 4 4 4
40757 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40758 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
40759 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
40760 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
40761 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
40762 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
40763 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
40764 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
40765 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40766 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40767 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40768 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40769 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40770 +4 4 4 4 4 4
40771 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
40772 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
40773 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40774 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
40775 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
40776 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
40777 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
40778 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
40779 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40780 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40781 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40782 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40783 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40784 +4 4 4 4 4 4
40785 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40786 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
40787 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
40788 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
40789 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
40790 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
40791 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
40792 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40793 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40794 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40795 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40796 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40797 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40798 +4 4 4 4 4 4
40799 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40800 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
40801 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40802 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
40803 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
40804 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
40805 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
40806 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
40807 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40808 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40809 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40810 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40811 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40812 +4 4 4 4 4 4
40813 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40814 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
40815 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
40816 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
40817 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
40818 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
40819 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
40820 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
40821 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40822 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40823 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40824 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40825 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40826 +4 4 4 4 4 4
40827 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40828 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40829 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
40830 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40831 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
40832 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
40833 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
40834 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40835 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40836 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40837 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40838 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40839 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40840 +4 4 4 4 4 4
40841 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40842 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40843 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40844 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
40845 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
40846 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
40847 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
40848 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40849 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40850 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40851 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40852 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40853 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40854 +4 4 4 4 4 4
40855 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40856 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40857 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40858 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40859 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
40860 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
40861 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
40862 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40863 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40864 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40865 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40866 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40867 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40868 +4 4 4 4 4 4
40869 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40870 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40871 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40872 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40873 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40874 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
40875 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
40876 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40877 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40878 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40879 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40880 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40881 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40882 +4 4 4 4 4 4
40883 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40884 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40885 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40886 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40887 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40888 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40889 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
40890 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40891 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40892 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40893 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40894 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40895 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40896 +4 4 4 4 4 4
40897 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40898 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40900 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
40901 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
40902 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
40903 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
40904 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40906 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40907 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40908 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40909 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40910 +4 4 4 4 4 4
40911 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40912 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40914 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40915 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
40916 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40917 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40918 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40919 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40920 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40921 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40922 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40923 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40924 +4 4 4 4 4 4
40925 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40926 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40928 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40929 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
40930 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
40931 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40932 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40933 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40934 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40935 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40936 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40937 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40938 +4 4 4 4 4 4
40939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40940 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40942 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40943 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
40944 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
40945 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40946 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40948 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40949 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40950 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40951 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40952 +4 4 4 4 4 4
40953 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40954 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40955 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40956 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40957 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
40958 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
40959 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40960 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40961 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40962 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40963 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40964 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40965 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40966 +4 4 4 4 4 4
40967 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40968 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40969 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40970 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40971 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40972 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
40973 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40974 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40975 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40976 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40977 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40978 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40979 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40980 +4 4 4 4 4 4
40981 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
40982 index a159b63..4ab532d 100644
40983 --- a/drivers/video/udlfb.c
40984 +++ b/drivers/video/udlfb.c
40985 @@ -620,11 +620,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
40986 dlfb_urb_completion(urb);
40987
40988 error:
40989 - atomic_add(bytes_sent, &dev->bytes_sent);
40990 - atomic_add(bytes_identical, &dev->bytes_identical);
40991 - atomic_add(width*height*2, &dev->bytes_rendered);
40992 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
40993 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
40994 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
40995 end_cycles = get_cycles();
40996 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
40997 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
40998 >> 10)), /* Kcycles */
40999 &dev->cpu_kcycles_used);
41000
41001 @@ -745,11 +745,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
41002 dlfb_urb_completion(urb);
41003
41004 error:
41005 - atomic_add(bytes_sent, &dev->bytes_sent);
41006 - atomic_add(bytes_identical, &dev->bytes_identical);
41007 - atomic_add(bytes_rendered, &dev->bytes_rendered);
41008 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41009 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41010 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
41011 end_cycles = get_cycles();
41012 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
41013 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41014 >> 10)), /* Kcycles */
41015 &dev->cpu_kcycles_used);
41016 }
41017 @@ -1373,7 +1373,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
41018 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41019 struct dlfb_data *dev = fb_info->par;
41020 return snprintf(buf, PAGE_SIZE, "%u\n",
41021 - atomic_read(&dev->bytes_rendered));
41022 + atomic_read_unchecked(&dev->bytes_rendered));
41023 }
41024
41025 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41026 @@ -1381,7 +1381,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41027 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41028 struct dlfb_data *dev = fb_info->par;
41029 return snprintf(buf, PAGE_SIZE, "%u\n",
41030 - atomic_read(&dev->bytes_identical));
41031 + atomic_read_unchecked(&dev->bytes_identical));
41032 }
41033
41034 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41035 @@ -1389,7 +1389,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41036 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41037 struct dlfb_data *dev = fb_info->par;
41038 return snprintf(buf, PAGE_SIZE, "%u\n",
41039 - atomic_read(&dev->bytes_sent));
41040 + atomic_read_unchecked(&dev->bytes_sent));
41041 }
41042
41043 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41044 @@ -1397,7 +1397,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41045 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41046 struct dlfb_data *dev = fb_info->par;
41047 return snprintf(buf, PAGE_SIZE, "%u\n",
41048 - atomic_read(&dev->cpu_kcycles_used));
41049 + atomic_read_unchecked(&dev->cpu_kcycles_used));
41050 }
41051
41052 static ssize_t edid_show(
41053 @@ -1457,10 +1457,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
41054 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41055 struct dlfb_data *dev = fb_info->par;
41056
41057 - atomic_set(&dev->bytes_rendered, 0);
41058 - atomic_set(&dev->bytes_identical, 0);
41059 - atomic_set(&dev->bytes_sent, 0);
41060 - atomic_set(&dev->cpu_kcycles_used, 0);
41061 + atomic_set_unchecked(&dev->bytes_rendered, 0);
41062 + atomic_set_unchecked(&dev->bytes_identical, 0);
41063 + atomic_set_unchecked(&dev->bytes_sent, 0);
41064 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
41065
41066 return count;
41067 }
41068 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
41069 index b0e2a42..e2df3ad 100644
41070 --- a/drivers/video/uvesafb.c
41071 +++ b/drivers/video/uvesafb.c
41072 @@ -19,6 +19,7 @@
41073 #include <linux/io.h>
41074 #include <linux/mutex.h>
41075 #include <linux/slab.h>
41076 +#include <linux/moduleloader.h>
41077 #include <video/edid.h>
41078 #include <video/uvesafb.h>
41079 #ifdef CONFIG_X86
41080 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
41081 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
41082 par->pmi_setpal = par->ypan = 0;
41083 } else {
41084 +
41085 +#ifdef CONFIG_PAX_KERNEXEC
41086 +#ifdef CONFIG_MODULES
41087 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
41088 +#endif
41089 + if (!par->pmi_code) {
41090 + par->pmi_setpal = par->ypan = 0;
41091 + return 0;
41092 + }
41093 +#endif
41094 +
41095 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
41096 + task->t.regs.edi);
41097 +
41098 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41099 + pax_open_kernel();
41100 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
41101 + pax_close_kernel();
41102 +
41103 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
41104 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
41105 +#else
41106 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
41107 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
41108 +#endif
41109 +
41110 printk(KERN_INFO "uvesafb: protected mode interface info at "
41111 "%04x:%04x\n",
41112 (u16)task->t.regs.es, (u16)task->t.regs.edi);
41113 @@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
41114 par->ypan = ypan;
41115
41116 if (par->pmi_setpal || par->ypan) {
41117 +#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
41118 if (__supported_pte_mask & _PAGE_NX) {
41119 par->pmi_setpal = par->ypan = 0;
41120 printk(KERN_WARNING "uvesafb: NX protection is actively."
41121 "We have better not to use the PMI.\n");
41122 - } else {
41123 + } else
41124 +#endif
41125 uvesafb_vbe_getpmi(task, par);
41126 - }
41127 }
41128 #else
41129 /* The protected mode interface is not available on non-x86. */
41130 @@ -1836,6 +1860,11 @@ out:
41131 if (par->vbe_modes)
41132 kfree(par->vbe_modes);
41133
41134 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41135 + if (par->pmi_code)
41136 + module_free_exec(NULL, par->pmi_code);
41137 +#endif
41138 +
41139 framebuffer_release(info);
41140 return err;
41141 }
41142 @@ -1862,6 +1891,12 @@ static int uvesafb_remove(struct platform_device *dev)
41143 kfree(par->vbe_state_orig);
41144 if (par->vbe_state_saved)
41145 kfree(par->vbe_state_saved);
41146 +
41147 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41148 + if (par->pmi_code)
41149 + module_free_exec(NULL, par->pmi_code);
41150 +#endif
41151 +
41152 }
41153
41154 framebuffer_release(info);
41155 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
41156 index 501b340..86bd4cf 100644
41157 --- a/drivers/video/vesafb.c
41158 +++ b/drivers/video/vesafb.c
41159 @@ -9,6 +9,7 @@
41160 */
41161
41162 #include <linux/module.h>
41163 +#include <linux/moduleloader.h>
41164 #include <linux/kernel.h>
41165 #include <linux/errno.h>
41166 #include <linux/string.h>
41167 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
41168 static int vram_total __initdata; /* Set total amount of memory */
41169 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
41170 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
41171 -static void (*pmi_start)(void) __read_mostly;
41172 -static void (*pmi_pal) (void) __read_mostly;
41173 +static void (*pmi_start)(void) __read_only;
41174 +static void (*pmi_pal) (void) __read_only;
41175 static int depth __read_mostly;
41176 static int vga_compat __read_mostly;
41177 /* --------------------------------------------------------------------- */
41178 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
41179 unsigned int size_vmode;
41180 unsigned int size_remap;
41181 unsigned int size_total;
41182 + void *pmi_code = NULL;
41183
41184 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
41185 return -ENODEV;
41186 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
41187 size_remap = size_total;
41188 vesafb_fix.smem_len = size_remap;
41189
41190 -#ifndef __i386__
41191 - screen_info.vesapm_seg = 0;
41192 -#endif
41193 -
41194 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
41195 printk(KERN_WARNING
41196 "vesafb: cannot reserve video memory at 0x%lx\n",
41197 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
41198 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
41199 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
41200
41201 +#ifdef __i386__
41202 +
41203 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41204 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
41205 + if (!pmi_code)
41206 +#elif !defined(CONFIG_PAX_KERNEXEC)
41207 + if (0)
41208 +#endif
41209 +
41210 +#endif
41211 + screen_info.vesapm_seg = 0;
41212 +
41213 if (screen_info.vesapm_seg) {
41214 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
41215 - screen_info.vesapm_seg,screen_info.vesapm_off);
41216 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
41217 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
41218 }
41219
41220 if (screen_info.vesapm_seg < 0xc000)
41221 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
41222
41223 if (ypan || pmi_setpal) {
41224 unsigned short *pmi_base;
41225 +
41226 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
41227 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
41228 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
41229 +
41230 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41231 + pax_open_kernel();
41232 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
41233 +#else
41234 + pmi_code = pmi_base;
41235 +#endif
41236 +
41237 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
41238 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
41239 +
41240 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41241 + pmi_start = ktva_ktla(pmi_start);
41242 + pmi_pal = ktva_ktla(pmi_pal);
41243 + pax_close_kernel();
41244 +#endif
41245 +
41246 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
41247 if (pmi_base[3]) {
41248 printk(KERN_INFO "vesafb: pmi: ports = ");
41249 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
41250 info->node, info->fix.id);
41251 return 0;
41252 err:
41253 +
41254 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41255 + module_free_exec(NULL, pmi_code);
41256 +#endif
41257 +
41258 if (info->screen_base)
41259 iounmap(info->screen_base);
41260 framebuffer_release(info);
41261 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
41262 index 88714ae..16c2e11 100644
41263 --- a/drivers/video/via/via_clock.h
41264 +++ b/drivers/video/via/via_clock.h
41265 @@ -56,7 +56,7 @@ struct via_clock {
41266
41267 void (*set_engine_pll_state)(u8 state);
41268 void (*set_engine_pll)(struct via_pll_config config);
41269 -};
41270 +} __no_const;
41271
41272
41273 static inline u32 get_pll_internal_frequency(u32 ref_freq,
41274 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
41275 index e56c934..fc22f4b 100644
41276 --- a/drivers/xen/xen-pciback/conf_space.h
41277 +++ b/drivers/xen/xen-pciback/conf_space.h
41278 @@ -44,15 +44,15 @@ struct config_field {
41279 struct {
41280 conf_dword_write write;
41281 conf_dword_read read;
41282 - } dw;
41283 + } __no_const dw;
41284 struct {
41285 conf_word_write write;
41286 conf_word_read read;
41287 - } w;
41288 + } __no_const w;
41289 struct {
41290 conf_byte_write write;
41291 conf_byte_read read;
41292 - } b;
41293 + } __no_const b;
41294 } u;
41295 struct list_head list;
41296 };
41297 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
41298 index 014c8dd..6f3dfe6 100644
41299 --- a/fs/9p/vfs_inode.c
41300 +++ b/fs/9p/vfs_inode.c
41301 @@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41302 void
41303 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41304 {
41305 - char *s = nd_get_link(nd);
41306 + const char *s = nd_get_link(nd);
41307
41308 p9_debug(P9_DEBUG_VFS, " %s %s\n",
41309 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
41310 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
41311 index e95d1b6..3454244 100644
41312 --- a/fs/Kconfig.binfmt
41313 +++ b/fs/Kconfig.binfmt
41314 @@ -89,7 +89,7 @@ config HAVE_AOUT
41315
41316 config BINFMT_AOUT
41317 tristate "Kernel support for a.out and ECOFF binaries"
41318 - depends on HAVE_AOUT
41319 + depends on HAVE_AOUT && BROKEN
41320 ---help---
41321 A.out (Assembler.OUTput) is a set of formats for libraries and
41322 executables used in the earliest versions of UNIX. Linux used
41323 diff --git a/fs/aio.c b/fs/aio.c
41324 index e7f2fad..15ad8a4 100644
41325 --- a/fs/aio.c
41326 +++ b/fs/aio.c
41327 @@ -118,7 +118,7 @@ static int aio_setup_ring(struct kioctx *ctx)
41328 size += sizeof(struct io_event) * nr_events;
41329 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
41330
41331 - if (nr_pages < 0)
41332 + if (nr_pages <= 0)
41333 return -EINVAL;
41334
41335 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
41336 @@ -1440,18 +1440,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
41337 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41338 {
41339 ssize_t ret;
41340 + struct iovec iovstack;
41341
41342 #ifdef CONFIG_COMPAT
41343 if (compat)
41344 ret = compat_rw_copy_check_uvector(type,
41345 (struct compat_iovec __user *)kiocb->ki_buf,
41346 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41347 + kiocb->ki_nbytes, 1, &iovstack,
41348 &kiocb->ki_iovec, 1);
41349 else
41350 #endif
41351 ret = rw_copy_check_uvector(type,
41352 (struct iovec __user *)kiocb->ki_buf,
41353 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41354 + kiocb->ki_nbytes, 1, &iovstack,
41355 &kiocb->ki_iovec, 1);
41356 if (ret < 0)
41357 goto out;
41358 @@ -1460,6 +1461,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41359 if (ret < 0)
41360 goto out;
41361
41362 + if (kiocb->ki_iovec == &iovstack) {
41363 + kiocb->ki_inline_vec = iovstack;
41364 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
41365 + }
41366 kiocb->ki_nr_segs = kiocb->ki_nbytes;
41367 kiocb->ki_cur_seg = 0;
41368 /* ki_nbytes/left now reflect bytes instead of segs */
41369 diff --git a/fs/attr.c b/fs/attr.c
41370 index d94d1b6..f9bccd6 100644
41371 --- a/fs/attr.c
41372 +++ b/fs/attr.c
41373 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
41374 unsigned long limit;
41375
41376 limit = rlimit(RLIMIT_FSIZE);
41377 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
41378 if (limit != RLIM_INFINITY && offset > limit)
41379 goto out_sig;
41380 if (offset > inode->i_sb->s_maxbytes)
41381 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
41382 index da8876d..9f3e6d8 100644
41383 --- a/fs/autofs4/waitq.c
41384 +++ b/fs/autofs4/waitq.c
41385 @@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
41386 {
41387 unsigned long sigpipe, flags;
41388 mm_segment_t fs;
41389 - const char *data = (const char *)addr;
41390 + const char __user *data = (const char __force_user *)addr;
41391 ssize_t wr = 0;
41392
41393 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
41394 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
41395 index e18da23..affc30e 100644
41396 --- a/fs/befs/linuxvfs.c
41397 +++ b/fs/befs/linuxvfs.c
41398 @@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41399 {
41400 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
41401 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
41402 - char *link = nd_get_link(nd);
41403 + const char *link = nd_get_link(nd);
41404 if (!IS_ERR(link))
41405 kfree(link);
41406 }
41407 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
41408 index d146e18..12d1bd1 100644
41409 --- a/fs/binfmt_aout.c
41410 +++ b/fs/binfmt_aout.c
41411 @@ -16,6 +16,7 @@
41412 #include <linux/string.h>
41413 #include <linux/fs.h>
41414 #include <linux/file.h>
41415 +#include <linux/security.h>
41416 #include <linux/stat.h>
41417 #include <linux/fcntl.h>
41418 #include <linux/ptrace.h>
41419 @@ -83,6 +84,8 @@ static int aout_core_dump(struct coredump_params *cprm)
41420 #endif
41421 # define START_STACK(u) ((void __user *)u.start_stack)
41422
41423 + memset(&dump, 0, sizeof(dump));
41424 +
41425 fs = get_fs();
41426 set_fs(KERNEL_DS);
41427 has_dumped = 1;
41428 @@ -94,10 +97,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41429
41430 /* If the size of the dump file exceeds the rlimit, then see what would happen
41431 if we wrote the stack, but not the data area. */
41432 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41433 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41434 dump.u_dsize = 0;
41435
41436 /* Make sure we have enough room to write the stack and data areas. */
41437 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41438 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41439 dump.u_ssize = 0;
41440
41441 @@ -231,6 +236,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41442 rlim = rlimit(RLIMIT_DATA);
41443 if (rlim >= RLIM_INFINITY)
41444 rlim = ~0;
41445 +
41446 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41447 if (ex.a_data + ex.a_bss > rlim)
41448 return -ENOMEM;
41449
41450 @@ -265,6 +272,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41451
41452 install_exec_creds(bprm);
41453
41454 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41455 + current->mm->pax_flags = 0UL;
41456 +#endif
41457 +
41458 +#ifdef CONFIG_PAX_PAGEEXEC
41459 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41460 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41461 +
41462 +#ifdef CONFIG_PAX_EMUTRAMP
41463 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41464 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41465 +#endif
41466 +
41467 +#ifdef CONFIG_PAX_MPROTECT
41468 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41469 + current->mm->pax_flags |= MF_PAX_MPROTECT;
41470 +#endif
41471 +
41472 + }
41473 +#endif
41474 +
41475 if (N_MAGIC(ex) == OMAGIC) {
41476 unsigned long text_addr, map_size;
41477 loff_t pos;
41478 @@ -330,7 +358,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41479 }
41480
41481 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41482 - PROT_READ | PROT_WRITE | PROT_EXEC,
41483 + PROT_READ | PROT_WRITE,
41484 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41485 fd_offset + ex.a_text);
41486 if (error != N_DATADDR(ex)) {
41487 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41488 index 16f7354..185d8dc 100644
41489 --- a/fs/binfmt_elf.c
41490 +++ b/fs/binfmt_elf.c
41491 @@ -32,6 +32,7 @@
41492 #include <linux/elf.h>
41493 #include <linux/utsname.h>
41494 #include <linux/coredump.h>
41495 +#include <linux/xattr.h>
41496 #include <asm/uaccess.h>
41497 #include <asm/param.h>
41498 #include <asm/page.h>
41499 @@ -52,6 +53,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41500 #define elf_core_dump NULL
41501 #endif
41502
41503 +#ifdef CONFIG_PAX_MPROTECT
41504 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41505 +#endif
41506 +
41507 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41508 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41509 #else
41510 @@ -71,6 +76,11 @@ static struct linux_binfmt elf_format = {
41511 .load_binary = load_elf_binary,
41512 .load_shlib = load_elf_library,
41513 .core_dump = elf_core_dump,
41514 +
41515 +#ifdef CONFIG_PAX_MPROTECT
41516 + .handle_mprotect= elf_handle_mprotect,
41517 +#endif
41518 +
41519 .min_coredump = ELF_EXEC_PAGESIZE,
41520 };
41521
41522 @@ -78,6 +88,8 @@ static struct linux_binfmt elf_format = {
41523
41524 static int set_brk(unsigned long start, unsigned long end)
41525 {
41526 + unsigned long e = end;
41527 +
41528 start = ELF_PAGEALIGN(start);
41529 end = ELF_PAGEALIGN(end);
41530 if (end > start) {
41531 @@ -86,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
41532 if (BAD_ADDR(addr))
41533 return addr;
41534 }
41535 - current->mm->start_brk = current->mm->brk = end;
41536 + current->mm->start_brk = current->mm->brk = e;
41537 return 0;
41538 }
41539
41540 @@ -147,12 +159,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41541 elf_addr_t __user *u_rand_bytes;
41542 const char *k_platform = ELF_PLATFORM;
41543 const char *k_base_platform = ELF_BASE_PLATFORM;
41544 - unsigned char k_rand_bytes[16];
41545 + u32 k_rand_bytes[4];
41546 int items;
41547 elf_addr_t *elf_info;
41548 int ei_index = 0;
41549 const struct cred *cred = current_cred();
41550 struct vm_area_struct *vma;
41551 + unsigned long saved_auxv[AT_VECTOR_SIZE];
41552
41553 /*
41554 * In some cases (e.g. Hyper-Threading), we want to avoid L1
41555 @@ -194,8 +207,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41556 * Generate 16 random bytes for userspace PRNG seeding.
41557 */
41558 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41559 - u_rand_bytes = (elf_addr_t __user *)
41560 - STACK_ALLOC(p, sizeof(k_rand_bytes));
41561 + srandom32(k_rand_bytes[0] ^ random32());
41562 + srandom32(k_rand_bytes[1] ^ random32());
41563 + srandom32(k_rand_bytes[2] ^ random32());
41564 + srandom32(k_rand_bytes[3] ^ random32());
41565 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
41566 + u_rand_bytes = (elf_addr_t __user *) p;
41567 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
41568 return -EFAULT;
41569
41570 @@ -307,9 +324,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41571 return -EFAULT;
41572 current->mm->env_end = p;
41573
41574 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
41575 +
41576 /* Put the elf_info on the stack in the right place. */
41577 sp = (elf_addr_t __user *)envp + 1;
41578 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
41579 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
41580 return -EFAULT;
41581 return 0;
41582 }
41583 @@ -380,10 +399,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41584 {
41585 struct elf_phdr *elf_phdata;
41586 struct elf_phdr *eppnt;
41587 - unsigned long load_addr = 0;
41588 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
41589 int load_addr_set = 0;
41590 unsigned long last_bss = 0, elf_bss = 0;
41591 - unsigned long error = ~0UL;
41592 + unsigned long error = -EINVAL;
41593 unsigned long total_size;
41594 int retval, i, size;
41595
41596 @@ -429,6 +448,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41597 goto out_close;
41598 }
41599
41600 +#ifdef CONFIG_PAX_SEGMEXEC
41601 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
41602 + pax_task_size = SEGMEXEC_TASK_SIZE;
41603 +#endif
41604 +
41605 eppnt = elf_phdata;
41606 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
41607 if (eppnt->p_type == PT_LOAD) {
41608 @@ -472,8 +496,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41609 k = load_addr + eppnt->p_vaddr;
41610 if (BAD_ADDR(k) ||
41611 eppnt->p_filesz > eppnt->p_memsz ||
41612 - eppnt->p_memsz > TASK_SIZE ||
41613 - TASK_SIZE - eppnt->p_memsz < k) {
41614 + eppnt->p_memsz > pax_task_size ||
41615 + pax_task_size - eppnt->p_memsz < k) {
41616 error = -ENOMEM;
41617 goto out_close;
41618 }
41619 @@ -525,6 +549,351 @@ out:
41620 return error;
41621 }
41622
41623 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
41624 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
41625 +{
41626 + unsigned long pax_flags = 0UL;
41627 +
41628 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41629 +
41630 +#ifdef CONFIG_PAX_PAGEEXEC
41631 + if (elf_phdata->p_flags & PF_PAGEEXEC)
41632 + pax_flags |= MF_PAX_PAGEEXEC;
41633 +#endif
41634 +
41635 +#ifdef CONFIG_PAX_SEGMEXEC
41636 + if (elf_phdata->p_flags & PF_SEGMEXEC)
41637 + pax_flags |= MF_PAX_SEGMEXEC;
41638 +#endif
41639 +
41640 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41641 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41642 + if ((__supported_pte_mask & _PAGE_NX))
41643 + pax_flags &= ~MF_PAX_SEGMEXEC;
41644 + else
41645 + pax_flags &= ~MF_PAX_PAGEEXEC;
41646 + }
41647 +#endif
41648 +
41649 +#ifdef CONFIG_PAX_EMUTRAMP
41650 + if (elf_phdata->p_flags & PF_EMUTRAMP)
41651 + pax_flags |= MF_PAX_EMUTRAMP;
41652 +#endif
41653 +
41654 +#ifdef CONFIG_PAX_MPROTECT
41655 + if (elf_phdata->p_flags & PF_MPROTECT)
41656 + pax_flags |= MF_PAX_MPROTECT;
41657 +#endif
41658 +
41659 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41660 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
41661 + pax_flags |= MF_PAX_RANDMMAP;
41662 +#endif
41663 +
41664 +#endif
41665 +
41666 + return pax_flags;
41667 +}
41668 +
41669 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
41670 +{
41671 + unsigned long pax_flags = 0UL;
41672 +
41673 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41674 +
41675 +#ifdef CONFIG_PAX_PAGEEXEC
41676 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
41677 + pax_flags |= MF_PAX_PAGEEXEC;
41678 +#endif
41679 +
41680 +#ifdef CONFIG_PAX_SEGMEXEC
41681 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
41682 + pax_flags |= MF_PAX_SEGMEXEC;
41683 +#endif
41684 +
41685 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41686 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41687 + if ((__supported_pte_mask & _PAGE_NX))
41688 + pax_flags &= ~MF_PAX_SEGMEXEC;
41689 + else
41690 + pax_flags &= ~MF_PAX_PAGEEXEC;
41691 + }
41692 +#endif
41693 +
41694 +#ifdef CONFIG_PAX_EMUTRAMP
41695 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
41696 + pax_flags |= MF_PAX_EMUTRAMP;
41697 +#endif
41698 +
41699 +#ifdef CONFIG_PAX_MPROTECT
41700 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
41701 + pax_flags |= MF_PAX_MPROTECT;
41702 +#endif
41703 +
41704 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41705 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
41706 + pax_flags |= MF_PAX_RANDMMAP;
41707 +#endif
41708 +
41709 +#endif
41710 +
41711 + return pax_flags;
41712 +}
41713 +
41714 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
41715 +{
41716 + unsigned long pax_flags = 0UL;
41717 +
41718 +#ifdef CONFIG_PAX_EI_PAX
41719 +
41720 +#ifdef CONFIG_PAX_PAGEEXEC
41721 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
41722 + pax_flags |= MF_PAX_PAGEEXEC;
41723 +#endif
41724 +
41725 +#ifdef CONFIG_PAX_SEGMEXEC
41726 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
41727 + pax_flags |= MF_PAX_SEGMEXEC;
41728 +#endif
41729 +
41730 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41731 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41732 + if ((__supported_pte_mask & _PAGE_NX))
41733 + pax_flags &= ~MF_PAX_SEGMEXEC;
41734 + else
41735 + pax_flags &= ~MF_PAX_PAGEEXEC;
41736 + }
41737 +#endif
41738 +
41739 +#ifdef CONFIG_PAX_EMUTRAMP
41740 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
41741 + pax_flags |= MF_PAX_EMUTRAMP;
41742 +#endif
41743 +
41744 +#ifdef CONFIG_PAX_MPROTECT
41745 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
41746 + pax_flags |= MF_PAX_MPROTECT;
41747 +#endif
41748 +
41749 +#ifdef CONFIG_PAX_ASLR
41750 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
41751 + pax_flags |= MF_PAX_RANDMMAP;
41752 +#endif
41753 +
41754 +#else
41755 +
41756 +#ifdef CONFIG_PAX_PAGEEXEC
41757 + pax_flags |= MF_PAX_PAGEEXEC;
41758 +#endif
41759 +
41760 +#ifdef CONFIG_PAX_MPROTECT
41761 + pax_flags |= MF_PAX_MPROTECT;
41762 +#endif
41763 +
41764 +#ifdef CONFIG_PAX_RANDMMAP
41765 + pax_flags |= MF_PAX_RANDMMAP;
41766 +#endif
41767 +
41768 +#ifdef CONFIG_PAX_SEGMEXEC
41769 + if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
41770 + pax_flags &= ~MF_PAX_PAGEEXEC;
41771 + pax_flags |= MF_PAX_SEGMEXEC;
41772 + }
41773 +#endif
41774 +
41775 +#endif
41776 +
41777 + return pax_flags;
41778 +}
41779 +
41780 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
41781 +{
41782 +
41783 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41784 + unsigned long i;
41785 +
41786 + for (i = 0UL; i < elf_ex->e_phnum; i++)
41787 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
41788 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
41789 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
41790 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
41791 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
41792 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
41793 + return ~0UL;
41794 +
41795 +#ifdef CONFIG_PAX_SOFTMODE
41796 + if (pax_softmode)
41797 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
41798 + else
41799 +#endif
41800 +
41801 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
41802 + break;
41803 + }
41804 +#endif
41805 +
41806 + return ~0UL;
41807 +}
41808 +
41809 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41810 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
41811 +{
41812 + unsigned long pax_flags = 0UL;
41813 +
41814 +#ifdef CONFIG_PAX_PAGEEXEC
41815 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
41816 + pax_flags |= MF_PAX_PAGEEXEC;
41817 +#endif
41818 +
41819 +#ifdef CONFIG_PAX_SEGMEXEC
41820 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
41821 + pax_flags |= MF_PAX_SEGMEXEC;
41822 +#endif
41823 +
41824 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41825 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41826 + if ((__supported_pte_mask & _PAGE_NX))
41827 + pax_flags &= ~MF_PAX_SEGMEXEC;
41828 + else
41829 + pax_flags &= ~MF_PAX_PAGEEXEC;
41830 + }
41831 +#endif
41832 +
41833 +#ifdef CONFIG_PAX_EMUTRAMP
41834 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
41835 + pax_flags |= MF_PAX_EMUTRAMP;
41836 +#endif
41837 +
41838 +#ifdef CONFIG_PAX_MPROTECT
41839 + if (pax_flags_softmode & MF_PAX_MPROTECT)
41840 + pax_flags |= MF_PAX_MPROTECT;
41841 +#endif
41842 +
41843 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41844 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
41845 + pax_flags |= MF_PAX_RANDMMAP;
41846 +#endif
41847 +
41848 + return pax_flags;
41849 +}
41850 +
41851 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
41852 +{
41853 + unsigned long pax_flags = 0UL;
41854 +
41855 +#ifdef CONFIG_PAX_PAGEEXEC
41856 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
41857 + pax_flags |= MF_PAX_PAGEEXEC;
41858 +#endif
41859 +
41860 +#ifdef CONFIG_PAX_SEGMEXEC
41861 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
41862 + pax_flags |= MF_PAX_SEGMEXEC;
41863 +#endif
41864 +
41865 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41866 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41867 + if ((__supported_pte_mask & _PAGE_NX))
41868 + pax_flags &= ~MF_PAX_SEGMEXEC;
41869 + else
41870 + pax_flags &= ~MF_PAX_PAGEEXEC;
41871 + }
41872 +#endif
41873 +
41874 +#ifdef CONFIG_PAX_EMUTRAMP
41875 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
41876 + pax_flags |= MF_PAX_EMUTRAMP;
41877 +#endif
41878 +
41879 +#ifdef CONFIG_PAX_MPROTECT
41880 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
41881 + pax_flags |= MF_PAX_MPROTECT;
41882 +#endif
41883 +
41884 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41885 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
41886 + pax_flags |= MF_PAX_RANDMMAP;
41887 +#endif
41888 +
41889 + return pax_flags;
41890 +}
41891 +#endif
41892 +
41893 +static unsigned long pax_parse_xattr_pax(struct file * const file)
41894 +{
41895 +
41896 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41897 + ssize_t xattr_size, i;
41898 + unsigned char xattr_value[5];
41899 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
41900 +
41901 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
41902 + if (xattr_size <= 0)
41903 + return ~0UL;
41904 +
41905 + for (i = 0; i < xattr_size; i++)
41906 + switch (xattr_value[i]) {
41907 + default:
41908 + return ~0UL;
41909 +
41910 +#define parse_flag(option1, option2, flag) \
41911 + case option1: \
41912 + pax_flags_hardmode |= MF_PAX_##flag; \
41913 + break; \
41914 + case option2: \
41915 + pax_flags_softmode |= MF_PAX_##flag; \
41916 + break;
41917 +
41918 + parse_flag('p', 'P', PAGEEXEC);
41919 + parse_flag('e', 'E', EMUTRAMP);
41920 + parse_flag('m', 'M', MPROTECT);
41921 + parse_flag('r', 'R', RANDMMAP);
41922 + parse_flag('s', 'S', SEGMEXEC);
41923 +
41924 +#undef parse_flag
41925 + }
41926 +
41927 + if (pax_flags_hardmode & pax_flags_softmode)
41928 + return ~0UL;
41929 +
41930 +#ifdef CONFIG_PAX_SOFTMODE
41931 + if (pax_softmode)
41932 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
41933 + else
41934 +#endif
41935 +
41936 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
41937 +#else
41938 + return ~0UL;
41939 +#endif
41940 +
41941 +}
41942 +
41943 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
41944 +{
41945 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
41946 +
41947 + pax_flags = pax_parse_ei_pax(elf_ex);
41948 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
41949 + xattr_pax_flags = pax_parse_xattr_pax(file);
41950 +
41951 + if (pt_pax_flags == ~0UL)
41952 + pt_pax_flags = xattr_pax_flags;
41953 + else if (xattr_pax_flags == ~0UL)
41954 + xattr_pax_flags = pt_pax_flags;
41955 + if (pt_pax_flags != xattr_pax_flags)
41956 + return -EINVAL;
41957 + if (pt_pax_flags != ~0UL)
41958 + pax_flags = pt_pax_flags;
41959 +
41960 + if (0 > pax_check_flags(&pax_flags))
41961 + return -EINVAL;
41962 +
41963 + current->mm->pax_flags = pax_flags;
41964 + return 0;
41965 +}
41966 +#endif
41967 +
41968 /*
41969 * These are the functions used to load ELF style executables and shared
41970 * libraries. There is no binary dependent code anywhere else.
41971 @@ -541,6 +910,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
41972 {
41973 unsigned int random_variable = 0;
41974
41975 +#ifdef CONFIG_PAX_RANDUSTACK
41976 + if (randomize_va_space)
41977 + return stack_top - current->mm->delta_stack;
41978 +#endif
41979 +
41980 if ((current->flags & PF_RANDOMIZE) &&
41981 !(current->personality & ADDR_NO_RANDOMIZE)) {
41982 random_variable = get_random_int() & STACK_RND_MASK;
41983 @@ -559,7 +933,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41984 unsigned long load_addr = 0, load_bias = 0;
41985 int load_addr_set = 0;
41986 char * elf_interpreter = NULL;
41987 - unsigned long error;
41988 + unsigned long error = 0;
41989 struct elf_phdr *elf_ppnt, *elf_phdata;
41990 unsigned long elf_bss, elf_brk;
41991 int retval, i;
41992 @@ -569,11 +943,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41993 unsigned long start_code, end_code, start_data, end_data;
41994 unsigned long reloc_func_desc __maybe_unused = 0;
41995 int executable_stack = EXSTACK_DEFAULT;
41996 - unsigned long def_flags = 0;
41997 struct {
41998 struct elfhdr elf_ex;
41999 struct elfhdr interp_elf_ex;
42000 } *loc;
42001 + unsigned long pax_task_size = TASK_SIZE;
42002
42003 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
42004 if (!loc) {
42005 @@ -709,11 +1083,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42006 goto out_free_dentry;
42007
42008 /* OK, This is the point of no return */
42009 - current->mm->def_flags = def_flags;
42010 +
42011 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42012 + current->mm->pax_flags = 0UL;
42013 +#endif
42014 +
42015 +#ifdef CONFIG_PAX_DLRESOLVE
42016 + current->mm->call_dl_resolve = 0UL;
42017 +#endif
42018 +
42019 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
42020 + current->mm->call_syscall = 0UL;
42021 +#endif
42022 +
42023 +#ifdef CONFIG_PAX_ASLR
42024 + current->mm->delta_mmap = 0UL;
42025 + current->mm->delta_stack = 0UL;
42026 +#endif
42027 +
42028 + current->mm->def_flags = 0;
42029 +
42030 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
42031 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
42032 + send_sig(SIGKILL, current, 0);
42033 + goto out_free_dentry;
42034 + }
42035 +#endif
42036 +
42037 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42038 + pax_set_initial_flags(bprm);
42039 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42040 + if (pax_set_initial_flags_func)
42041 + (pax_set_initial_flags_func)(bprm);
42042 +#endif
42043 +
42044 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42045 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
42046 + current->mm->context.user_cs_limit = PAGE_SIZE;
42047 + current->mm->def_flags |= VM_PAGEEXEC;
42048 + }
42049 +#endif
42050 +
42051 +#ifdef CONFIG_PAX_SEGMEXEC
42052 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42053 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42054 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42055 + pax_task_size = SEGMEXEC_TASK_SIZE;
42056 + current->mm->def_flags |= VM_NOHUGEPAGE;
42057 + }
42058 +#endif
42059 +
42060 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42061 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42062 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42063 + put_cpu();
42064 + }
42065 +#endif
42066
42067 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42068 may depend on the personality. */
42069 SET_PERSONALITY(loc->elf_ex);
42070 +
42071 +#ifdef CONFIG_PAX_ASLR
42072 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42073 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
42074 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
42075 + }
42076 +#endif
42077 +
42078 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42079 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42080 + executable_stack = EXSTACK_DISABLE_X;
42081 + current->personality &= ~READ_IMPLIES_EXEC;
42082 + } else
42083 +#endif
42084 +
42085 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
42086 current->personality |= READ_IMPLIES_EXEC;
42087
42088 @@ -804,6 +1248,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42089 #else
42090 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
42091 #endif
42092 +
42093 +#ifdef CONFIG_PAX_RANDMMAP
42094 + /* PaX: randomize base address at the default exe base if requested */
42095 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
42096 +#ifdef CONFIG_SPARC64
42097 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
42098 +#else
42099 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
42100 +#endif
42101 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
42102 + elf_flags |= MAP_FIXED;
42103 + }
42104 +#endif
42105 +
42106 }
42107
42108 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
42109 @@ -836,9 +1294,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42110 * allowed task size. Note that p_filesz must always be
42111 * <= p_memsz so it is only necessary to check p_memsz.
42112 */
42113 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42114 - elf_ppnt->p_memsz > TASK_SIZE ||
42115 - TASK_SIZE - elf_ppnt->p_memsz < k) {
42116 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42117 + elf_ppnt->p_memsz > pax_task_size ||
42118 + pax_task_size - elf_ppnt->p_memsz < k) {
42119 /* set_brk can never work. Avoid overflows. */
42120 send_sig(SIGKILL, current, 0);
42121 retval = -EINVAL;
42122 @@ -877,11 +1335,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42123 goto out_free_dentry;
42124 }
42125 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
42126 - send_sig(SIGSEGV, current, 0);
42127 - retval = -EFAULT; /* Nobody gets to see this, but.. */
42128 - goto out_free_dentry;
42129 + /*
42130 + * This bss-zeroing can fail if the ELF
42131 + * file specifies odd protections. So
42132 + * we don't check the return value
42133 + */
42134 }
42135
42136 +#ifdef CONFIG_PAX_RANDMMAP
42137 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42138 + unsigned long start, size;
42139 +
42140 + start = ELF_PAGEALIGN(elf_brk);
42141 + size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
42142 + down_write(&current->mm->mmap_sem);
42143 + retval = -ENOMEM;
42144 + if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
42145 + unsigned long prot = PROT_NONE;
42146 +
42147 + current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
42148 +// if (current->personality & ADDR_NO_RANDOMIZE)
42149 +// prot = PROT_READ;
42150 + start = do_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
42151 + retval = IS_ERR_VALUE(start) ? start : 0;
42152 + }
42153 + up_write(&current->mm->mmap_sem);
42154 + if (retval == 0)
42155 + retval = set_brk(start + size, start + size + PAGE_SIZE);
42156 + if (retval < 0) {
42157 + send_sig(SIGKILL, current, 0);
42158 + goto out_free_dentry;
42159 + }
42160 + }
42161 +#endif
42162 +
42163 if (elf_interpreter) {
42164 unsigned long uninitialized_var(interp_map_addr);
42165
42166 @@ -1109,7 +1596,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
42167 * Decide what to dump of a segment, part, all or none.
42168 */
42169 static unsigned long vma_dump_size(struct vm_area_struct *vma,
42170 - unsigned long mm_flags)
42171 + unsigned long mm_flags, long signr)
42172 {
42173 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
42174
42175 @@ -1146,7 +1633,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42176 if (vma->vm_file == NULL)
42177 return 0;
42178
42179 - if (FILTER(MAPPED_PRIVATE))
42180 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
42181 goto whole;
42182
42183 /*
42184 @@ -1368,9 +1855,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
42185 {
42186 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
42187 int i = 0;
42188 - do
42189 + do {
42190 i += 2;
42191 - while (auxv[i - 2] != AT_NULL);
42192 + } while (auxv[i - 2] != AT_NULL);
42193 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
42194 }
42195
42196 @@ -1892,14 +2379,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
42197 }
42198
42199 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
42200 - unsigned long mm_flags)
42201 + struct coredump_params *cprm)
42202 {
42203 struct vm_area_struct *vma;
42204 size_t size = 0;
42205
42206 for (vma = first_vma(current, gate_vma); vma != NULL;
42207 vma = next_vma(vma, gate_vma))
42208 - size += vma_dump_size(vma, mm_flags);
42209 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42210 return size;
42211 }
42212
42213 @@ -1993,7 +2480,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42214
42215 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
42216
42217 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
42218 + offset += elf_core_vma_data_size(gate_vma, cprm);
42219 offset += elf_core_extra_data_size();
42220 e_shoff = offset;
42221
42222 @@ -2007,10 +2494,12 @@ static int elf_core_dump(struct coredump_params *cprm)
42223 offset = dataoff;
42224
42225 size += sizeof(*elf);
42226 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42227 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42228 goto end_coredump;
42229
42230 size += sizeof(*phdr4note);
42231 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42232 if (size > cprm->limit
42233 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42234 goto end_coredump;
42235 @@ -2024,7 +2513,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42236 phdr.p_offset = offset;
42237 phdr.p_vaddr = vma->vm_start;
42238 phdr.p_paddr = 0;
42239 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42240 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42241 phdr.p_memsz = vma->vm_end - vma->vm_start;
42242 offset += phdr.p_filesz;
42243 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
42244 @@ -2035,6 +2524,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42245 phdr.p_align = ELF_EXEC_PAGESIZE;
42246
42247 size += sizeof(phdr);
42248 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42249 if (size > cprm->limit
42250 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42251 goto end_coredump;
42252 @@ -2059,7 +2549,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42253 unsigned long addr;
42254 unsigned long end;
42255
42256 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42257 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42258
42259 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42260 struct page *page;
42261 @@ -2068,6 +2558,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42262 page = get_dump_page(addr);
42263 if (page) {
42264 void *kaddr = kmap(page);
42265 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42266 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42267 !dump_write(cprm->file, kaddr,
42268 PAGE_SIZE);
42269 @@ -2085,6 +2576,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42270
42271 if (e_phnum == PN_XNUM) {
42272 size += sizeof(*shdr4extnum);
42273 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42274 if (size > cprm->limit
42275 || !dump_write(cprm->file, shdr4extnum,
42276 sizeof(*shdr4extnum)))
42277 @@ -2105,6 +2597,97 @@ out:
42278
42279 #endif /* CONFIG_ELF_CORE */
42280
42281 +#ifdef CONFIG_PAX_MPROTECT
42282 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
42283 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42284 + * we'll remove VM_MAYWRITE for good on RELRO segments.
42285 + *
42286 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42287 + * basis because we want to allow the common case and not the special ones.
42288 + */
42289 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42290 +{
42291 + struct elfhdr elf_h;
42292 + struct elf_phdr elf_p;
42293 + unsigned long i;
42294 + unsigned long oldflags;
42295 + bool is_textrel_rw, is_textrel_rx, is_relro;
42296 +
42297 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42298 + return;
42299 +
42300 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42301 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42302 +
42303 +#ifdef CONFIG_PAX_ELFRELOCS
42304 + /* possible TEXTREL */
42305 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42306 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
42307 +#else
42308 + is_textrel_rw = false;
42309 + is_textrel_rx = false;
42310 +#endif
42311 +
42312 + /* possible RELRO */
42313 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42314 +
42315 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42316 + return;
42317 +
42318 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42319 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42320 +
42321 +#ifdef CONFIG_PAX_ETEXECRELOCS
42322 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42323 +#else
42324 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42325 +#endif
42326 +
42327 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42328 + !elf_check_arch(&elf_h) ||
42329 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42330 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42331 + return;
42332 +
42333 + for (i = 0UL; i < elf_h.e_phnum; i++) {
42334 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42335 + return;
42336 + switch (elf_p.p_type) {
42337 + case PT_DYNAMIC:
42338 + if (!is_textrel_rw && !is_textrel_rx)
42339 + continue;
42340 + i = 0UL;
42341 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42342 + elf_dyn dyn;
42343 +
42344 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42345 + return;
42346 + if (dyn.d_tag == DT_NULL)
42347 + return;
42348 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42349 + gr_log_textrel(vma);
42350 + if (is_textrel_rw)
42351 + vma->vm_flags |= VM_MAYWRITE;
42352 + else
42353 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
42354 + vma->vm_flags &= ~VM_MAYWRITE;
42355 + return;
42356 + }
42357 + i++;
42358 + }
42359 + return;
42360 +
42361 + case PT_GNU_RELRO:
42362 + if (!is_relro)
42363 + continue;
42364 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42365 + vma->vm_flags &= ~VM_MAYWRITE;
42366 + return;
42367 + }
42368 + }
42369 +}
42370 +#endif
42371 +
42372 static int __init init_elf_binfmt(void)
42373 {
42374 register_binfmt(&elf_format);
42375 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
42376 index 6b2daf9..a70dccb 100644
42377 --- a/fs/binfmt_flat.c
42378 +++ b/fs/binfmt_flat.c
42379 @@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42380 realdatastart = (unsigned long) -ENOMEM;
42381 printk("Unable to allocate RAM for process data, errno %d\n",
42382 (int)-realdatastart);
42383 + down_write(&current->mm->mmap_sem);
42384 do_munmap(current->mm, textpos, text_len);
42385 + up_write(&current->mm->mmap_sem);
42386 ret = realdatastart;
42387 goto err;
42388 }
42389 @@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42390 }
42391 if (IS_ERR_VALUE(result)) {
42392 printk("Unable to read data+bss, errno %d\n", (int)-result);
42393 + down_write(&current->mm->mmap_sem);
42394 do_munmap(current->mm, textpos, text_len);
42395 do_munmap(current->mm, realdatastart, len);
42396 + up_write(&current->mm->mmap_sem);
42397 ret = result;
42398 goto err;
42399 }
42400 @@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42401 }
42402 if (IS_ERR_VALUE(result)) {
42403 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42404 + down_write(&current->mm->mmap_sem);
42405 do_munmap(current->mm, textpos, text_len + data_len + extra +
42406 MAX_SHARED_LIBS * sizeof(unsigned long));
42407 + up_write(&current->mm->mmap_sem);
42408 ret = result;
42409 goto err;
42410 }
42411 diff --git a/fs/bio.c b/fs/bio.c
42412 index 84da885..2149cd9 100644
42413 --- a/fs/bio.c
42414 +++ b/fs/bio.c
42415 @@ -838,7 +838,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
42416 /*
42417 * Overflow, abort
42418 */
42419 - if (end < start)
42420 + if (end < start || end - start > INT_MAX - nr_pages)
42421 return ERR_PTR(-EINVAL);
42422
42423 nr_pages += end - start;
42424 @@ -1234,7 +1234,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42425 const int read = bio_data_dir(bio) == READ;
42426 struct bio_map_data *bmd = bio->bi_private;
42427 int i;
42428 - char *p = bmd->sgvecs[0].iov_base;
42429 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42430
42431 __bio_for_each_segment(bvec, bio, i, 0) {
42432 char *addr = page_address(bvec->bv_page);
42433 diff --git a/fs/block_dev.c b/fs/block_dev.c
42434 index ba11c30..623d736 100644
42435 --- a/fs/block_dev.c
42436 +++ b/fs/block_dev.c
42437 @@ -704,7 +704,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
42438 else if (bdev->bd_contains == bdev)
42439 return true; /* is a whole device which isn't held */
42440
42441 - else if (whole->bd_holder == bd_may_claim)
42442 + else if (whole->bd_holder == (void *)bd_may_claim)
42443 return true; /* is a partition of a device that is being partitioned */
42444 else if (whole->bd_holder != NULL)
42445 return false; /* is a partition of a held device */
42446 diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
42447 index c053e90..e5f1afc 100644
42448 --- a/fs/btrfs/check-integrity.c
42449 +++ b/fs/btrfs/check-integrity.c
42450 @@ -156,7 +156,7 @@ struct btrfsic_block {
42451 union {
42452 bio_end_io_t *bio;
42453 bh_end_io_t *bh;
42454 - } orig_bio_bh_end_io;
42455 + } __no_const orig_bio_bh_end_io;
42456 int submit_bio_bh_rw;
42457 u64 flush_gen; /* only valid if !never_written */
42458 };
42459 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42460 index 4106264..8157ede 100644
42461 --- a/fs/btrfs/ctree.c
42462 +++ b/fs/btrfs/ctree.c
42463 @@ -513,9 +513,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42464 free_extent_buffer(buf);
42465 add_root_to_dirty_list(root);
42466 } else {
42467 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42468 - parent_start = parent->start;
42469 - else
42470 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42471 + if (parent)
42472 + parent_start = parent->start;
42473 + else
42474 + parent_start = 0;
42475 + } else
42476 parent_start = 0;
42477
42478 WARN_ON(trans->transid != btrfs_header_generation(parent));
42479 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42480 index 61b16c6..b492c09 100644
42481 --- a/fs/btrfs/inode.c
42482 +++ b/fs/btrfs/inode.c
42483 @@ -7071,7 +7071,7 @@ fail:
42484 return -ENOMEM;
42485 }
42486
42487 -static int btrfs_getattr(struct vfsmount *mnt,
42488 +int btrfs_getattr(struct vfsmount *mnt,
42489 struct dentry *dentry, struct kstat *stat)
42490 {
42491 struct inode *inode = dentry->d_inode;
42492 @@ -7085,6 +7085,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42493 return 0;
42494 }
42495
42496 +EXPORT_SYMBOL(btrfs_getattr);
42497 +
42498 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
42499 +{
42500 + return BTRFS_I(inode)->root->anon_dev;
42501 +}
42502 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42503 +
42504 /*
42505 * If a file is moved, it will inherit the cow and compression flags of the new
42506 * directory.
42507 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
42508 index 14f8e1f..ab8d81f 100644
42509 --- a/fs/btrfs/ioctl.c
42510 +++ b/fs/btrfs/ioctl.c
42511 @@ -2882,9 +2882,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42512 for (i = 0; i < num_types; i++) {
42513 struct btrfs_space_info *tmp;
42514
42515 + /* Don't copy in more than we allocated */
42516 if (!slot_count)
42517 break;
42518
42519 + slot_count--;
42520 +
42521 info = NULL;
42522 rcu_read_lock();
42523 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
42524 @@ -2906,15 +2909,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42525 memcpy(dest, &space, sizeof(space));
42526 dest++;
42527 space_args.total_spaces++;
42528 - slot_count--;
42529 }
42530 - if (!slot_count)
42531 - break;
42532 }
42533 up_read(&info->groups_sem);
42534 }
42535
42536 - user_dest = (struct btrfs_ioctl_space_info *)
42537 + user_dest = (struct btrfs_ioctl_space_info __user *)
42538 (arg + sizeof(struct btrfs_ioctl_space_args));
42539
42540 if (copy_to_user(user_dest, dest_orig, alloc_size))
42541 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
42542 index 646ee21..f020f87 100644
42543 --- a/fs/btrfs/relocation.c
42544 +++ b/fs/btrfs/relocation.c
42545 @@ -1268,7 +1268,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42546 }
42547 spin_unlock(&rc->reloc_root_tree.lock);
42548
42549 - BUG_ON((struct btrfs_root *)node->data != root);
42550 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
42551
42552 if (!del) {
42553 spin_lock(&rc->reloc_root_tree.lock);
42554 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42555 index 622f469..e8d2d55 100644
42556 --- a/fs/cachefiles/bind.c
42557 +++ b/fs/cachefiles/bind.c
42558 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42559 args);
42560
42561 /* start by checking things over */
42562 - ASSERT(cache->fstop_percent >= 0 &&
42563 - cache->fstop_percent < cache->fcull_percent &&
42564 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
42565 cache->fcull_percent < cache->frun_percent &&
42566 cache->frun_percent < 100);
42567
42568 - ASSERT(cache->bstop_percent >= 0 &&
42569 - cache->bstop_percent < cache->bcull_percent &&
42570 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
42571 cache->bcull_percent < cache->brun_percent &&
42572 cache->brun_percent < 100);
42573
42574 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42575 index 0a1467b..6a53245 100644
42576 --- a/fs/cachefiles/daemon.c
42577 +++ b/fs/cachefiles/daemon.c
42578 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42579 if (n > buflen)
42580 return -EMSGSIZE;
42581
42582 - if (copy_to_user(_buffer, buffer, n) != 0)
42583 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42584 return -EFAULT;
42585
42586 return n;
42587 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42588 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42589 return -EIO;
42590
42591 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
42592 + if (datalen > PAGE_SIZE - 1)
42593 return -EOPNOTSUPP;
42594
42595 /* drag the command string into the kernel so we can parse it */
42596 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42597 if (args[0] != '%' || args[1] != '\0')
42598 return -EINVAL;
42599
42600 - if (fstop < 0 || fstop >= cache->fcull_percent)
42601 + if (fstop >= cache->fcull_percent)
42602 return cachefiles_daemon_range_error(cache, args);
42603
42604 cache->fstop_percent = fstop;
42605 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
42606 if (args[0] != '%' || args[1] != '\0')
42607 return -EINVAL;
42608
42609 - if (bstop < 0 || bstop >= cache->bcull_percent)
42610 + if (bstop >= cache->bcull_percent)
42611 return cachefiles_daemon_range_error(cache, args);
42612
42613 cache->bstop_percent = bstop;
42614 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
42615 index bd6bc1b..b627b53 100644
42616 --- a/fs/cachefiles/internal.h
42617 +++ b/fs/cachefiles/internal.h
42618 @@ -57,7 +57,7 @@ struct cachefiles_cache {
42619 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
42620 struct rb_root active_nodes; /* active nodes (can't be culled) */
42621 rwlock_t active_lock; /* lock for active_nodes */
42622 - atomic_t gravecounter; /* graveyard uniquifier */
42623 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
42624 unsigned frun_percent; /* when to stop culling (% files) */
42625 unsigned fcull_percent; /* when to start culling (% files) */
42626 unsigned fstop_percent; /* when to stop allocating (% files) */
42627 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
42628 * proc.c
42629 */
42630 #ifdef CONFIG_CACHEFILES_HISTOGRAM
42631 -extern atomic_t cachefiles_lookup_histogram[HZ];
42632 -extern atomic_t cachefiles_mkdir_histogram[HZ];
42633 -extern atomic_t cachefiles_create_histogram[HZ];
42634 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42635 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42636 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
42637
42638 extern int __init cachefiles_proc_init(void);
42639 extern void cachefiles_proc_cleanup(void);
42640 static inline
42641 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
42642 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
42643 {
42644 unsigned long jif = jiffies - start_jif;
42645 if (jif >= HZ)
42646 jif = HZ - 1;
42647 - atomic_inc(&histogram[jif]);
42648 + atomic_inc_unchecked(&histogram[jif]);
42649 }
42650
42651 #else
42652 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
42653 index 7f0771d..87d4f36 100644
42654 --- a/fs/cachefiles/namei.c
42655 +++ b/fs/cachefiles/namei.c
42656 @@ -318,7 +318,7 @@ try_again:
42657 /* first step is to make up a grave dentry in the graveyard */
42658 sprintf(nbuffer, "%08x%08x",
42659 (uint32_t) get_seconds(),
42660 - (uint32_t) atomic_inc_return(&cache->gravecounter));
42661 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
42662
42663 /* do the multiway lock magic */
42664 trap = lock_rename(cache->graveyard, dir);
42665 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
42666 index eccd339..4c1d995 100644
42667 --- a/fs/cachefiles/proc.c
42668 +++ b/fs/cachefiles/proc.c
42669 @@ -14,9 +14,9 @@
42670 #include <linux/seq_file.h>
42671 #include "internal.h"
42672
42673 -atomic_t cachefiles_lookup_histogram[HZ];
42674 -atomic_t cachefiles_mkdir_histogram[HZ];
42675 -atomic_t cachefiles_create_histogram[HZ];
42676 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42677 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42678 +atomic_unchecked_t cachefiles_create_histogram[HZ];
42679
42680 /*
42681 * display the latency histogram
42682 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
42683 return 0;
42684 default:
42685 index = (unsigned long) v - 3;
42686 - x = atomic_read(&cachefiles_lookup_histogram[index]);
42687 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
42688 - z = atomic_read(&cachefiles_create_histogram[index]);
42689 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
42690 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
42691 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
42692 if (x == 0 && y == 0 && z == 0)
42693 return 0;
42694
42695 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
42696 index 0e3c092..818480e 100644
42697 --- a/fs/cachefiles/rdwr.c
42698 +++ b/fs/cachefiles/rdwr.c
42699 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
42700 old_fs = get_fs();
42701 set_fs(KERNEL_DS);
42702 ret = file->f_op->write(
42703 - file, (const void __user *) data, len, &pos);
42704 + file, (const void __force_user *) data, len, &pos);
42705 set_fs(old_fs);
42706 kunmap(page);
42707 if (ret != len)
42708 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
42709 index 3e8094b..cb3ff3d 100644
42710 --- a/fs/ceph/dir.c
42711 +++ b/fs/ceph/dir.c
42712 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
42713 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
42714 struct ceph_mds_client *mdsc = fsc->mdsc;
42715 unsigned frag = fpos_frag(filp->f_pos);
42716 - int off = fpos_off(filp->f_pos);
42717 + unsigned int off = fpos_off(filp->f_pos);
42718 int err;
42719 u32 ftype;
42720 struct ceph_mds_reply_info_parsed *rinfo;
42721 @@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
42722 if (nd &&
42723 (nd->flags & LOOKUP_OPEN) &&
42724 !(nd->intent.open.flags & O_CREAT)) {
42725 - int mode = nd->intent.open.create_mode & ~current->fs->umask;
42726 + int mode = nd->intent.open.create_mode & ~current_umask();
42727 return ceph_lookup_open(dir, dentry, nd, mode, 1);
42728 }
42729
42730 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
42731 index 2704646..c581c91 100644
42732 --- a/fs/cifs/cifs_debug.c
42733 +++ b/fs/cifs/cifs_debug.c
42734 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42735
42736 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
42737 #ifdef CONFIG_CIFS_STATS2
42738 - atomic_set(&totBufAllocCount, 0);
42739 - atomic_set(&totSmBufAllocCount, 0);
42740 + atomic_set_unchecked(&totBufAllocCount, 0);
42741 + atomic_set_unchecked(&totSmBufAllocCount, 0);
42742 #endif /* CONFIG_CIFS_STATS2 */
42743 spin_lock(&cifs_tcp_ses_lock);
42744 list_for_each(tmp1, &cifs_tcp_ses_list) {
42745 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42746 tcon = list_entry(tmp3,
42747 struct cifs_tcon,
42748 tcon_list);
42749 - atomic_set(&tcon->num_smbs_sent, 0);
42750 - atomic_set(&tcon->num_writes, 0);
42751 - atomic_set(&tcon->num_reads, 0);
42752 - atomic_set(&tcon->num_oplock_brks, 0);
42753 - atomic_set(&tcon->num_opens, 0);
42754 - atomic_set(&tcon->num_posixopens, 0);
42755 - atomic_set(&tcon->num_posixmkdirs, 0);
42756 - atomic_set(&tcon->num_closes, 0);
42757 - atomic_set(&tcon->num_deletes, 0);
42758 - atomic_set(&tcon->num_mkdirs, 0);
42759 - atomic_set(&tcon->num_rmdirs, 0);
42760 - atomic_set(&tcon->num_renames, 0);
42761 - atomic_set(&tcon->num_t2renames, 0);
42762 - atomic_set(&tcon->num_ffirst, 0);
42763 - atomic_set(&tcon->num_fnext, 0);
42764 - atomic_set(&tcon->num_fclose, 0);
42765 - atomic_set(&tcon->num_hardlinks, 0);
42766 - atomic_set(&tcon->num_symlinks, 0);
42767 - atomic_set(&tcon->num_locks, 0);
42768 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
42769 + atomic_set_unchecked(&tcon->num_writes, 0);
42770 + atomic_set_unchecked(&tcon->num_reads, 0);
42771 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
42772 + atomic_set_unchecked(&tcon->num_opens, 0);
42773 + atomic_set_unchecked(&tcon->num_posixopens, 0);
42774 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
42775 + atomic_set_unchecked(&tcon->num_closes, 0);
42776 + atomic_set_unchecked(&tcon->num_deletes, 0);
42777 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
42778 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
42779 + atomic_set_unchecked(&tcon->num_renames, 0);
42780 + atomic_set_unchecked(&tcon->num_t2renames, 0);
42781 + atomic_set_unchecked(&tcon->num_ffirst, 0);
42782 + atomic_set_unchecked(&tcon->num_fnext, 0);
42783 + atomic_set_unchecked(&tcon->num_fclose, 0);
42784 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
42785 + atomic_set_unchecked(&tcon->num_symlinks, 0);
42786 + atomic_set_unchecked(&tcon->num_locks, 0);
42787 }
42788 }
42789 }
42790 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42791 smBufAllocCount.counter, cifs_min_small);
42792 #ifdef CONFIG_CIFS_STATS2
42793 seq_printf(m, "Total Large %d Small %d Allocations\n",
42794 - atomic_read(&totBufAllocCount),
42795 - atomic_read(&totSmBufAllocCount));
42796 + atomic_read_unchecked(&totBufAllocCount),
42797 + atomic_read_unchecked(&totSmBufAllocCount));
42798 #endif /* CONFIG_CIFS_STATS2 */
42799
42800 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
42801 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42802 if (tcon->need_reconnect)
42803 seq_puts(m, "\tDISCONNECTED ");
42804 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
42805 - atomic_read(&tcon->num_smbs_sent),
42806 - atomic_read(&tcon->num_oplock_brks));
42807 + atomic_read_unchecked(&tcon->num_smbs_sent),
42808 + atomic_read_unchecked(&tcon->num_oplock_brks));
42809 seq_printf(m, "\nReads: %d Bytes: %lld",
42810 - atomic_read(&tcon->num_reads),
42811 + atomic_read_unchecked(&tcon->num_reads),
42812 (long long)(tcon->bytes_read));
42813 seq_printf(m, "\nWrites: %d Bytes: %lld",
42814 - atomic_read(&tcon->num_writes),
42815 + atomic_read_unchecked(&tcon->num_writes),
42816 (long long)(tcon->bytes_written));
42817 seq_printf(m, "\nFlushes: %d",
42818 - atomic_read(&tcon->num_flushes));
42819 + atomic_read_unchecked(&tcon->num_flushes));
42820 seq_printf(m, "\nLocks: %d HardLinks: %d "
42821 "Symlinks: %d",
42822 - atomic_read(&tcon->num_locks),
42823 - atomic_read(&tcon->num_hardlinks),
42824 - atomic_read(&tcon->num_symlinks));
42825 + atomic_read_unchecked(&tcon->num_locks),
42826 + atomic_read_unchecked(&tcon->num_hardlinks),
42827 + atomic_read_unchecked(&tcon->num_symlinks));
42828 seq_printf(m, "\nOpens: %d Closes: %d "
42829 "Deletes: %d",
42830 - atomic_read(&tcon->num_opens),
42831 - atomic_read(&tcon->num_closes),
42832 - atomic_read(&tcon->num_deletes));
42833 + atomic_read_unchecked(&tcon->num_opens),
42834 + atomic_read_unchecked(&tcon->num_closes),
42835 + atomic_read_unchecked(&tcon->num_deletes));
42836 seq_printf(m, "\nPosix Opens: %d "
42837 "Posix Mkdirs: %d",
42838 - atomic_read(&tcon->num_posixopens),
42839 - atomic_read(&tcon->num_posixmkdirs));
42840 + atomic_read_unchecked(&tcon->num_posixopens),
42841 + atomic_read_unchecked(&tcon->num_posixmkdirs));
42842 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
42843 - atomic_read(&tcon->num_mkdirs),
42844 - atomic_read(&tcon->num_rmdirs));
42845 + atomic_read_unchecked(&tcon->num_mkdirs),
42846 + atomic_read_unchecked(&tcon->num_rmdirs));
42847 seq_printf(m, "\nRenames: %d T2 Renames %d",
42848 - atomic_read(&tcon->num_renames),
42849 - atomic_read(&tcon->num_t2renames));
42850 + atomic_read_unchecked(&tcon->num_renames),
42851 + atomic_read_unchecked(&tcon->num_t2renames));
42852 seq_printf(m, "\nFindFirst: %d FNext %d "
42853 "FClose %d",
42854 - atomic_read(&tcon->num_ffirst),
42855 - atomic_read(&tcon->num_fnext),
42856 - atomic_read(&tcon->num_fclose));
42857 + atomic_read_unchecked(&tcon->num_ffirst),
42858 + atomic_read_unchecked(&tcon->num_fnext),
42859 + atomic_read_unchecked(&tcon->num_fclose));
42860 }
42861 }
42862 }
42863 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
42864 index 541ef81..a78deb8 100644
42865 --- a/fs/cifs/cifsfs.c
42866 +++ b/fs/cifs/cifsfs.c
42867 @@ -985,7 +985,7 @@ cifs_init_request_bufs(void)
42868 cifs_req_cachep = kmem_cache_create("cifs_request",
42869 CIFSMaxBufSize +
42870 MAX_CIFS_HDR_SIZE, 0,
42871 - SLAB_HWCACHE_ALIGN, NULL);
42872 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
42873 if (cifs_req_cachep == NULL)
42874 return -ENOMEM;
42875
42876 @@ -1012,7 +1012,7 @@ cifs_init_request_bufs(void)
42877 efficient to alloc 1 per page off the slab compared to 17K (5page)
42878 alloc of large cifs buffers even when page debugging is on */
42879 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
42880 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
42881 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
42882 NULL);
42883 if (cifs_sm_req_cachep == NULL) {
42884 mempool_destroy(cifs_req_poolp);
42885 @@ -1097,8 +1097,8 @@ init_cifs(void)
42886 atomic_set(&bufAllocCount, 0);
42887 atomic_set(&smBufAllocCount, 0);
42888 #ifdef CONFIG_CIFS_STATS2
42889 - atomic_set(&totBufAllocCount, 0);
42890 - atomic_set(&totSmBufAllocCount, 0);
42891 + atomic_set_unchecked(&totBufAllocCount, 0);
42892 + atomic_set_unchecked(&totSmBufAllocCount, 0);
42893 #endif /* CONFIG_CIFS_STATS2 */
42894
42895 atomic_set(&midCount, 0);
42896 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
42897 index 73fea28..b996b84 100644
42898 --- a/fs/cifs/cifsglob.h
42899 +++ b/fs/cifs/cifsglob.h
42900 @@ -439,28 +439,28 @@ struct cifs_tcon {
42901 __u16 Flags; /* optional support bits */
42902 enum statusEnum tidStatus;
42903 #ifdef CONFIG_CIFS_STATS
42904 - atomic_t num_smbs_sent;
42905 - atomic_t num_writes;
42906 - atomic_t num_reads;
42907 - atomic_t num_flushes;
42908 - atomic_t num_oplock_brks;
42909 - atomic_t num_opens;
42910 - atomic_t num_closes;
42911 - atomic_t num_deletes;
42912 - atomic_t num_mkdirs;
42913 - atomic_t num_posixopens;
42914 - atomic_t num_posixmkdirs;
42915 - atomic_t num_rmdirs;
42916 - atomic_t num_renames;
42917 - atomic_t num_t2renames;
42918 - atomic_t num_ffirst;
42919 - atomic_t num_fnext;
42920 - atomic_t num_fclose;
42921 - atomic_t num_hardlinks;
42922 - atomic_t num_symlinks;
42923 - atomic_t num_locks;
42924 - atomic_t num_acl_get;
42925 - atomic_t num_acl_set;
42926 + atomic_unchecked_t num_smbs_sent;
42927 + atomic_unchecked_t num_writes;
42928 + atomic_unchecked_t num_reads;
42929 + atomic_unchecked_t num_flushes;
42930 + atomic_unchecked_t num_oplock_brks;
42931 + atomic_unchecked_t num_opens;
42932 + atomic_unchecked_t num_closes;
42933 + atomic_unchecked_t num_deletes;
42934 + atomic_unchecked_t num_mkdirs;
42935 + atomic_unchecked_t num_posixopens;
42936 + atomic_unchecked_t num_posixmkdirs;
42937 + atomic_unchecked_t num_rmdirs;
42938 + atomic_unchecked_t num_renames;
42939 + atomic_unchecked_t num_t2renames;
42940 + atomic_unchecked_t num_ffirst;
42941 + atomic_unchecked_t num_fnext;
42942 + atomic_unchecked_t num_fclose;
42943 + atomic_unchecked_t num_hardlinks;
42944 + atomic_unchecked_t num_symlinks;
42945 + atomic_unchecked_t num_locks;
42946 + atomic_unchecked_t num_acl_get;
42947 + atomic_unchecked_t num_acl_set;
42948 #ifdef CONFIG_CIFS_STATS2
42949 unsigned long long time_writes;
42950 unsigned long long time_reads;
42951 @@ -677,7 +677,7 @@ convert_delimiter(char *path, char delim)
42952 }
42953
42954 #ifdef CONFIG_CIFS_STATS
42955 -#define cifs_stats_inc atomic_inc
42956 +#define cifs_stats_inc atomic_inc_unchecked
42957
42958 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
42959 unsigned int bytes)
42960 @@ -1036,8 +1036,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
42961 /* Various Debug counters */
42962 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
42963 #ifdef CONFIG_CIFS_STATS2
42964 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
42965 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
42966 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
42967 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
42968 #endif
42969 GLOBAL_EXTERN atomic_t smBufAllocCount;
42970 GLOBAL_EXTERN atomic_t midCount;
42971 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
42972 index 6b0e064..94e6c3c 100644
42973 --- a/fs/cifs/link.c
42974 +++ b/fs/cifs/link.c
42975 @@ -600,7 +600,7 @@ symlink_exit:
42976
42977 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
42978 {
42979 - char *p = nd_get_link(nd);
42980 + const char *p = nd_get_link(nd);
42981 if (!IS_ERR(p))
42982 kfree(p);
42983 }
42984 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
42985 index c29d1aa..58018da 100644
42986 --- a/fs/cifs/misc.c
42987 +++ b/fs/cifs/misc.c
42988 @@ -156,7 +156,7 @@ cifs_buf_get(void)
42989 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
42990 atomic_inc(&bufAllocCount);
42991 #ifdef CONFIG_CIFS_STATS2
42992 - atomic_inc(&totBufAllocCount);
42993 + atomic_inc_unchecked(&totBufAllocCount);
42994 #endif /* CONFIG_CIFS_STATS2 */
42995 }
42996
42997 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
42998 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
42999 atomic_inc(&smBufAllocCount);
43000 #ifdef CONFIG_CIFS_STATS2
43001 - atomic_inc(&totSmBufAllocCount);
43002 + atomic_inc_unchecked(&totSmBufAllocCount);
43003 #endif /* CONFIG_CIFS_STATS2 */
43004
43005 }
43006 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
43007 index 6901578..d402eb5 100644
43008 --- a/fs/coda/cache.c
43009 +++ b/fs/coda/cache.c
43010 @@ -24,7 +24,7 @@
43011 #include "coda_linux.h"
43012 #include "coda_cache.h"
43013
43014 -static atomic_t permission_epoch = ATOMIC_INIT(0);
43015 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
43016
43017 /* replace or extend an acl cache hit */
43018 void coda_cache_enter(struct inode *inode, int mask)
43019 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
43020 struct coda_inode_info *cii = ITOC(inode);
43021
43022 spin_lock(&cii->c_lock);
43023 - cii->c_cached_epoch = atomic_read(&permission_epoch);
43024 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
43025 if (cii->c_uid != current_fsuid()) {
43026 cii->c_uid = current_fsuid();
43027 cii->c_cached_perm = mask;
43028 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
43029 {
43030 struct coda_inode_info *cii = ITOC(inode);
43031 spin_lock(&cii->c_lock);
43032 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
43033 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43034 spin_unlock(&cii->c_lock);
43035 }
43036
43037 /* remove all acl caches */
43038 void coda_cache_clear_all(struct super_block *sb)
43039 {
43040 - atomic_inc(&permission_epoch);
43041 + atomic_inc_unchecked(&permission_epoch);
43042 }
43043
43044
43045 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
43046 spin_lock(&cii->c_lock);
43047 hit = (mask & cii->c_cached_perm) == mask &&
43048 cii->c_uid == current_fsuid() &&
43049 - cii->c_cached_epoch == atomic_read(&permission_epoch);
43050 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
43051 spin_unlock(&cii->c_lock);
43052
43053 return hit;
43054 diff --git a/fs/compat.c b/fs/compat.c
43055 index f2944ac..62845d2 100644
43056 --- a/fs/compat.c
43057 +++ b/fs/compat.c
43058 @@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
43059
43060 set_fs(KERNEL_DS);
43061 /* The __user pointer cast is valid because of the set_fs() */
43062 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
43063 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
43064 set_fs(oldfs);
43065 /* truncating is ok because it's a user address */
43066 if (!ret)
43067 @@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
43068 goto out;
43069
43070 ret = -EINVAL;
43071 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
43072 + if (nr_segs > UIO_MAXIOV)
43073 goto out;
43074 if (nr_segs > fast_segs) {
43075 ret = -ENOMEM;
43076 @@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
43077
43078 struct compat_readdir_callback {
43079 struct compat_old_linux_dirent __user *dirent;
43080 + struct file * file;
43081 int result;
43082 };
43083
43084 @@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
43085 buf->result = -EOVERFLOW;
43086 return -EOVERFLOW;
43087 }
43088 +
43089 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43090 + return 0;
43091 +
43092 buf->result++;
43093 dirent = buf->dirent;
43094 if (!access_ok(VERIFY_WRITE, dirent,
43095 @@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
43096
43097 buf.result = 0;
43098 buf.dirent = dirent;
43099 + buf.file = file;
43100
43101 error = vfs_readdir(file, compat_fillonedir, &buf);
43102 if (buf.result)
43103 @@ -900,6 +906,7 @@ struct compat_linux_dirent {
43104 struct compat_getdents_callback {
43105 struct compat_linux_dirent __user *current_dir;
43106 struct compat_linux_dirent __user *previous;
43107 + struct file * file;
43108 int count;
43109 int error;
43110 };
43111 @@ -921,6 +928,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
43112 buf->error = -EOVERFLOW;
43113 return -EOVERFLOW;
43114 }
43115 +
43116 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43117 + return 0;
43118 +
43119 dirent = buf->previous;
43120 if (dirent) {
43121 if (__put_user(offset, &dirent->d_off))
43122 @@ -968,6 +979,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
43123 buf.previous = NULL;
43124 buf.count = count;
43125 buf.error = 0;
43126 + buf.file = file;
43127
43128 error = vfs_readdir(file, compat_filldir, &buf);
43129 if (error >= 0)
43130 @@ -989,6 +1001,7 @@ out:
43131 struct compat_getdents_callback64 {
43132 struct linux_dirent64 __user *current_dir;
43133 struct linux_dirent64 __user *previous;
43134 + struct file * file;
43135 int count;
43136 int error;
43137 };
43138 @@ -1005,6 +1018,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
43139 buf->error = -EINVAL; /* only used if we fail.. */
43140 if (reclen > buf->count)
43141 return -EINVAL;
43142 +
43143 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43144 + return 0;
43145 +
43146 dirent = buf->previous;
43147
43148 if (dirent) {
43149 @@ -1056,13 +1073,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
43150 buf.previous = NULL;
43151 buf.count = count;
43152 buf.error = 0;
43153 + buf.file = file;
43154
43155 error = vfs_readdir(file, compat_filldir64, &buf);
43156 if (error >= 0)
43157 error = buf.error;
43158 lastdirent = buf.previous;
43159 if (lastdirent) {
43160 - typeof(lastdirent->d_off) d_off = file->f_pos;
43161 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
43162 if (__put_user_unaligned(d_off, &lastdirent->d_off))
43163 error = -EFAULT;
43164 else
43165 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
43166 index 112e45a..b59845b 100644
43167 --- a/fs/compat_binfmt_elf.c
43168 +++ b/fs/compat_binfmt_elf.c
43169 @@ -30,11 +30,13 @@
43170 #undef elf_phdr
43171 #undef elf_shdr
43172 #undef elf_note
43173 +#undef elf_dyn
43174 #undef elf_addr_t
43175 #define elfhdr elf32_hdr
43176 #define elf_phdr elf32_phdr
43177 #define elf_shdr elf32_shdr
43178 #define elf_note elf32_note
43179 +#define elf_dyn Elf32_Dyn
43180 #define elf_addr_t Elf32_Addr
43181
43182 /*
43183 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
43184 index debdfe0..75d31d4 100644
43185 --- a/fs/compat_ioctl.c
43186 +++ b/fs/compat_ioctl.c
43187 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
43188
43189 err = get_user(palp, &up->palette);
43190 err |= get_user(length, &up->length);
43191 + if (err)
43192 + return -EFAULT;
43193
43194 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
43195 err = put_user(compat_ptr(palp), &up_native->palette);
43196 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
43197 return -EFAULT;
43198 if (__get_user(udata, &ss32->iomem_base))
43199 return -EFAULT;
43200 - ss.iomem_base = compat_ptr(udata);
43201 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
43202 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
43203 __get_user(ss.port_high, &ss32->port_high))
43204 return -EFAULT;
43205 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
43206 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
43207 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
43208 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
43209 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43210 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43211 return -EFAULT;
43212
43213 return ioctl_preallocate(file, p);
43214 @@ -1610,8 +1612,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
43215 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
43216 {
43217 unsigned int a, b;
43218 - a = *(unsigned int *)p;
43219 - b = *(unsigned int *)q;
43220 + a = *(const unsigned int *)p;
43221 + b = *(const unsigned int *)q;
43222 if (a > b)
43223 return 1;
43224 if (a < b)
43225 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
43226 index 7e6c52d..94bc756 100644
43227 --- a/fs/configfs/dir.c
43228 +++ b/fs/configfs/dir.c
43229 @@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43230 }
43231 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
43232 struct configfs_dirent *next;
43233 - const char * name;
43234 + const unsigned char * name;
43235 + char d_name[sizeof(next->s_dentry->d_iname)];
43236 int len;
43237 struct inode *inode = NULL;
43238
43239 @@ -1574,7 +1575,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43240 continue;
43241
43242 name = configfs_get_name(next);
43243 - len = strlen(name);
43244 + if (next->s_dentry && name == next->s_dentry->d_iname) {
43245 + len = next->s_dentry->d_name.len;
43246 + memcpy(d_name, name, len);
43247 + name = d_name;
43248 + } else
43249 + len = strlen(name);
43250
43251 /*
43252 * We'll have a dentry and an inode for
43253 diff --git a/fs/dcache.c b/fs/dcache.c
43254 index b80531c..8ca7e2d 100644
43255 --- a/fs/dcache.c
43256 +++ b/fs/dcache.c
43257 @@ -3084,7 +3084,7 @@ void __init vfs_caches_init(unsigned long mempages)
43258 mempages -= reserve;
43259
43260 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43261 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43262 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43263
43264 dcache_init();
43265 inode_init();
43266 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
43267 index b80bc84..0d46d1a 100644
43268 --- a/fs/debugfs/inode.c
43269 +++ b/fs/debugfs/inode.c
43270 @@ -408,7 +408,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
43271 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
43272 {
43273 return debugfs_create_file(name,
43274 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43275 + S_IFDIR | S_IRWXU,
43276 +#else
43277 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43278 +#endif
43279 parent, NULL, NULL);
43280 }
43281 EXPORT_SYMBOL_GPL(debugfs_create_dir);
43282 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
43283 index ab35b11..b30af66 100644
43284 --- a/fs/ecryptfs/inode.c
43285 +++ b/fs/ecryptfs/inode.c
43286 @@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43287 old_fs = get_fs();
43288 set_fs(get_ds());
43289 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43290 - (char __user *)lower_buf,
43291 + (char __force_user *)lower_buf,
43292 lower_bufsiz);
43293 set_fs(old_fs);
43294 if (rc < 0)
43295 @@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43296 }
43297 old_fs = get_fs();
43298 set_fs(get_ds());
43299 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
43300 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43301 set_fs(old_fs);
43302 if (rc < 0) {
43303 kfree(buf);
43304 @@ -733,7 +733,7 @@ out:
43305 static void
43306 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43307 {
43308 - char *buf = nd_get_link(nd);
43309 + const char *buf = nd_get_link(nd);
43310 if (!IS_ERR(buf)) {
43311 /* Free the char* */
43312 kfree(buf);
43313 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
43314 index 3a06f40..f7af544 100644
43315 --- a/fs/ecryptfs/miscdev.c
43316 +++ b/fs/ecryptfs/miscdev.c
43317 @@ -345,7 +345,7 @@ check_list:
43318 goto out_unlock_msg_ctx;
43319 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
43320 if (msg_ctx->msg) {
43321 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
43322 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43323 goto out_unlock_msg_ctx;
43324 i += packet_length_size;
43325 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
43326 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
43327 index b2a34a1..162fa69 100644
43328 --- a/fs/ecryptfs/read_write.c
43329 +++ b/fs/ecryptfs/read_write.c
43330 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43331 return -EIO;
43332 fs_save = get_fs();
43333 set_fs(get_ds());
43334 - rc = vfs_write(lower_file, data, size, &offset);
43335 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43336 set_fs(fs_save);
43337 mark_inode_dirty_sync(ecryptfs_inode);
43338 return rc;
43339 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43340 return -EIO;
43341 fs_save = get_fs();
43342 set_fs(get_ds());
43343 - rc = vfs_read(lower_file, data, size, &offset);
43344 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43345 set_fs(fs_save);
43346 return rc;
43347 }
43348 diff --git a/fs/exec.c b/fs/exec.c
43349 index b1fd202..582240d 100644
43350 --- a/fs/exec.c
43351 +++ b/fs/exec.c
43352 @@ -55,6 +55,15 @@
43353 #include <linux/pipe_fs_i.h>
43354 #include <linux/oom.h>
43355 #include <linux/compat.h>
43356 +#include <linux/random.h>
43357 +#include <linux/seq_file.h>
43358 +
43359 +#ifdef CONFIG_PAX_REFCOUNT
43360 +#include <linux/kallsyms.h>
43361 +#include <linux/kdebug.h>
43362 +#endif
43363 +
43364 +#include <trace/events/fs.h>
43365
43366 #include <asm/uaccess.h>
43367 #include <asm/mmu_context.h>
43368 @@ -66,6 +75,18 @@
43369
43370 #include <trace/events/sched.h>
43371
43372 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
43373 +void __weak pax_set_initial_flags(struct linux_binprm *bprm)
43374 +{
43375 + WARN_ONCE(1, "PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
43376 +}
43377 +#endif
43378 +
43379 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43380 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43381 +EXPORT_SYMBOL(pax_set_initial_flags_func);
43382 +#endif
43383 +
43384 int core_uses_pid;
43385 char core_pattern[CORENAME_MAX_SIZE] = "core";
43386 unsigned int core_pipe_limit;
43387 @@ -75,7 +96,7 @@ struct core_name {
43388 char *corename;
43389 int used, size;
43390 };
43391 -static atomic_t call_count = ATOMIC_INIT(1);
43392 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43393
43394 /* The maximal length of core_pattern is also specified in sysctl.c */
43395
43396 @@ -191,18 +212,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43397 int write)
43398 {
43399 struct page *page;
43400 - int ret;
43401
43402 -#ifdef CONFIG_STACK_GROWSUP
43403 - if (write) {
43404 - ret = expand_downwards(bprm->vma, pos);
43405 - if (ret < 0)
43406 - return NULL;
43407 - }
43408 -#endif
43409 - ret = get_user_pages(current, bprm->mm, pos,
43410 - 1, write, 1, &page, NULL);
43411 - if (ret <= 0)
43412 + if (0 > expand_downwards(bprm->vma, pos))
43413 + return NULL;
43414 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
43415 return NULL;
43416
43417 if (write) {
43418 @@ -218,6 +231,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43419 if (size <= ARG_MAX)
43420 return page;
43421
43422 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43423 + // only allow 512KB for argv+env on suid/sgid binaries
43424 + // to prevent easy ASLR exhaustion
43425 + if (((bprm->cred->euid != current_euid()) ||
43426 + (bprm->cred->egid != current_egid())) &&
43427 + (size > (512 * 1024))) {
43428 + put_page(page);
43429 + return NULL;
43430 + }
43431 +#endif
43432 +
43433 /*
43434 * Limit to 1/4-th the stack size for the argv+env strings.
43435 * This ensures that:
43436 @@ -277,6 +301,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43437 vma->vm_end = STACK_TOP_MAX;
43438 vma->vm_start = vma->vm_end - PAGE_SIZE;
43439 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43440 +
43441 +#ifdef CONFIG_PAX_SEGMEXEC
43442 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43443 +#endif
43444 +
43445 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
43446 INIT_LIST_HEAD(&vma->anon_vma_chain);
43447
43448 @@ -291,6 +320,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43449 mm->stack_vm = mm->total_vm = 1;
43450 up_write(&mm->mmap_sem);
43451 bprm->p = vma->vm_end - sizeof(void *);
43452 +
43453 +#ifdef CONFIG_PAX_RANDUSTACK
43454 + if (randomize_va_space)
43455 + bprm->p ^= random32() & ~PAGE_MASK;
43456 +#endif
43457 +
43458 return 0;
43459 err:
43460 up_write(&mm->mmap_sem);
43461 @@ -399,19 +434,7 @@ err:
43462 return err;
43463 }
43464
43465 -struct user_arg_ptr {
43466 -#ifdef CONFIG_COMPAT
43467 - bool is_compat;
43468 -#endif
43469 - union {
43470 - const char __user *const __user *native;
43471 -#ifdef CONFIG_COMPAT
43472 - compat_uptr_t __user *compat;
43473 -#endif
43474 - } ptr;
43475 -};
43476 -
43477 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43478 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43479 {
43480 const char __user *native;
43481
43482 @@ -420,14 +443,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43483 compat_uptr_t compat;
43484
43485 if (get_user(compat, argv.ptr.compat + nr))
43486 - return ERR_PTR(-EFAULT);
43487 + return (const char __force_user *)ERR_PTR(-EFAULT);
43488
43489 return compat_ptr(compat);
43490 }
43491 #endif
43492
43493 if (get_user(native, argv.ptr.native + nr))
43494 - return ERR_PTR(-EFAULT);
43495 + return (const char __force_user *)ERR_PTR(-EFAULT);
43496
43497 return native;
43498 }
43499 @@ -446,7 +469,7 @@ static int count(struct user_arg_ptr argv, int max)
43500 if (!p)
43501 break;
43502
43503 - if (IS_ERR(p))
43504 + if (IS_ERR((const char __force_kernel *)p))
43505 return -EFAULT;
43506
43507 if (i++ >= max)
43508 @@ -480,7 +503,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43509
43510 ret = -EFAULT;
43511 str = get_user_arg_ptr(argv, argc);
43512 - if (IS_ERR(str))
43513 + if (IS_ERR((const char __force_kernel *)str))
43514 goto out;
43515
43516 len = strnlen_user(str, MAX_ARG_STRLEN);
43517 @@ -562,7 +585,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43518 int r;
43519 mm_segment_t oldfs = get_fs();
43520 struct user_arg_ptr argv = {
43521 - .ptr.native = (const char __user *const __user *)__argv,
43522 + .ptr.native = (const char __force_user *const __force_user *)__argv,
43523 };
43524
43525 set_fs(KERNEL_DS);
43526 @@ -597,7 +620,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43527 unsigned long new_end = old_end - shift;
43528 struct mmu_gather tlb;
43529
43530 - BUG_ON(new_start > new_end);
43531 + if (new_start >= new_end || new_start < mmap_min_addr)
43532 + return -ENOMEM;
43533
43534 /*
43535 * ensure there are no vmas between where we want to go
43536 @@ -606,6 +630,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43537 if (vma != find_vma(mm, new_start))
43538 return -EFAULT;
43539
43540 +#ifdef CONFIG_PAX_SEGMEXEC
43541 + BUG_ON(pax_find_mirror_vma(vma));
43542 +#endif
43543 +
43544 /*
43545 * cover the whole range: [new_start, old_end)
43546 */
43547 @@ -686,10 +714,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43548 stack_top = arch_align_stack(stack_top);
43549 stack_top = PAGE_ALIGN(stack_top);
43550
43551 - if (unlikely(stack_top < mmap_min_addr) ||
43552 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43553 - return -ENOMEM;
43554 -
43555 stack_shift = vma->vm_end - stack_top;
43556
43557 bprm->p -= stack_shift;
43558 @@ -701,8 +725,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43559 bprm->exec -= stack_shift;
43560
43561 down_write(&mm->mmap_sem);
43562 +
43563 + /* Move stack pages down in memory. */
43564 + if (stack_shift) {
43565 + ret = shift_arg_pages(vma, stack_shift);
43566 + if (ret)
43567 + goto out_unlock;
43568 + }
43569 +
43570 vm_flags = VM_STACK_FLAGS;
43571
43572 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43573 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43574 + vm_flags &= ~VM_EXEC;
43575 +
43576 +#ifdef CONFIG_PAX_MPROTECT
43577 + if (mm->pax_flags & MF_PAX_MPROTECT)
43578 + vm_flags &= ~VM_MAYEXEC;
43579 +#endif
43580 +
43581 + }
43582 +#endif
43583 +
43584 /*
43585 * Adjust stack execute permissions; explicitly enable for
43586 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
43587 @@ -721,13 +765,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43588 goto out_unlock;
43589 BUG_ON(prev != vma);
43590
43591 - /* Move stack pages down in memory. */
43592 - if (stack_shift) {
43593 - ret = shift_arg_pages(vma, stack_shift);
43594 - if (ret)
43595 - goto out_unlock;
43596 - }
43597 -
43598 /* mprotect_fixup is overkill to remove the temporary stack flags */
43599 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43600
43601 @@ -785,6 +822,8 @@ struct file *open_exec(const char *name)
43602
43603 fsnotify_open(file);
43604
43605 + trace_open_exec(name);
43606 +
43607 err = deny_write_access(file);
43608 if (err)
43609 goto exit;
43610 @@ -808,7 +847,7 @@ int kernel_read(struct file *file, loff_t offset,
43611 old_fs = get_fs();
43612 set_fs(get_ds());
43613 /* The cast to a user pointer is valid due to the set_fs() */
43614 - result = vfs_read(file, (void __user *)addr, count, &pos);
43615 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
43616 set_fs(old_fs);
43617 return result;
43618 }
43619 @@ -1254,7 +1293,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
43620 }
43621 rcu_read_unlock();
43622
43623 - if (p->fs->users > n_fs) {
43624 + if (atomic_read(&p->fs->users) > n_fs) {
43625 bprm->unsafe |= LSM_UNSAFE_SHARE;
43626 } else {
43627 res = -EAGAIN;
43628 @@ -1451,6 +1490,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
43629
43630 EXPORT_SYMBOL(search_binary_handler);
43631
43632 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43633 +static DEFINE_PER_CPU(u64, exec_counter);
43634 +static int __init init_exec_counters(void)
43635 +{
43636 + unsigned int cpu;
43637 +
43638 + for_each_possible_cpu(cpu) {
43639 + per_cpu(exec_counter, cpu) = (u64)cpu;
43640 + }
43641 +
43642 + return 0;
43643 +}
43644 +early_initcall(init_exec_counters);
43645 +static inline void increment_exec_counter(void)
43646 +{
43647 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
43648 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
43649 +}
43650 +#else
43651 +static inline void increment_exec_counter(void) {}
43652 +#endif
43653 +
43654 /*
43655 * sys_execve() executes a new program.
43656 */
43657 @@ -1459,6 +1520,11 @@ static int do_execve_common(const char *filename,
43658 struct user_arg_ptr envp,
43659 struct pt_regs *regs)
43660 {
43661 +#ifdef CONFIG_GRKERNSEC
43662 + struct file *old_exec_file;
43663 + struct acl_subject_label *old_acl;
43664 + struct rlimit old_rlim[RLIM_NLIMITS];
43665 +#endif
43666 struct linux_binprm *bprm;
43667 struct file *file;
43668 struct files_struct *displaced;
43669 @@ -1466,6 +1532,8 @@ static int do_execve_common(const char *filename,
43670 int retval;
43671 const struct cred *cred = current_cred();
43672
43673 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
43674 +
43675 /*
43676 * We move the actual failure in case of RLIMIT_NPROC excess from
43677 * set*uid() to execve() because too many poorly written programs
43678 @@ -1506,12 +1574,27 @@ static int do_execve_common(const char *filename,
43679 if (IS_ERR(file))
43680 goto out_unmark;
43681
43682 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
43683 + retval = -EPERM;
43684 + goto out_file;
43685 + }
43686 +
43687 sched_exec();
43688
43689 bprm->file = file;
43690 bprm->filename = filename;
43691 bprm->interp = filename;
43692
43693 + if (gr_process_user_ban()) {
43694 + retval = -EPERM;
43695 + goto out_file;
43696 + }
43697 +
43698 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
43699 + retval = -EACCES;
43700 + goto out_file;
43701 + }
43702 +
43703 retval = bprm_mm_init(bprm);
43704 if (retval)
43705 goto out_file;
43706 @@ -1528,24 +1611,65 @@ static int do_execve_common(const char *filename,
43707 if (retval < 0)
43708 goto out;
43709
43710 +#ifdef CONFIG_GRKERNSEC
43711 + old_acl = current->acl;
43712 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
43713 + old_exec_file = current->exec_file;
43714 + get_file(file);
43715 + current->exec_file = file;
43716 +#endif
43717 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43718 + /* limit suid stack to 8MB
43719 + we saved the old limits above and will restore them if this exec fails
43720 + */
43721 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
43722 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
43723 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
43724 +#endif
43725 +
43726 + if (!gr_tpe_allow(file)) {
43727 + retval = -EACCES;
43728 + goto out_fail;
43729 + }
43730 +
43731 + if (gr_check_crash_exec(file)) {
43732 + retval = -EACCES;
43733 + goto out_fail;
43734 + }
43735 +
43736 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
43737 + bprm->unsafe);
43738 + if (retval < 0)
43739 + goto out_fail;
43740 +
43741 retval = copy_strings_kernel(1, &bprm->filename, bprm);
43742 if (retval < 0)
43743 - goto out;
43744 + goto out_fail;
43745
43746 bprm->exec = bprm->p;
43747 retval = copy_strings(bprm->envc, envp, bprm);
43748 if (retval < 0)
43749 - goto out;
43750 + goto out_fail;
43751
43752 retval = copy_strings(bprm->argc, argv, bprm);
43753 if (retval < 0)
43754 - goto out;
43755 + goto out_fail;
43756 +
43757 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
43758 +
43759 + gr_handle_exec_args(bprm, argv);
43760
43761 retval = search_binary_handler(bprm,regs);
43762 if (retval < 0)
43763 - goto out;
43764 + goto out_fail;
43765 +#ifdef CONFIG_GRKERNSEC
43766 + if (old_exec_file)
43767 + fput(old_exec_file);
43768 +#endif
43769
43770 /* execve succeeded */
43771 +
43772 + increment_exec_counter();
43773 current->fs->in_exec = 0;
43774 current->in_execve = 0;
43775 acct_update_integrals(current);
43776 @@ -1554,6 +1678,14 @@ static int do_execve_common(const char *filename,
43777 put_files_struct(displaced);
43778 return retval;
43779
43780 +out_fail:
43781 +#ifdef CONFIG_GRKERNSEC
43782 + current->acl = old_acl;
43783 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
43784 + fput(current->exec_file);
43785 + current->exec_file = old_exec_file;
43786 +#endif
43787 +
43788 out:
43789 if (bprm->mm) {
43790 acct_arg_size(bprm, 0);
43791 @@ -1627,7 +1759,7 @@ static int expand_corename(struct core_name *cn)
43792 {
43793 char *old_corename = cn->corename;
43794
43795 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
43796 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
43797 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
43798
43799 if (!cn->corename) {
43800 @@ -1724,7 +1856,7 @@ static int format_corename(struct core_name *cn, long signr)
43801 int pid_in_pattern = 0;
43802 int err = 0;
43803
43804 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
43805 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
43806 cn->corename = kmalloc(cn->size, GFP_KERNEL);
43807 cn->used = 0;
43808
43809 @@ -1821,6 +1953,228 @@ out:
43810 return ispipe;
43811 }
43812
43813 +int pax_check_flags(unsigned long *flags)
43814 +{
43815 + int retval = 0;
43816 +
43817 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
43818 + if (*flags & MF_PAX_SEGMEXEC)
43819 + {
43820 + *flags &= ~MF_PAX_SEGMEXEC;
43821 + retval = -EINVAL;
43822 + }
43823 +#endif
43824 +
43825 + if ((*flags & MF_PAX_PAGEEXEC)
43826 +
43827 +#ifdef CONFIG_PAX_PAGEEXEC
43828 + && (*flags & MF_PAX_SEGMEXEC)
43829 +#endif
43830 +
43831 + )
43832 + {
43833 + *flags &= ~MF_PAX_PAGEEXEC;
43834 + retval = -EINVAL;
43835 + }
43836 +
43837 + if ((*flags & MF_PAX_MPROTECT)
43838 +
43839 +#ifdef CONFIG_PAX_MPROTECT
43840 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43841 +#endif
43842 +
43843 + )
43844 + {
43845 + *flags &= ~MF_PAX_MPROTECT;
43846 + retval = -EINVAL;
43847 + }
43848 +
43849 + if ((*flags & MF_PAX_EMUTRAMP)
43850 +
43851 +#ifdef CONFIG_PAX_EMUTRAMP
43852 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43853 +#endif
43854 +
43855 + )
43856 + {
43857 + *flags &= ~MF_PAX_EMUTRAMP;
43858 + retval = -EINVAL;
43859 + }
43860 +
43861 + return retval;
43862 +}
43863 +
43864 +EXPORT_SYMBOL(pax_check_flags);
43865 +
43866 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43867 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
43868 +{
43869 + struct task_struct *tsk = current;
43870 + struct mm_struct *mm = current->mm;
43871 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
43872 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
43873 + char *path_exec = NULL;
43874 + char *path_fault = NULL;
43875 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
43876 +
43877 + if (buffer_exec && buffer_fault) {
43878 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
43879 +
43880 + down_read(&mm->mmap_sem);
43881 + vma = mm->mmap;
43882 + while (vma && (!vma_exec || !vma_fault)) {
43883 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
43884 + vma_exec = vma;
43885 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
43886 + vma_fault = vma;
43887 + vma = vma->vm_next;
43888 + }
43889 + if (vma_exec) {
43890 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
43891 + if (IS_ERR(path_exec))
43892 + path_exec = "<path too long>";
43893 + else {
43894 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
43895 + if (path_exec) {
43896 + *path_exec = 0;
43897 + path_exec = buffer_exec;
43898 + } else
43899 + path_exec = "<path too long>";
43900 + }
43901 + }
43902 + if (vma_fault) {
43903 + start = vma_fault->vm_start;
43904 + end = vma_fault->vm_end;
43905 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
43906 + if (vma_fault->vm_file) {
43907 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
43908 + if (IS_ERR(path_fault))
43909 + path_fault = "<path too long>";
43910 + else {
43911 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
43912 + if (path_fault) {
43913 + *path_fault = 0;
43914 + path_fault = buffer_fault;
43915 + } else
43916 + path_fault = "<path too long>";
43917 + }
43918 + } else
43919 + path_fault = "<anonymous mapping>";
43920 + }
43921 + up_read(&mm->mmap_sem);
43922 + }
43923 + if (tsk->signal->curr_ip)
43924 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
43925 + else
43926 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
43927 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
43928 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
43929 + task_uid(tsk), task_euid(tsk), pc, sp);
43930 + free_page((unsigned long)buffer_exec);
43931 + free_page((unsigned long)buffer_fault);
43932 + pax_report_insns(regs, pc, sp);
43933 + do_coredump(SIGKILL, SIGKILL, regs);
43934 +}
43935 +#endif
43936 +
43937 +#ifdef CONFIG_PAX_REFCOUNT
43938 +void pax_report_refcount_overflow(struct pt_regs *regs)
43939 +{
43940 + if (current->signal->curr_ip)
43941 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43942 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
43943 + else
43944 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43945 + current->comm, task_pid_nr(current), current_uid(), current_euid());
43946 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
43947 + show_regs(regs);
43948 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
43949 +}
43950 +#endif
43951 +
43952 +#ifdef CONFIG_PAX_USERCOPY
43953 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
43954 +int object_is_on_stack(const void *obj, unsigned long len)
43955 +{
43956 + const void * const stack = task_stack_page(current);
43957 + const void * const stackend = stack + THREAD_SIZE;
43958 +
43959 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43960 + const void *frame = NULL;
43961 + const void *oldframe;
43962 +#endif
43963 +
43964 + if (obj + len < obj)
43965 + return -1;
43966 +
43967 + if (obj + len <= stack || stackend <= obj)
43968 + return 0;
43969 +
43970 + if (obj < stack || stackend < obj + len)
43971 + return -1;
43972 +
43973 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43974 + oldframe = __builtin_frame_address(1);
43975 + if (oldframe)
43976 + frame = __builtin_frame_address(2);
43977 + /*
43978 + low ----------------------------------------------> high
43979 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
43980 + ^----------------^
43981 + allow copies only within here
43982 + */
43983 + while (stack <= frame && frame < stackend) {
43984 + /* if obj + len extends past the last frame, this
43985 + check won't pass and the next frame will be 0,
43986 + causing us to bail out and correctly report
43987 + the copy as invalid
43988 + */
43989 + if (obj + len <= frame)
43990 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
43991 + oldframe = frame;
43992 + frame = *(const void * const *)frame;
43993 + }
43994 + return -1;
43995 +#else
43996 + return 1;
43997 +#endif
43998 +}
43999 +
44000 +__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
44001 +{
44002 + if (current->signal->curr_ip)
44003 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44004 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44005 + else
44006 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44007 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44008 + dump_stack();
44009 + gr_handle_kernel_exploit();
44010 + do_group_exit(SIGKILL);
44011 +}
44012 +#endif
44013 +
44014 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
44015 +void pax_track_stack(void)
44016 +{
44017 + unsigned long sp = (unsigned long)&sp;
44018 + if (sp < current_thread_info()->lowest_stack &&
44019 + sp > (unsigned long)task_stack_page(current))
44020 + current_thread_info()->lowest_stack = sp;
44021 +}
44022 +EXPORT_SYMBOL(pax_track_stack);
44023 +#endif
44024 +
44025 +#ifdef CONFIG_PAX_SIZE_OVERFLOW
44026 +void report_size_overflow(const char *file, unsigned int line, const char *func)
44027 +{
44028 + printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
44029 + dump_stack();
44030 + do_group_exit(SIGKILL);
44031 +}
44032 +EXPORT_SYMBOL(report_size_overflow);
44033 +#endif
44034 +
44035 static int zap_process(struct task_struct *start, int exit_code)
44036 {
44037 struct task_struct *t;
44038 @@ -2018,17 +2372,17 @@ static void wait_for_dump_helpers(struct file *file)
44039 pipe = file->f_path.dentry->d_inode->i_pipe;
44040
44041 pipe_lock(pipe);
44042 - pipe->readers++;
44043 - pipe->writers--;
44044 + atomic_inc(&pipe->readers);
44045 + atomic_dec(&pipe->writers);
44046
44047 - while ((pipe->readers > 1) && (!signal_pending(current))) {
44048 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
44049 wake_up_interruptible_sync(&pipe->wait);
44050 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44051 pipe_wait(pipe);
44052 }
44053
44054 - pipe->readers--;
44055 - pipe->writers++;
44056 + atomic_dec(&pipe->readers);
44057 + atomic_inc(&pipe->writers);
44058 pipe_unlock(pipe);
44059
44060 }
44061 @@ -2089,7 +2443,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44062 int retval = 0;
44063 int flag = 0;
44064 int ispipe;
44065 - static atomic_t core_dump_count = ATOMIC_INIT(0);
44066 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
44067 struct coredump_params cprm = {
44068 .signr = signr,
44069 .regs = regs,
44070 @@ -2104,6 +2458,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44071
44072 audit_core_dumps(signr);
44073
44074 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
44075 + gr_handle_brute_attach(current, cprm.mm_flags);
44076 +
44077 binfmt = mm->binfmt;
44078 if (!binfmt || !binfmt->core_dump)
44079 goto fail;
44080 @@ -2171,7 +2528,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44081 }
44082 cprm.limit = RLIM_INFINITY;
44083
44084 - dump_count = atomic_inc_return(&core_dump_count);
44085 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
44086 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
44087 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
44088 task_tgid_vnr(current), current->comm);
44089 @@ -2198,6 +2555,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44090 } else {
44091 struct inode *inode;
44092
44093 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
44094 +
44095 if (cprm.limit < binfmt->min_coredump)
44096 goto fail_unlock;
44097
44098 @@ -2241,7 +2600,7 @@ close_fail:
44099 filp_close(cprm.file, NULL);
44100 fail_dropcount:
44101 if (ispipe)
44102 - atomic_dec(&core_dump_count);
44103 + atomic_dec_unchecked(&core_dump_count);
44104 fail_unlock:
44105 kfree(cn.corename);
44106 fail_corename:
44107 @@ -2260,7 +2619,7 @@ fail:
44108 */
44109 int dump_write(struct file *file, const void *addr, int nr)
44110 {
44111 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
44112 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
44113 }
44114 EXPORT_SYMBOL(dump_write);
44115
44116 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
44117 index a8cbe1b..fed04cb 100644
44118 --- a/fs/ext2/balloc.c
44119 +++ b/fs/ext2/balloc.c
44120 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
44121
44122 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44123 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44124 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44125 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44126 sbi->s_resuid != current_fsuid() &&
44127 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44128 return 0;
44129 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
44130 index baac1b1..1499b62 100644
44131 --- a/fs/ext3/balloc.c
44132 +++ b/fs/ext3/balloc.c
44133 @@ -1438,9 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
44134
44135 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44136 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44137 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44138 + if (free_blocks < root_blocks + 1 &&
44139 !use_reservation && sbi->s_resuid != current_fsuid() &&
44140 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44141 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
44142 + !capable_nolog(CAP_SYS_RESOURCE)) {
44143 return 0;
44144 }
44145 return 1;
44146 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
44147 index 4bbd07a..a37bee6 100644
44148 --- a/fs/ext4/balloc.c
44149 +++ b/fs/ext4/balloc.c
44150 @@ -463,8 +463,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
44151 /* Hm, nope. Are (enough) root reserved clusters available? */
44152 if (sbi->s_resuid == current_fsuid() ||
44153 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
44154 - capable(CAP_SYS_RESOURCE) ||
44155 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
44156 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
44157 + capable_nolog(CAP_SYS_RESOURCE)) {
44158
44159 if (free_clusters >= (nclusters + dirty_clusters))
44160 return 1;
44161 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44162 index 0e01e90..ae2bd5e 100644
44163 --- a/fs/ext4/ext4.h
44164 +++ b/fs/ext4/ext4.h
44165 @@ -1225,19 +1225,19 @@ struct ext4_sb_info {
44166 unsigned long s_mb_last_start;
44167
44168 /* stats for buddy allocator */
44169 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
44170 - atomic_t s_bal_success; /* we found long enough chunks */
44171 - atomic_t s_bal_allocated; /* in blocks */
44172 - atomic_t s_bal_ex_scanned; /* total extents scanned */
44173 - atomic_t s_bal_goals; /* goal hits */
44174 - atomic_t s_bal_breaks; /* too long searches */
44175 - atomic_t s_bal_2orders; /* 2^order hits */
44176 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
44177 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
44178 + atomic_unchecked_t s_bal_allocated; /* in blocks */
44179 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
44180 + atomic_unchecked_t s_bal_goals; /* goal hits */
44181 + atomic_unchecked_t s_bal_breaks; /* too long searches */
44182 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
44183 spinlock_t s_bal_lock;
44184 unsigned long s_mb_buddies_generated;
44185 unsigned long long s_mb_generation_time;
44186 - atomic_t s_mb_lost_chunks;
44187 - atomic_t s_mb_preallocated;
44188 - atomic_t s_mb_discarded;
44189 + atomic_unchecked_t s_mb_lost_chunks;
44190 + atomic_unchecked_t s_mb_preallocated;
44191 + atomic_unchecked_t s_mb_discarded;
44192 atomic_t s_lock_busy;
44193
44194 /* locality groups */
44195 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
44196 index 6b0a57e..1955a44 100644
44197 --- a/fs/ext4/mballoc.c
44198 +++ b/fs/ext4/mballoc.c
44199 @@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
44200 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
44201
44202 if (EXT4_SB(sb)->s_mb_stats)
44203 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
44204 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
44205
44206 break;
44207 }
44208 @@ -2041,7 +2041,7 @@ repeat:
44209 ac->ac_status = AC_STATUS_CONTINUE;
44210 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44211 cr = 3;
44212 - atomic_inc(&sbi->s_mb_lost_chunks);
44213 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44214 goto repeat;
44215 }
44216 }
44217 @@ -2545,25 +2545,25 @@ int ext4_mb_release(struct super_block *sb)
44218 if (sbi->s_mb_stats) {
44219 ext4_msg(sb, KERN_INFO,
44220 "mballoc: %u blocks %u reqs (%u success)",
44221 - atomic_read(&sbi->s_bal_allocated),
44222 - atomic_read(&sbi->s_bal_reqs),
44223 - atomic_read(&sbi->s_bal_success));
44224 + atomic_read_unchecked(&sbi->s_bal_allocated),
44225 + atomic_read_unchecked(&sbi->s_bal_reqs),
44226 + atomic_read_unchecked(&sbi->s_bal_success));
44227 ext4_msg(sb, KERN_INFO,
44228 "mballoc: %u extents scanned, %u goal hits, "
44229 "%u 2^N hits, %u breaks, %u lost",
44230 - atomic_read(&sbi->s_bal_ex_scanned),
44231 - atomic_read(&sbi->s_bal_goals),
44232 - atomic_read(&sbi->s_bal_2orders),
44233 - atomic_read(&sbi->s_bal_breaks),
44234 - atomic_read(&sbi->s_mb_lost_chunks));
44235 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44236 + atomic_read_unchecked(&sbi->s_bal_goals),
44237 + atomic_read_unchecked(&sbi->s_bal_2orders),
44238 + atomic_read_unchecked(&sbi->s_bal_breaks),
44239 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44240 ext4_msg(sb, KERN_INFO,
44241 "mballoc: %lu generated and it took %Lu",
44242 sbi->s_mb_buddies_generated,
44243 sbi->s_mb_generation_time);
44244 ext4_msg(sb, KERN_INFO,
44245 "mballoc: %u preallocated, %u discarded",
44246 - atomic_read(&sbi->s_mb_preallocated),
44247 - atomic_read(&sbi->s_mb_discarded));
44248 + atomic_read_unchecked(&sbi->s_mb_preallocated),
44249 + atomic_read_unchecked(&sbi->s_mb_discarded));
44250 }
44251
44252 free_percpu(sbi->s_locality_groups);
44253 @@ -3045,16 +3045,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44254 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44255
44256 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44257 - atomic_inc(&sbi->s_bal_reqs);
44258 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44259 + atomic_inc_unchecked(&sbi->s_bal_reqs);
44260 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44261 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44262 - atomic_inc(&sbi->s_bal_success);
44263 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44264 + atomic_inc_unchecked(&sbi->s_bal_success);
44265 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44266 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44267 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44268 - atomic_inc(&sbi->s_bal_goals);
44269 + atomic_inc_unchecked(&sbi->s_bal_goals);
44270 if (ac->ac_found > sbi->s_mb_max_to_scan)
44271 - atomic_inc(&sbi->s_bal_breaks);
44272 + atomic_inc_unchecked(&sbi->s_bal_breaks);
44273 }
44274
44275 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
44276 @@ -3458,7 +3458,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44277 trace_ext4_mb_new_inode_pa(ac, pa);
44278
44279 ext4_mb_use_inode_pa(ac, pa);
44280 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
44281 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
44282
44283 ei = EXT4_I(ac->ac_inode);
44284 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44285 @@ -3518,7 +3518,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44286 trace_ext4_mb_new_group_pa(ac, pa);
44287
44288 ext4_mb_use_group_pa(ac, pa);
44289 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44290 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44291
44292 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44293 lg = ac->ac_lg;
44294 @@ -3607,7 +3607,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44295 * from the bitmap and continue.
44296 */
44297 }
44298 - atomic_add(free, &sbi->s_mb_discarded);
44299 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
44300
44301 return err;
44302 }
44303 @@ -3625,7 +3625,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44304 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44305 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44306 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44307 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44308 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44309 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44310
44311 return 0;
44312 diff --git a/fs/fcntl.c b/fs/fcntl.c
44313 index 75e7c1f..1eb3e4d 100644
44314 --- a/fs/fcntl.c
44315 +++ b/fs/fcntl.c
44316 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44317 if (err)
44318 return err;
44319
44320 + if (gr_handle_chroot_fowner(pid, type))
44321 + return -ENOENT;
44322 + if (gr_check_protected_task_fowner(pid, type))
44323 + return -EACCES;
44324 +
44325 f_modown(filp, pid, type, force);
44326 return 0;
44327 }
44328 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44329
44330 static int f_setown_ex(struct file *filp, unsigned long arg)
44331 {
44332 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44333 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44334 struct f_owner_ex owner;
44335 struct pid *pid;
44336 int type;
44337 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44338
44339 static int f_getown_ex(struct file *filp, unsigned long arg)
44340 {
44341 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44342 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44343 struct f_owner_ex owner;
44344 int ret = 0;
44345
44346 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44347 switch (cmd) {
44348 case F_DUPFD:
44349 case F_DUPFD_CLOEXEC:
44350 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
44351 if (arg >= rlimit(RLIMIT_NOFILE))
44352 break;
44353 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44354 diff --git a/fs/fifo.c b/fs/fifo.c
44355 index b1a524d..4ee270e 100644
44356 --- a/fs/fifo.c
44357 +++ b/fs/fifo.c
44358 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44359 */
44360 filp->f_op = &read_pipefifo_fops;
44361 pipe->r_counter++;
44362 - if (pipe->readers++ == 0)
44363 + if (atomic_inc_return(&pipe->readers) == 1)
44364 wake_up_partner(inode);
44365
44366 - if (!pipe->writers) {
44367 + if (!atomic_read(&pipe->writers)) {
44368 if ((filp->f_flags & O_NONBLOCK)) {
44369 /* suppress POLLHUP until we have
44370 * seen a writer */
44371 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44372 * errno=ENXIO when there is no process reading the FIFO.
44373 */
44374 ret = -ENXIO;
44375 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44376 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44377 goto err;
44378
44379 filp->f_op = &write_pipefifo_fops;
44380 pipe->w_counter++;
44381 - if (!pipe->writers++)
44382 + if (atomic_inc_return(&pipe->writers) == 1)
44383 wake_up_partner(inode);
44384
44385 - if (!pipe->readers) {
44386 + if (!atomic_read(&pipe->readers)) {
44387 wait_for_partner(inode, &pipe->r_counter);
44388 if (signal_pending(current))
44389 goto err_wr;
44390 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44391 */
44392 filp->f_op = &rdwr_pipefifo_fops;
44393
44394 - pipe->readers++;
44395 - pipe->writers++;
44396 + atomic_inc(&pipe->readers);
44397 + atomic_inc(&pipe->writers);
44398 pipe->r_counter++;
44399 pipe->w_counter++;
44400 - if (pipe->readers == 1 || pipe->writers == 1)
44401 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44402 wake_up_partner(inode);
44403 break;
44404
44405 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44406 return 0;
44407
44408 err_rd:
44409 - if (!--pipe->readers)
44410 + if (atomic_dec_and_test(&pipe->readers))
44411 wake_up_interruptible(&pipe->wait);
44412 ret = -ERESTARTSYS;
44413 goto err;
44414
44415 err_wr:
44416 - if (!--pipe->writers)
44417 + if (atomic_dec_and_test(&pipe->writers))
44418 wake_up_interruptible(&pipe->wait);
44419 ret = -ERESTARTSYS;
44420 goto err;
44421
44422 err:
44423 - if (!pipe->readers && !pipe->writers)
44424 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
44425 free_pipe_info(inode);
44426
44427 err_nocleanup:
44428 diff --git a/fs/file.c b/fs/file.c
44429 index ba3f605..fade102 100644
44430 --- a/fs/file.c
44431 +++ b/fs/file.c
44432 @@ -15,6 +15,7 @@
44433 #include <linux/slab.h>
44434 #include <linux/vmalloc.h>
44435 #include <linux/file.h>
44436 +#include <linux/security.h>
44437 #include <linux/fdtable.h>
44438 #include <linux/bitops.h>
44439 #include <linux/interrupt.h>
44440 @@ -255,6 +256,7 @@ int expand_files(struct files_struct *files, int nr)
44441 * N.B. For clone tasks sharing a files structure, this test
44442 * will limit the total number of files that can be opened.
44443 */
44444 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
44445 if (nr >= rlimit(RLIMIT_NOFILE))
44446 return -EMFILE;
44447
44448 diff --git a/fs/filesystems.c b/fs/filesystems.c
44449 index 96f2428..f5eeb8e 100644
44450 --- a/fs/filesystems.c
44451 +++ b/fs/filesystems.c
44452 @@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
44453 int len = dot ? dot - name : strlen(name);
44454
44455 fs = __get_fs_type(name, len);
44456 +
44457 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
44458 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44459 +#else
44460 if (!fs && (request_module("%.*s", len, name) == 0))
44461 +#endif
44462 fs = __get_fs_type(name, len);
44463
44464 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44465 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44466 index e159e68..e7d2a6f 100644
44467 --- a/fs/fs_struct.c
44468 +++ b/fs/fs_struct.c
44469 @@ -4,6 +4,7 @@
44470 #include <linux/path.h>
44471 #include <linux/slab.h>
44472 #include <linux/fs_struct.h>
44473 +#include <linux/grsecurity.h>
44474 #include "internal.h"
44475
44476 static inline void path_get_longterm(struct path *path)
44477 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
44478 write_seqcount_begin(&fs->seq);
44479 old_root = fs->root;
44480 fs->root = *path;
44481 + gr_set_chroot_entries(current, path);
44482 write_seqcount_end(&fs->seq);
44483 spin_unlock(&fs->lock);
44484 if (old_root.dentry)
44485 @@ -65,6 +67,17 @@ static inline int replace_path(struct path *p, const struct path *old, const str
44486 return 1;
44487 }
44488
44489 +static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
44490 +{
44491 + if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
44492 + return 0;
44493 + *p = *new;
44494 +
44495 + gr_set_chroot_entries(task, new);
44496 +
44497 + return 1;
44498 +}
44499 +
44500 void chroot_fs_refs(struct path *old_root, struct path *new_root)
44501 {
44502 struct task_struct *g, *p;
44503 @@ -79,7 +92,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
44504 int hits = 0;
44505 spin_lock(&fs->lock);
44506 write_seqcount_begin(&fs->seq);
44507 - hits += replace_path(&fs->root, old_root, new_root);
44508 + hits += replace_root_path(p, &fs->root, old_root, new_root);
44509 hits += replace_path(&fs->pwd, old_root, new_root);
44510 write_seqcount_end(&fs->seq);
44511 while (hits--) {
44512 @@ -111,7 +124,8 @@ void exit_fs(struct task_struct *tsk)
44513 task_lock(tsk);
44514 spin_lock(&fs->lock);
44515 tsk->fs = NULL;
44516 - kill = !--fs->users;
44517 + gr_clear_chroot_entries(tsk);
44518 + kill = !atomic_dec_return(&fs->users);
44519 spin_unlock(&fs->lock);
44520 task_unlock(tsk);
44521 if (kill)
44522 @@ -124,7 +138,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44523 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44524 /* We don't need to lock fs - think why ;-) */
44525 if (fs) {
44526 - fs->users = 1;
44527 + atomic_set(&fs->users, 1);
44528 fs->in_exec = 0;
44529 spin_lock_init(&fs->lock);
44530 seqcount_init(&fs->seq);
44531 @@ -133,6 +147,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44532 spin_lock(&old->lock);
44533 fs->root = old->root;
44534 path_get_longterm(&fs->root);
44535 + /* instead of calling gr_set_chroot_entries here,
44536 + we call it from every caller of this function
44537 + */
44538 fs->pwd = old->pwd;
44539 path_get_longterm(&fs->pwd);
44540 spin_unlock(&old->lock);
44541 @@ -151,8 +168,9 @@ int unshare_fs_struct(void)
44542
44543 task_lock(current);
44544 spin_lock(&fs->lock);
44545 - kill = !--fs->users;
44546 + kill = !atomic_dec_return(&fs->users);
44547 current->fs = new_fs;
44548 + gr_set_chroot_entries(current, &new_fs->root);
44549 spin_unlock(&fs->lock);
44550 task_unlock(current);
44551
44552 @@ -165,13 +183,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
44553
44554 int current_umask(void)
44555 {
44556 - return current->fs->umask;
44557 + return current->fs->umask | gr_acl_umask();
44558 }
44559 EXPORT_SYMBOL(current_umask);
44560
44561 /* to be mentioned only in INIT_TASK */
44562 struct fs_struct init_fs = {
44563 - .users = 1,
44564 + .users = ATOMIC_INIT(1),
44565 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44566 .seq = SEQCNT_ZERO,
44567 .umask = 0022,
44568 @@ -187,12 +205,13 @@ void daemonize_fs_struct(void)
44569 task_lock(current);
44570
44571 spin_lock(&init_fs.lock);
44572 - init_fs.users++;
44573 + atomic_inc(&init_fs.users);
44574 spin_unlock(&init_fs.lock);
44575
44576 spin_lock(&fs->lock);
44577 current->fs = &init_fs;
44578 - kill = !--fs->users;
44579 + gr_set_chroot_entries(current, &current->fs->root);
44580 + kill = !atomic_dec_return(&fs->users);
44581 spin_unlock(&fs->lock);
44582
44583 task_unlock(current);
44584 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44585 index 9905350..02eaec4 100644
44586 --- a/fs/fscache/cookie.c
44587 +++ b/fs/fscache/cookie.c
44588 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
44589 parent ? (char *) parent->def->name : "<no-parent>",
44590 def->name, netfs_data);
44591
44592 - fscache_stat(&fscache_n_acquires);
44593 + fscache_stat_unchecked(&fscache_n_acquires);
44594
44595 /* if there's no parent cookie, then we don't create one here either */
44596 if (!parent) {
44597 - fscache_stat(&fscache_n_acquires_null);
44598 + fscache_stat_unchecked(&fscache_n_acquires_null);
44599 _leave(" [no parent]");
44600 return NULL;
44601 }
44602 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
44603 /* allocate and initialise a cookie */
44604 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
44605 if (!cookie) {
44606 - fscache_stat(&fscache_n_acquires_oom);
44607 + fscache_stat_unchecked(&fscache_n_acquires_oom);
44608 _leave(" [ENOMEM]");
44609 return NULL;
44610 }
44611 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44612
44613 switch (cookie->def->type) {
44614 case FSCACHE_COOKIE_TYPE_INDEX:
44615 - fscache_stat(&fscache_n_cookie_index);
44616 + fscache_stat_unchecked(&fscache_n_cookie_index);
44617 break;
44618 case FSCACHE_COOKIE_TYPE_DATAFILE:
44619 - fscache_stat(&fscache_n_cookie_data);
44620 + fscache_stat_unchecked(&fscache_n_cookie_data);
44621 break;
44622 default:
44623 - fscache_stat(&fscache_n_cookie_special);
44624 + fscache_stat_unchecked(&fscache_n_cookie_special);
44625 break;
44626 }
44627
44628 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44629 if (fscache_acquire_non_index_cookie(cookie) < 0) {
44630 atomic_dec(&parent->n_children);
44631 __fscache_cookie_put(cookie);
44632 - fscache_stat(&fscache_n_acquires_nobufs);
44633 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
44634 _leave(" = NULL");
44635 return NULL;
44636 }
44637 }
44638
44639 - fscache_stat(&fscache_n_acquires_ok);
44640 + fscache_stat_unchecked(&fscache_n_acquires_ok);
44641 _leave(" = %p", cookie);
44642 return cookie;
44643 }
44644 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
44645 cache = fscache_select_cache_for_object(cookie->parent);
44646 if (!cache) {
44647 up_read(&fscache_addremove_sem);
44648 - fscache_stat(&fscache_n_acquires_no_cache);
44649 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
44650 _leave(" = -ENOMEDIUM [no cache]");
44651 return -ENOMEDIUM;
44652 }
44653 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
44654 object = cache->ops->alloc_object(cache, cookie);
44655 fscache_stat_d(&fscache_n_cop_alloc_object);
44656 if (IS_ERR(object)) {
44657 - fscache_stat(&fscache_n_object_no_alloc);
44658 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
44659 ret = PTR_ERR(object);
44660 goto error;
44661 }
44662
44663 - fscache_stat(&fscache_n_object_alloc);
44664 + fscache_stat_unchecked(&fscache_n_object_alloc);
44665
44666 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
44667
44668 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
44669 struct fscache_object *object;
44670 struct hlist_node *_p;
44671
44672 - fscache_stat(&fscache_n_updates);
44673 + fscache_stat_unchecked(&fscache_n_updates);
44674
44675 if (!cookie) {
44676 - fscache_stat(&fscache_n_updates_null);
44677 + fscache_stat_unchecked(&fscache_n_updates_null);
44678 _leave(" [no cookie]");
44679 return;
44680 }
44681 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44682 struct fscache_object *object;
44683 unsigned long event;
44684
44685 - fscache_stat(&fscache_n_relinquishes);
44686 + fscache_stat_unchecked(&fscache_n_relinquishes);
44687 if (retire)
44688 - fscache_stat(&fscache_n_relinquishes_retire);
44689 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
44690
44691 if (!cookie) {
44692 - fscache_stat(&fscache_n_relinquishes_null);
44693 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
44694 _leave(" [no cookie]");
44695 return;
44696 }
44697 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44698
44699 /* wait for the cookie to finish being instantiated (or to fail) */
44700 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
44701 - fscache_stat(&fscache_n_relinquishes_waitcrt);
44702 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
44703 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
44704 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
44705 }
44706 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
44707 index f6aad48..88dcf26 100644
44708 --- a/fs/fscache/internal.h
44709 +++ b/fs/fscache/internal.h
44710 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
44711 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
44712 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
44713
44714 -extern atomic_t fscache_n_op_pend;
44715 -extern atomic_t fscache_n_op_run;
44716 -extern atomic_t fscache_n_op_enqueue;
44717 -extern atomic_t fscache_n_op_deferred_release;
44718 -extern atomic_t fscache_n_op_release;
44719 -extern atomic_t fscache_n_op_gc;
44720 -extern atomic_t fscache_n_op_cancelled;
44721 -extern atomic_t fscache_n_op_rejected;
44722 +extern atomic_unchecked_t fscache_n_op_pend;
44723 +extern atomic_unchecked_t fscache_n_op_run;
44724 +extern atomic_unchecked_t fscache_n_op_enqueue;
44725 +extern atomic_unchecked_t fscache_n_op_deferred_release;
44726 +extern atomic_unchecked_t fscache_n_op_release;
44727 +extern atomic_unchecked_t fscache_n_op_gc;
44728 +extern atomic_unchecked_t fscache_n_op_cancelled;
44729 +extern atomic_unchecked_t fscache_n_op_rejected;
44730
44731 -extern atomic_t fscache_n_attr_changed;
44732 -extern atomic_t fscache_n_attr_changed_ok;
44733 -extern atomic_t fscache_n_attr_changed_nobufs;
44734 -extern atomic_t fscache_n_attr_changed_nomem;
44735 -extern atomic_t fscache_n_attr_changed_calls;
44736 +extern atomic_unchecked_t fscache_n_attr_changed;
44737 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
44738 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
44739 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
44740 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
44741
44742 -extern atomic_t fscache_n_allocs;
44743 -extern atomic_t fscache_n_allocs_ok;
44744 -extern atomic_t fscache_n_allocs_wait;
44745 -extern atomic_t fscache_n_allocs_nobufs;
44746 -extern atomic_t fscache_n_allocs_intr;
44747 -extern atomic_t fscache_n_allocs_object_dead;
44748 -extern atomic_t fscache_n_alloc_ops;
44749 -extern atomic_t fscache_n_alloc_op_waits;
44750 +extern atomic_unchecked_t fscache_n_allocs;
44751 +extern atomic_unchecked_t fscache_n_allocs_ok;
44752 +extern atomic_unchecked_t fscache_n_allocs_wait;
44753 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
44754 +extern atomic_unchecked_t fscache_n_allocs_intr;
44755 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
44756 +extern atomic_unchecked_t fscache_n_alloc_ops;
44757 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
44758
44759 -extern atomic_t fscache_n_retrievals;
44760 -extern atomic_t fscache_n_retrievals_ok;
44761 -extern atomic_t fscache_n_retrievals_wait;
44762 -extern atomic_t fscache_n_retrievals_nodata;
44763 -extern atomic_t fscache_n_retrievals_nobufs;
44764 -extern atomic_t fscache_n_retrievals_intr;
44765 -extern atomic_t fscache_n_retrievals_nomem;
44766 -extern atomic_t fscache_n_retrievals_object_dead;
44767 -extern atomic_t fscache_n_retrieval_ops;
44768 -extern atomic_t fscache_n_retrieval_op_waits;
44769 +extern atomic_unchecked_t fscache_n_retrievals;
44770 +extern atomic_unchecked_t fscache_n_retrievals_ok;
44771 +extern atomic_unchecked_t fscache_n_retrievals_wait;
44772 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
44773 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
44774 +extern atomic_unchecked_t fscache_n_retrievals_intr;
44775 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
44776 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
44777 +extern atomic_unchecked_t fscache_n_retrieval_ops;
44778 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
44779
44780 -extern atomic_t fscache_n_stores;
44781 -extern atomic_t fscache_n_stores_ok;
44782 -extern atomic_t fscache_n_stores_again;
44783 -extern atomic_t fscache_n_stores_nobufs;
44784 -extern atomic_t fscache_n_stores_oom;
44785 -extern atomic_t fscache_n_store_ops;
44786 -extern atomic_t fscache_n_store_calls;
44787 -extern atomic_t fscache_n_store_pages;
44788 -extern atomic_t fscache_n_store_radix_deletes;
44789 -extern atomic_t fscache_n_store_pages_over_limit;
44790 +extern atomic_unchecked_t fscache_n_stores;
44791 +extern atomic_unchecked_t fscache_n_stores_ok;
44792 +extern atomic_unchecked_t fscache_n_stores_again;
44793 +extern atomic_unchecked_t fscache_n_stores_nobufs;
44794 +extern atomic_unchecked_t fscache_n_stores_oom;
44795 +extern atomic_unchecked_t fscache_n_store_ops;
44796 +extern atomic_unchecked_t fscache_n_store_calls;
44797 +extern atomic_unchecked_t fscache_n_store_pages;
44798 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
44799 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
44800
44801 -extern atomic_t fscache_n_store_vmscan_not_storing;
44802 -extern atomic_t fscache_n_store_vmscan_gone;
44803 -extern atomic_t fscache_n_store_vmscan_busy;
44804 -extern atomic_t fscache_n_store_vmscan_cancelled;
44805 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
44806 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
44807 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
44808 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
44809
44810 -extern atomic_t fscache_n_marks;
44811 -extern atomic_t fscache_n_uncaches;
44812 +extern atomic_unchecked_t fscache_n_marks;
44813 +extern atomic_unchecked_t fscache_n_uncaches;
44814
44815 -extern atomic_t fscache_n_acquires;
44816 -extern atomic_t fscache_n_acquires_null;
44817 -extern atomic_t fscache_n_acquires_no_cache;
44818 -extern atomic_t fscache_n_acquires_ok;
44819 -extern atomic_t fscache_n_acquires_nobufs;
44820 -extern atomic_t fscache_n_acquires_oom;
44821 +extern atomic_unchecked_t fscache_n_acquires;
44822 +extern atomic_unchecked_t fscache_n_acquires_null;
44823 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
44824 +extern atomic_unchecked_t fscache_n_acquires_ok;
44825 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
44826 +extern atomic_unchecked_t fscache_n_acquires_oom;
44827
44828 -extern atomic_t fscache_n_updates;
44829 -extern atomic_t fscache_n_updates_null;
44830 -extern atomic_t fscache_n_updates_run;
44831 +extern atomic_unchecked_t fscache_n_updates;
44832 +extern atomic_unchecked_t fscache_n_updates_null;
44833 +extern atomic_unchecked_t fscache_n_updates_run;
44834
44835 -extern atomic_t fscache_n_relinquishes;
44836 -extern atomic_t fscache_n_relinquishes_null;
44837 -extern atomic_t fscache_n_relinquishes_waitcrt;
44838 -extern atomic_t fscache_n_relinquishes_retire;
44839 +extern atomic_unchecked_t fscache_n_relinquishes;
44840 +extern atomic_unchecked_t fscache_n_relinquishes_null;
44841 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
44842 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
44843
44844 -extern atomic_t fscache_n_cookie_index;
44845 -extern atomic_t fscache_n_cookie_data;
44846 -extern atomic_t fscache_n_cookie_special;
44847 +extern atomic_unchecked_t fscache_n_cookie_index;
44848 +extern atomic_unchecked_t fscache_n_cookie_data;
44849 +extern atomic_unchecked_t fscache_n_cookie_special;
44850
44851 -extern atomic_t fscache_n_object_alloc;
44852 -extern atomic_t fscache_n_object_no_alloc;
44853 -extern atomic_t fscache_n_object_lookups;
44854 -extern atomic_t fscache_n_object_lookups_negative;
44855 -extern atomic_t fscache_n_object_lookups_positive;
44856 -extern atomic_t fscache_n_object_lookups_timed_out;
44857 -extern atomic_t fscache_n_object_created;
44858 -extern atomic_t fscache_n_object_avail;
44859 -extern atomic_t fscache_n_object_dead;
44860 +extern atomic_unchecked_t fscache_n_object_alloc;
44861 +extern atomic_unchecked_t fscache_n_object_no_alloc;
44862 +extern atomic_unchecked_t fscache_n_object_lookups;
44863 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
44864 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
44865 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
44866 +extern atomic_unchecked_t fscache_n_object_created;
44867 +extern atomic_unchecked_t fscache_n_object_avail;
44868 +extern atomic_unchecked_t fscache_n_object_dead;
44869
44870 -extern atomic_t fscache_n_checkaux_none;
44871 -extern atomic_t fscache_n_checkaux_okay;
44872 -extern atomic_t fscache_n_checkaux_update;
44873 -extern atomic_t fscache_n_checkaux_obsolete;
44874 +extern atomic_unchecked_t fscache_n_checkaux_none;
44875 +extern atomic_unchecked_t fscache_n_checkaux_okay;
44876 +extern atomic_unchecked_t fscache_n_checkaux_update;
44877 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
44878
44879 extern atomic_t fscache_n_cop_alloc_object;
44880 extern atomic_t fscache_n_cop_lookup_object;
44881 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
44882 atomic_inc(stat);
44883 }
44884
44885 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
44886 +{
44887 + atomic_inc_unchecked(stat);
44888 +}
44889 +
44890 static inline void fscache_stat_d(atomic_t *stat)
44891 {
44892 atomic_dec(stat);
44893 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
44894
44895 #define __fscache_stat(stat) (NULL)
44896 #define fscache_stat(stat) do {} while (0)
44897 +#define fscache_stat_unchecked(stat) do {} while (0)
44898 #define fscache_stat_d(stat) do {} while (0)
44899 #endif
44900
44901 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
44902 index b6b897c..0ffff9c 100644
44903 --- a/fs/fscache/object.c
44904 +++ b/fs/fscache/object.c
44905 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44906 /* update the object metadata on disk */
44907 case FSCACHE_OBJECT_UPDATING:
44908 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
44909 - fscache_stat(&fscache_n_updates_run);
44910 + fscache_stat_unchecked(&fscache_n_updates_run);
44911 fscache_stat(&fscache_n_cop_update_object);
44912 object->cache->ops->update_object(object);
44913 fscache_stat_d(&fscache_n_cop_update_object);
44914 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44915 spin_lock(&object->lock);
44916 object->state = FSCACHE_OBJECT_DEAD;
44917 spin_unlock(&object->lock);
44918 - fscache_stat(&fscache_n_object_dead);
44919 + fscache_stat_unchecked(&fscache_n_object_dead);
44920 goto terminal_transit;
44921
44922 /* handle the parent cache of this object being withdrawn from
44923 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44924 spin_lock(&object->lock);
44925 object->state = FSCACHE_OBJECT_DEAD;
44926 spin_unlock(&object->lock);
44927 - fscache_stat(&fscache_n_object_dead);
44928 + fscache_stat_unchecked(&fscache_n_object_dead);
44929 goto terminal_transit;
44930
44931 /* complain about the object being woken up once it is
44932 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44933 parent->cookie->def->name, cookie->def->name,
44934 object->cache->tag->name);
44935
44936 - fscache_stat(&fscache_n_object_lookups);
44937 + fscache_stat_unchecked(&fscache_n_object_lookups);
44938 fscache_stat(&fscache_n_cop_lookup_object);
44939 ret = object->cache->ops->lookup_object(object);
44940 fscache_stat_d(&fscache_n_cop_lookup_object);
44941 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44942 if (ret == -ETIMEDOUT) {
44943 /* probably stuck behind another object, so move this one to
44944 * the back of the queue */
44945 - fscache_stat(&fscache_n_object_lookups_timed_out);
44946 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
44947 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44948 }
44949
44950 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
44951
44952 spin_lock(&object->lock);
44953 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44954 - fscache_stat(&fscache_n_object_lookups_negative);
44955 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
44956
44957 /* transit here to allow write requests to begin stacking up
44958 * and read requests to begin returning ENODATA */
44959 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
44960 * result, in which case there may be data available */
44961 spin_lock(&object->lock);
44962 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44963 - fscache_stat(&fscache_n_object_lookups_positive);
44964 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
44965
44966 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
44967
44968 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
44969 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44970 } else {
44971 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
44972 - fscache_stat(&fscache_n_object_created);
44973 + fscache_stat_unchecked(&fscache_n_object_created);
44974
44975 object->state = FSCACHE_OBJECT_AVAILABLE;
44976 spin_unlock(&object->lock);
44977 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
44978 fscache_enqueue_dependents(object);
44979
44980 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
44981 - fscache_stat(&fscache_n_object_avail);
44982 + fscache_stat_unchecked(&fscache_n_object_avail);
44983
44984 _leave("");
44985 }
44986 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44987 enum fscache_checkaux result;
44988
44989 if (!object->cookie->def->check_aux) {
44990 - fscache_stat(&fscache_n_checkaux_none);
44991 + fscache_stat_unchecked(&fscache_n_checkaux_none);
44992 return FSCACHE_CHECKAUX_OKAY;
44993 }
44994
44995 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44996 switch (result) {
44997 /* entry okay as is */
44998 case FSCACHE_CHECKAUX_OKAY:
44999 - fscache_stat(&fscache_n_checkaux_okay);
45000 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
45001 break;
45002
45003 /* entry requires update */
45004 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
45005 - fscache_stat(&fscache_n_checkaux_update);
45006 + fscache_stat_unchecked(&fscache_n_checkaux_update);
45007 break;
45008
45009 /* entry requires deletion */
45010 case FSCACHE_CHECKAUX_OBSOLETE:
45011 - fscache_stat(&fscache_n_checkaux_obsolete);
45012 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
45013 break;
45014
45015 default:
45016 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
45017 index 30afdfa..2256596 100644
45018 --- a/fs/fscache/operation.c
45019 +++ b/fs/fscache/operation.c
45020 @@ -17,7 +17,7 @@
45021 #include <linux/slab.h>
45022 #include "internal.h"
45023
45024 -atomic_t fscache_op_debug_id;
45025 +atomic_unchecked_t fscache_op_debug_id;
45026 EXPORT_SYMBOL(fscache_op_debug_id);
45027
45028 /**
45029 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
45030 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
45031 ASSERTCMP(atomic_read(&op->usage), >, 0);
45032
45033 - fscache_stat(&fscache_n_op_enqueue);
45034 + fscache_stat_unchecked(&fscache_n_op_enqueue);
45035 switch (op->flags & FSCACHE_OP_TYPE) {
45036 case FSCACHE_OP_ASYNC:
45037 _debug("queue async");
45038 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
45039 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
45040 if (op->processor)
45041 fscache_enqueue_operation(op);
45042 - fscache_stat(&fscache_n_op_run);
45043 + fscache_stat_unchecked(&fscache_n_op_run);
45044 }
45045
45046 /*
45047 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45048 if (object->n_ops > 1) {
45049 atomic_inc(&op->usage);
45050 list_add_tail(&op->pend_link, &object->pending_ops);
45051 - fscache_stat(&fscache_n_op_pend);
45052 + fscache_stat_unchecked(&fscache_n_op_pend);
45053 } else if (!list_empty(&object->pending_ops)) {
45054 atomic_inc(&op->usage);
45055 list_add_tail(&op->pend_link, &object->pending_ops);
45056 - fscache_stat(&fscache_n_op_pend);
45057 + fscache_stat_unchecked(&fscache_n_op_pend);
45058 fscache_start_operations(object);
45059 } else {
45060 ASSERTCMP(object->n_in_progress, ==, 0);
45061 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45062 object->n_exclusive++; /* reads and writes must wait */
45063 atomic_inc(&op->usage);
45064 list_add_tail(&op->pend_link, &object->pending_ops);
45065 - fscache_stat(&fscache_n_op_pend);
45066 + fscache_stat_unchecked(&fscache_n_op_pend);
45067 ret = 0;
45068 } else {
45069 /* not allowed to submit ops in any other state */
45070 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
45071 if (object->n_exclusive > 0) {
45072 atomic_inc(&op->usage);
45073 list_add_tail(&op->pend_link, &object->pending_ops);
45074 - fscache_stat(&fscache_n_op_pend);
45075 + fscache_stat_unchecked(&fscache_n_op_pend);
45076 } else if (!list_empty(&object->pending_ops)) {
45077 atomic_inc(&op->usage);
45078 list_add_tail(&op->pend_link, &object->pending_ops);
45079 - fscache_stat(&fscache_n_op_pend);
45080 + fscache_stat_unchecked(&fscache_n_op_pend);
45081 fscache_start_operations(object);
45082 } else {
45083 ASSERTCMP(object->n_exclusive, ==, 0);
45084 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
45085 object->n_ops++;
45086 atomic_inc(&op->usage);
45087 list_add_tail(&op->pend_link, &object->pending_ops);
45088 - fscache_stat(&fscache_n_op_pend);
45089 + fscache_stat_unchecked(&fscache_n_op_pend);
45090 ret = 0;
45091 } else if (object->state == FSCACHE_OBJECT_DYING ||
45092 object->state == FSCACHE_OBJECT_LC_DYING ||
45093 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45094 - fscache_stat(&fscache_n_op_rejected);
45095 + fscache_stat_unchecked(&fscache_n_op_rejected);
45096 ret = -ENOBUFS;
45097 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45098 fscache_report_unexpected_submission(object, op, ostate);
45099 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
45100
45101 ret = -EBUSY;
45102 if (!list_empty(&op->pend_link)) {
45103 - fscache_stat(&fscache_n_op_cancelled);
45104 + fscache_stat_unchecked(&fscache_n_op_cancelled);
45105 list_del_init(&op->pend_link);
45106 object->n_ops--;
45107 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
45108 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
45109 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45110 BUG();
45111
45112 - fscache_stat(&fscache_n_op_release);
45113 + fscache_stat_unchecked(&fscache_n_op_release);
45114
45115 if (op->release) {
45116 op->release(op);
45117 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
45118 * lock, and defer it otherwise */
45119 if (!spin_trylock(&object->lock)) {
45120 _debug("defer put");
45121 - fscache_stat(&fscache_n_op_deferred_release);
45122 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
45123
45124 cache = object->cache;
45125 spin_lock(&cache->op_gc_list_lock);
45126 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
45127
45128 _debug("GC DEFERRED REL OBJ%x OP%x",
45129 object->debug_id, op->debug_id);
45130 - fscache_stat(&fscache_n_op_gc);
45131 + fscache_stat_unchecked(&fscache_n_op_gc);
45132
45133 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45134
45135 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
45136 index 3f7a59b..cf196cc 100644
45137 --- a/fs/fscache/page.c
45138 +++ b/fs/fscache/page.c
45139 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45140 val = radix_tree_lookup(&cookie->stores, page->index);
45141 if (!val) {
45142 rcu_read_unlock();
45143 - fscache_stat(&fscache_n_store_vmscan_not_storing);
45144 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45145 __fscache_uncache_page(cookie, page);
45146 return true;
45147 }
45148 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45149 spin_unlock(&cookie->stores_lock);
45150
45151 if (xpage) {
45152 - fscache_stat(&fscache_n_store_vmscan_cancelled);
45153 - fscache_stat(&fscache_n_store_radix_deletes);
45154 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45155 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45156 ASSERTCMP(xpage, ==, page);
45157 } else {
45158 - fscache_stat(&fscache_n_store_vmscan_gone);
45159 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45160 }
45161
45162 wake_up_bit(&cookie->flags, 0);
45163 @@ -107,7 +107,7 @@ page_busy:
45164 /* we might want to wait here, but that could deadlock the allocator as
45165 * the work threads writing to the cache may all end up sleeping
45166 * on memory allocation */
45167 - fscache_stat(&fscache_n_store_vmscan_busy);
45168 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45169 return false;
45170 }
45171 EXPORT_SYMBOL(__fscache_maybe_release_page);
45172 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
45173 FSCACHE_COOKIE_STORING_TAG);
45174 if (!radix_tree_tag_get(&cookie->stores, page->index,
45175 FSCACHE_COOKIE_PENDING_TAG)) {
45176 - fscache_stat(&fscache_n_store_radix_deletes);
45177 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45178 xpage = radix_tree_delete(&cookie->stores, page->index);
45179 }
45180 spin_unlock(&cookie->stores_lock);
45181 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
45182
45183 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45184
45185 - fscache_stat(&fscache_n_attr_changed_calls);
45186 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45187
45188 if (fscache_object_is_active(object)) {
45189 fscache_stat(&fscache_n_cop_attr_changed);
45190 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45191
45192 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45193
45194 - fscache_stat(&fscache_n_attr_changed);
45195 + fscache_stat_unchecked(&fscache_n_attr_changed);
45196
45197 op = kzalloc(sizeof(*op), GFP_KERNEL);
45198 if (!op) {
45199 - fscache_stat(&fscache_n_attr_changed_nomem);
45200 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45201 _leave(" = -ENOMEM");
45202 return -ENOMEM;
45203 }
45204 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45205 if (fscache_submit_exclusive_op(object, op) < 0)
45206 goto nobufs;
45207 spin_unlock(&cookie->lock);
45208 - fscache_stat(&fscache_n_attr_changed_ok);
45209 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45210 fscache_put_operation(op);
45211 _leave(" = 0");
45212 return 0;
45213 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45214 nobufs:
45215 spin_unlock(&cookie->lock);
45216 kfree(op);
45217 - fscache_stat(&fscache_n_attr_changed_nobufs);
45218 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45219 _leave(" = %d", -ENOBUFS);
45220 return -ENOBUFS;
45221 }
45222 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
45223 /* allocate a retrieval operation and attempt to submit it */
45224 op = kzalloc(sizeof(*op), GFP_NOIO);
45225 if (!op) {
45226 - fscache_stat(&fscache_n_retrievals_nomem);
45227 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45228 return NULL;
45229 }
45230
45231 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45232 return 0;
45233 }
45234
45235 - fscache_stat(&fscache_n_retrievals_wait);
45236 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
45237
45238 jif = jiffies;
45239 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45240 fscache_wait_bit_interruptible,
45241 TASK_INTERRUPTIBLE) != 0) {
45242 - fscache_stat(&fscache_n_retrievals_intr);
45243 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45244 _leave(" = -ERESTARTSYS");
45245 return -ERESTARTSYS;
45246 }
45247 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45248 */
45249 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45250 struct fscache_retrieval *op,
45251 - atomic_t *stat_op_waits,
45252 - atomic_t *stat_object_dead)
45253 + atomic_unchecked_t *stat_op_waits,
45254 + atomic_unchecked_t *stat_object_dead)
45255 {
45256 int ret;
45257
45258 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45259 goto check_if_dead;
45260
45261 _debug(">>> WT");
45262 - fscache_stat(stat_op_waits);
45263 + fscache_stat_unchecked(stat_op_waits);
45264 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45265 fscache_wait_bit_interruptible,
45266 TASK_INTERRUPTIBLE) < 0) {
45267 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45268
45269 check_if_dead:
45270 if (unlikely(fscache_object_is_dead(object))) {
45271 - fscache_stat(stat_object_dead);
45272 + fscache_stat_unchecked(stat_object_dead);
45273 return -ENOBUFS;
45274 }
45275 return 0;
45276 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45277
45278 _enter("%p,%p,,,", cookie, page);
45279
45280 - fscache_stat(&fscache_n_retrievals);
45281 + fscache_stat_unchecked(&fscache_n_retrievals);
45282
45283 if (hlist_empty(&cookie->backing_objects))
45284 goto nobufs;
45285 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45286 goto nobufs_unlock;
45287 spin_unlock(&cookie->lock);
45288
45289 - fscache_stat(&fscache_n_retrieval_ops);
45290 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45291
45292 /* pin the netfs read context in case we need to do the actual netfs
45293 * read because we've encountered a cache read failure */
45294 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45295
45296 error:
45297 if (ret == -ENOMEM)
45298 - fscache_stat(&fscache_n_retrievals_nomem);
45299 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45300 else if (ret == -ERESTARTSYS)
45301 - fscache_stat(&fscache_n_retrievals_intr);
45302 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45303 else if (ret == -ENODATA)
45304 - fscache_stat(&fscache_n_retrievals_nodata);
45305 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45306 else if (ret < 0)
45307 - fscache_stat(&fscache_n_retrievals_nobufs);
45308 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45309 else
45310 - fscache_stat(&fscache_n_retrievals_ok);
45311 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45312
45313 fscache_put_retrieval(op);
45314 _leave(" = %d", ret);
45315 @@ -429,7 +429,7 @@ nobufs_unlock:
45316 spin_unlock(&cookie->lock);
45317 kfree(op);
45318 nobufs:
45319 - fscache_stat(&fscache_n_retrievals_nobufs);
45320 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45321 _leave(" = -ENOBUFS");
45322 return -ENOBUFS;
45323 }
45324 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45325
45326 _enter("%p,,%d,,,", cookie, *nr_pages);
45327
45328 - fscache_stat(&fscache_n_retrievals);
45329 + fscache_stat_unchecked(&fscache_n_retrievals);
45330
45331 if (hlist_empty(&cookie->backing_objects))
45332 goto nobufs;
45333 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45334 goto nobufs_unlock;
45335 spin_unlock(&cookie->lock);
45336
45337 - fscache_stat(&fscache_n_retrieval_ops);
45338 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45339
45340 /* pin the netfs read context in case we need to do the actual netfs
45341 * read because we've encountered a cache read failure */
45342 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45343
45344 error:
45345 if (ret == -ENOMEM)
45346 - fscache_stat(&fscache_n_retrievals_nomem);
45347 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45348 else if (ret == -ERESTARTSYS)
45349 - fscache_stat(&fscache_n_retrievals_intr);
45350 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45351 else if (ret == -ENODATA)
45352 - fscache_stat(&fscache_n_retrievals_nodata);
45353 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45354 else if (ret < 0)
45355 - fscache_stat(&fscache_n_retrievals_nobufs);
45356 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45357 else
45358 - fscache_stat(&fscache_n_retrievals_ok);
45359 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45360
45361 fscache_put_retrieval(op);
45362 _leave(" = %d", ret);
45363 @@ -545,7 +545,7 @@ nobufs_unlock:
45364 spin_unlock(&cookie->lock);
45365 kfree(op);
45366 nobufs:
45367 - fscache_stat(&fscache_n_retrievals_nobufs);
45368 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45369 _leave(" = -ENOBUFS");
45370 return -ENOBUFS;
45371 }
45372 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45373
45374 _enter("%p,%p,,,", cookie, page);
45375
45376 - fscache_stat(&fscache_n_allocs);
45377 + fscache_stat_unchecked(&fscache_n_allocs);
45378
45379 if (hlist_empty(&cookie->backing_objects))
45380 goto nobufs;
45381 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45382 goto nobufs_unlock;
45383 spin_unlock(&cookie->lock);
45384
45385 - fscache_stat(&fscache_n_alloc_ops);
45386 + fscache_stat_unchecked(&fscache_n_alloc_ops);
45387
45388 ret = fscache_wait_for_retrieval_activation(
45389 object, op,
45390 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45391
45392 error:
45393 if (ret == -ERESTARTSYS)
45394 - fscache_stat(&fscache_n_allocs_intr);
45395 + fscache_stat_unchecked(&fscache_n_allocs_intr);
45396 else if (ret < 0)
45397 - fscache_stat(&fscache_n_allocs_nobufs);
45398 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45399 else
45400 - fscache_stat(&fscache_n_allocs_ok);
45401 + fscache_stat_unchecked(&fscache_n_allocs_ok);
45402
45403 fscache_put_retrieval(op);
45404 _leave(" = %d", ret);
45405 @@ -625,7 +625,7 @@ nobufs_unlock:
45406 spin_unlock(&cookie->lock);
45407 kfree(op);
45408 nobufs:
45409 - fscache_stat(&fscache_n_allocs_nobufs);
45410 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45411 _leave(" = -ENOBUFS");
45412 return -ENOBUFS;
45413 }
45414 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45415
45416 spin_lock(&cookie->stores_lock);
45417
45418 - fscache_stat(&fscache_n_store_calls);
45419 + fscache_stat_unchecked(&fscache_n_store_calls);
45420
45421 /* find a page to store */
45422 page = NULL;
45423 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45424 page = results[0];
45425 _debug("gang %d [%lx]", n, page->index);
45426 if (page->index > op->store_limit) {
45427 - fscache_stat(&fscache_n_store_pages_over_limit);
45428 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45429 goto superseded;
45430 }
45431
45432 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45433 spin_unlock(&cookie->stores_lock);
45434 spin_unlock(&object->lock);
45435
45436 - fscache_stat(&fscache_n_store_pages);
45437 + fscache_stat_unchecked(&fscache_n_store_pages);
45438 fscache_stat(&fscache_n_cop_write_page);
45439 ret = object->cache->ops->write_page(op, page);
45440 fscache_stat_d(&fscache_n_cop_write_page);
45441 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45442 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45443 ASSERT(PageFsCache(page));
45444
45445 - fscache_stat(&fscache_n_stores);
45446 + fscache_stat_unchecked(&fscache_n_stores);
45447
45448 op = kzalloc(sizeof(*op), GFP_NOIO);
45449 if (!op)
45450 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45451 spin_unlock(&cookie->stores_lock);
45452 spin_unlock(&object->lock);
45453
45454 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
45455 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
45456 op->store_limit = object->store_limit;
45457
45458 if (fscache_submit_op(object, &op->op) < 0)
45459 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45460
45461 spin_unlock(&cookie->lock);
45462 radix_tree_preload_end();
45463 - fscache_stat(&fscache_n_store_ops);
45464 - fscache_stat(&fscache_n_stores_ok);
45465 + fscache_stat_unchecked(&fscache_n_store_ops);
45466 + fscache_stat_unchecked(&fscache_n_stores_ok);
45467
45468 /* the work queue now carries its own ref on the object */
45469 fscache_put_operation(&op->op);
45470 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45471 return 0;
45472
45473 already_queued:
45474 - fscache_stat(&fscache_n_stores_again);
45475 + fscache_stat_unchecked(&fscache_n_stores_again);
45476 already_pending:
45477 spin_unlock(&cookie->stores_lock);
45478 spin_unlock(&object->lock);
45479 spin_unlock(&cookie->lock);
45480 radix_tree_preload_end();
45481 kfree(op);
45482 - fscache_stat(&fscache_n_stores_ok);
45483 + fscache_stat_unchecked(&fscache_n_stores_ok);
45484 _leave(" = 0");
45485 return 0;
45486
45487 @@ -851,14 +851,14 @@ nobufs:
45488 spin_unlock(&cookie->lock);
45489 radix_tree_preload_end();
45490 kfree(op);
45491 - fscache_stat(&fscache_n_stores_nobufs);
45492 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
45493 _leave(" = -ENOBUFS");
45494 return -ENOBUFS;
45495
45496 nomem_free:
45497 kfree(op);
45498 nomem:
45499 - fscache_stat(&fscache_n_stores_oom);
45500 + fscache_stat_unchecked(&fscache_n_stores_oom);
45501 _leave(" = -ENOMEM");
45502 return -ENOMEM;
45503 }
45504 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
45505 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45506 ASSERTCMP(page, !=, NULL);
45507
45508 - fscache_stat(&fscache_n_uncaches);
45509 + fscache_stat_unchecked(&fscache_n_uncaches);
45510
45511 /* cache withdrawal may beat us to it */
45512 if (!PageFsCache(page))
45513 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
45514 unsigned long loop;
45515
45516 #ifdef CONFIG_FSCACHE_STATS
45517 - atomic_add(pagevec->nr, &fscache_n_marks);
45518 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
45519 #endif
45520
45521 for (loop = 0; loop < pagevec->nr; loop++) {
45522 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45523 index 4765190..2a067f2 100644
45524 --- a/fs/fscache/stats.c
45525 +++ b/fs/fscache/stats.c
45526 @@ -18,95 +18,95 @@
45527 /*
45528 * operation counters
45529 */
45530 -atomic_t fscache_n_op_pend;
45531 -atomic_t fscache_n_op_run;
45532 -atomic_t fscache_n_op_enqueue;
45533 -atomic_t fscache_n_op_requeue;
45534 -atomic_t fscache_n_op_deferred_release;
45535 -atomic_t fscache_n_op_release;
45536 -atomic_t fscache_n_op_gc;
45537 -atomic_t fscache_n_op_cancelled;
45538 -atomic_t fscache_n_op_rejected;
45539 +atomic_unchecked_t fscache_n_op_pend;
45540 +atomic_unchecked_t fscache_n_op_run;
45541 +atomic_unchecked_t fscache_n_op_enqueue;
45542 +atomic_unchecked_t fscache_n_op_requeue;
45543 +atomic_unchecked_t fscache_n_op_deferred_release;
45544 +atomic_unchecked_t fscache_n_op_release;
45545 +atomic_unchecked_t fscache_n_op_gc;
45546 +atomic_unchecked_t fscache_n_op_cancelled;
45547 +atomic_unchecked_t fscache_n_op_rejected;
45548
45549 -atomic_t fscache_n_attr_changed;
45550 -atomic_t fscache_n_attr_changed_ok;
45551 -atomic_t fscache_n_attr_changed_nobufs;
45552 -atomic_t fscache_n_attr_changed_nomem;
45553 -atomic_t fscache_n_attr_changed_calls;
45554 +atomic_unchecked_t fscache_n_attr_changed;
45555 +atomic_unchecked_t fscache_n_attr_changed_ok;
45556 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
45557 +atomic_unchecked_t fscache_n_attr_changed_nomem;
45558 +atomic_unchecked_t fscache_n_attr_changed_calls;
45559
45560 -atomic_t fscache_n_allocs;
45561 -atomic_t fscache_n_allocs_ok;
45562 -atomic_t fscache_n_allocs_wait;
45563 -atomic_t fscache_n_allocs_nobufs;
45564 -atomic_t fscache_n_allocs_intr;
45565 -atomic_t fscache_n_allocs_object_dead;
45566 -atomic_t fscache_n_alloc_ops;
45567 -atomic_t fscache_n_alloc_op_waits;
45568 +atomic_unchecked_t fscache_n_allocs;
45569 +atomic_unchecked_t fscache_n_allocs_ok;
45570 +atomic_unchecked_t fscache_n_allocs_wait;
45571 +atomic_unchecked_t fscache_n_allocs_nobufs;
45572 +atomic_unchecked_t fscache_n_allocs_intr;
45573 +atomic_unchecked_t fscache_n_allocs_object_dead;
45574 +atomic_unchecked_t fscache_n_alloc_ops;
45575 +atomic_unchecked_t fscache_n_alloc_op_waits;
45576
45577 -atomic_t fscache_n_retrievals;
45578 -atomic_t fscache_n_retrievals_ok;
45579 -atomic_t fscache_n_retrievals_wait;
45580 -atomic_t fscache_n_retrievals_nodata;
45581 -atomic_t fscache_n_retrievals_nobufs;
45582 -atomic_t fscache_n_retrievals_intr;
45583 -atomic_t fscache_n_retrievals_nomem;
45584 -atomic_t fscache_n_retrievals_object_dead;
45585 -atomic_t fscache_n_retrieval_ops;
45586 -atomic_t fscache_n_retrieval_op_waits;
45587 +atomic_unchecked_t fscache_n_retrievals;
45588 +atomic_unchecked_t fscache_n_retrievals_ok;
45589 +atomic_unchecked_t fscache_n_retrievals_wait;
45590 +atomic_unchecked_t fscache_n_retrievals_nodata;
45591 +atomic_unchecked_t fscache_n_retrievals_nobufs;
45592 +atomic_unchecked_t fscache_n_retrievals_intr;
45593 +atomic_unchecked_t fscache_n_retrievals_nomem;
45594 +atomic_unchecked_t fscache_n_retrievals_object_dead;
45595 +atomic_unchecked_t fscache_n_retrieval_ops;
45596 +atomic_unchecked_t fscache_n_retrieval_op_waits;
45597
45598 -atomic_t fscache_n_stores;
45599 -atomic_t fscache_n_stores_ok;
45600 -atomic_t fscache_n_stores_again;
45601 -atomic_t fscache_n_stores_nobufs;
45602 -atomic_t fscache_n_stores_oom;
45603 -atomic_t fscache_n_store_ops;
45604 -atomic_t fscache_n_store_calls;
45605 -atomic_t fscache_n_store_pages;
45606 -atomic_t fscache_n_store_radix_deletes;
45607 -atomic_t fscache_n_store_pages_over_limit;
45608 +atomic_unchecked_t fscache_n_stores;
45609 +atomic_unchecked_t fscache_n_stores_ok;
45610 +atomic_unchecked_t fscache_n_stores_again;
45611 +atomic_unchecked_t fscache_n_stores_nobufs;
45612 +atomic_unchecked_t fscache_n_stores_oom;
45613 +atomic_unchecked_t fscache_n_store_ops;
45614 +atomic_unchecked_t fscache_n_store_calls;
45615 +atomic_unchecked_t fscache_n_store_pages;
45616 +atomic_unchecked_t fscache_n_store_radix_deletes;
45617 +atomic_unchecked_t fscache_n_store_pages_over_limit;
45618
45619 -atomic_t fscache_n_store_vmscan_not_storing;
45620 -atomic_t fscache_n_store_vmscan_gone;
45621 -atomic_t fscache_n_store_vmscan_busy;
45622 -atomic_t fscache_n_store_vmscan_cancelled;
45623 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45624 +atomic_unchecked_t fscache_n_store_vmscan_gone;
45625 +atomic_unchecked_t fscache_n_store_vmscan_busy;
45626 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45627
45628 -atomic_t fscache_n_marks;
45629 -atomic_t fscache_n_uncaches;
45630 +atomic_unchecked_t fscache_n_marks;
45631 +atomic_unchecked_t fscache_n_uncaches;
45632
45633 -atomic_t fscache_n_acquires;
45634 -atomic_t fscache_n_acquires_null;
45635 -atomic_t fscache_n_acquires_no_cache;
45636 -atomic_t fscache_n_acquires_ok;
45637 -atomic_t fscache_n_acquires_nobufs;
45638 -atomic_t fscache_n_acquires_oom;
45639 +atomic_unchecked_t fscache_n_acquires;
45640 +atomic_unchecked_t fscache_n_acquires_null;
45641 +atomic_unchecked_t fscache_n_acquires_no_cache;
45642 +atomic_unchecked_t fscache_n_acquires_ok;
45643 +atomic_unchecked_t fscache_n_acquires_nobufs;
45644 +atomic_unchecked_t fscache_n_acquires_oom;
45645
45646 -atomic_t fscache_n_updates;
45647 -atomic_t fscache_n_updates_null;
45648 -atomic_t fscache_n_updates_run;
45649 +atomic_unchecked_t fscache_n_updates;
45650 +atomic_unchecked_t fscache_n_updates_null;
45651 +atomic_unchecked_t fscache_n_updates_run;
45652
45653 -atomic_t fscache_n_relinquishes;
45654 -atomic_t fscache_n_relinquishes_null;
45655 -atomic_t fscache_n_relinquishes_waitcrt;
45656 -atomic_t fscache_n_relinquishes_retire;
45657 +atomic_unchecked_t fscache_n_relinquishes;
45658 +atomic_unchecked_t fscache_n_relinquishes_null;
45659 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45660 +atomic_unchecked_t fscache_n_relinquishes_retire;
45661
45662 -atomic_t fscache_n_cookie_index;
45663 -atomic_t fscache_n_cookie_data;
45664 -atomic_t fscache_n_cookie_special;
45665 +atomic_unchecked_t fscache_n_cookie_index;
45666 +atomic_unchecked_t fscache_n_cookie_data;
45667 +atomic_unchecked_t fscache_n_cookie_special;
45668
45669 -atomic_t fscache_n_object_alloc;
45670 -atomic_t fscache_n_object_no_alloc;
45671 -atomic_t fscache_n_object_lookups;
45672 -atomic_t fscache_n_object_lookups_negative;
45673 -atomic_t fscache_n_object_lookups_positive;
45674 -atomic_t fscache_n_object_lookups_timed_out;
45675 -atomic_t fscache_n_object_created;
45676 -atomic_t fscache_n_object_avail;
45677 -atomic_t fscache_n_object_dead;
45678 +atomic_unchecked_t fscache_n_object_alloc;
45679 +atomic_unchecked_t fscache_n_object_no_alloc;
45680 +atomic_unchecked_t fscache_n_object_lookups;
45681 +atomic_unchecked_t fscache_n_object_lookups_negative;
45682 +atomic_unchecked_t fscache_n_object_lookups_positive;
45683 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
45684 +atomic_unchecked_t fscache_n_object_created;
45685 +atomic_unchecked_t fscache_n_object_avail;
45686 +atomic_unchecked_t fscache_n_object_dead;
45687
45688 -atomic_t fscache_n_checkaux_none;
45689 -atomic_t fscache_n_checkaux_okay;
45690 -atomic_t fscache_n_checkaux_update;
45691 -atomic_t fscache_n_checkaux_obsolete;
45692 +atomic_unchecked_t fscache_n_checkaux_none;
45693 +atomic_unchecked_t fscache_n_checkaux_okay;
45694 +atomic_unchecked_t fscache_n_checkaux_update;
45695 +atomic_unchecked_t fscache_n_checkaux_obsolete;
45696
45697 atomic_t fscache_n_cop_alloc_object;
45698 atomic_t fscache_n_cop_lookup_object;
45699 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
45700 seq_puts(m, "FS-Cache statistics\n");
45701
45702 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
45703 - atomic_read(&fscache_n_cookie_index),
45704 - atomic_read(&fscache_n_cookie_data),
45705 - atomic_read(&fscache_n_cookie_special));
45706 + atomic_read_unchecked(&fscache_n_cookie_index),
45707 + atomic_read_unchecked(&fscache_n_cookie_data),
45708 + atomic_read_unchecked(&fscache_n_cookie_special));
45709
45710 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
45711 - atomic_read(&fscache_n_object_alloc),
45712 - atomic_read(&fscache_n_object_no_alloc),
45713 - atomic_read(&fscache_n_object_avail),
45714 - atomic_read(&fscache_n_object_dead));
45715 + atomic_read_unchecked(&fscache_n_object_alloc),
45716 + atomic_read_unchecked(&fscache_n_object_no_alloc),
45717 + atomic_read_unchecked(&fscache_n_object_avail),
45718 + atomic_read_unchecked(&fscache_n_object_dead));
45719 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
45720 - atomic_read(&fscache_n_checkaux_none),
45721 - atomic_read(&fscache_n_checkaux_okay),
45722 - atomic_read(&fscache_n_checkaux_update),
45723 - atomic_read(&fscache_n_checkaux_obsolete));
45724 + atomic_read_unchecked(&fscache_n_checkaux_none),
45725 + atomic_read_unchecked(&fscache_n_checkaux_okay),
45726 + atomic_read_unchecked(&fscache_n_checkaux_update),
45727 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
45728
45729 seq_printf(m, "Pages : mrk=%u unc=%u\n",
45730 - atomic_read(&fscache_n_marks),
45731 - atomic_read(&fscache_n_uncaches));
45732 + atomic_read_unchecked(&fscache_n_marks),
45733 + atomic_read_unchecked(&fscache_n_uncaches));
45734
45735 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
45736 " oom=%u\n",
45737 - atomic_read(&fscache_n_acquires),
45738 - atomic_read(&fscache_n_acquires_null),
45739 - atomic_read(&fscache_n_acquires_no_cache),
45740 - atomic_read(&fscache_n_acquires_ok),
45741 - atomic_read(&fscache_n_acquires_nobufs),
45742 - atomic_read(&fscache_n_acquires_oom));
45743 + atomic_read_unchecked(&fscache_n_acquires),
45744 + atomic_read_unchecked(&fscache_n_acquires_null),
45745 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
45746 + atomic_read_unchecked(&fscache_n_acquires_ok),
45747 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
45748 + atomic_read_unchecked(&fscache_n_acquires_oom));
45749
45750 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
45751 - atomic_read(&fscache_n_object_lookups),
45752 - atomic_read(&fscache_n_object_lookups_negative),
45753 - atomic_read(&fscache_n_object_lookups_positive),
45754 - atomic_read(&fscache_n_object_created),
45755 - atomic_read(&fscache_n_object_lookups_timed_out));
45756 + atomic_read_unchecked(&fscache_n_object_lookups),
45757 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
45758 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
45759 + atomic_read_unchecked(&fscache_n_object_created),
45760 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
45761
45762 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
45763 - atomic_read(&fscache_n_updates),
45764 - atomic_read(&fscache_n_updates_null),
45765 - atomic_read(&fscache_n_updates_run));
45766 + atomic_read_unchecked(&fscache_n_updates),
45767 + atomic_read_unchecked(&fscache_n_updates_null),
45768 + atomic_read_unchecked(&fscache_n_updates_run));
45769
45770 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
45771 - atomic_read(&fscache_n_relinquishes),
45772 - atomic_read(&fscache_n_relinquishes_null),
45773 - atomic_read(&fscache_n_relinquishes_waitcrt),
45774 - atomic_read(&fscache_n_relinquishes_retire));
45775 + atomic_read_unchecked(&fscache_n_relinquishes),
45776 + atomic_read_unchecked(&fscache_n_relinquishes_null),
45777 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
45778 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
45779
45780 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
45781 - atomic_read(&fscache_n_attr_changed),
45782 - atomic_read(&fscache_n_attr_changed_ok),
45783 - atomic_read(&fscache_n_attr_changed_nobufs),
45784 - atomic_read(&fscache_n_attr_changed_nomem),
45785 - atomic_read(&fscache_n_attr_changed_calls));
45786 + atomic_read_unchecked(&fscache_n_attr_changed),
45787 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
45788 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
45789 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
45790 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
45791
45792 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
45793 - atomic_read(&fscache_n_allocs),
45794 - atomic_read(&fscache_n_allocs_ok),
45795 - atomic_read(&fscache_n_allocs_wait),
45796 - atomic_read(&fscache_n_allocs_nobufs),
45797 - atomic_read(&fscache_n_allocs_intr));
45798 + atomic_read_unchecked(&fscache_n_allocs),
45799 + atomic_read_unchecked(&fscache_n_allocs_ok),
45800 + atomic_read_unchecked(&fscache_n_allocs_wait),
45801 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
45802 + atomic_read_unchecked(&fscache_n_allocs_intr));
45803 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
45804 - atomic_read(&fscache_n_alloc_ops),
45805 - atomic_read(&fscache_n_alloc_op_waits),
45806 - atomic_read(&fscache_n_allocs_object_dead));
45807 + atomic_read_unchecked(&fscache_n_alloc_ops),
45808 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
45809 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
45810
45811 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
45812 " int=%u oom=%u\n",
45813 - atomic_read(&fscache_n_retrievals),
45814 - atomic_read(&fscache_n_retrievals_ok),
45815 - atomic_read(&fscache_n_retrievals_wait),
45816 - atomic_read(&fscache_n_retrievals_nodata),
45817 - atomic_read(&fscache_n_retrievals_nobufs),
45818 - atomic_read(&fscache_n_retrievals_intr),
45819 - atomic_read(&fscache_n_retrievals_nomem));
45820 + atomic_read_unchecked(&fscache_n_retrievals),
45821 + atomic_read_unchecked(&fscache_n_retrievals_ok),
45822 + atomic_read_unchecked(&fscache_n_retrievals_wait),
45823 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
45824 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
45825 + atomic_read_unchecked(&fscache_n_retrievals_intr),
45826 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
45827 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
45828 - atomic_read(&fscache_n_retrieval_ops),
45829 - atomic_read(&fscache_n_retrieval_op_waits),
45830 - atomic_read(&fscache_n_retrievals_object_dead));
45831 + atomic_read_unchecked(&fscache_n_retrieval_ops),
45832 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
45833 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
45834
45835 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
45836 - atomic_read(&fscache_n_stores),
45837 - atomic_read(&fscache_n_stores_ok),
45838 - atomic_read(&fscache_n_stores_again),
45839 - atomic_read(&fscache_n_stores_nobufs),
45840 - atomic_read(&fscache_n_stores_oom));
45841 + atomic_read_unchecked(&fscache_n_stores),
45842 + atomic_read_unchecked(&fscache_n_stores_ok),
45843 + atomic_read_unchecked(&fscache_n_stores_again),
45844 + atomic_read_unchecked(&fscache_n_stores_nobufs),
45845 + atomic_read_unchecked(&fscache_n_stores_oom));
45846 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
45847 - atomic_read(&fscache_n_store_ops),
45848 - atomic_read(&fscache_n_store_calls),
45849 - atomic_read(&fscache_n_store_pages),
45850 - atomic_read(&fscache_n_store_radix_deletes),
45851 - atomic_read(&fscache_n_store_pages_over_limit));
45852 + atomic_read_unchecked(&fscache_n_store_ops),
45853 + atomic_read_unchecked(&fscache_n_store_calls),
45854 + atomic_read_unchecked(&fscache_n_store_pages),
45855 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
45856 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
45857
45858 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
45859 - atomic_read(&fscache_n_store_vmscan_not_storing),
45860 - atomic_read(&fscache_n_store_vmscan_gone),
45861 - atomic_read(&fscache_n_store_vmscan_busy),
45862 - atomic_read(&fscache_n_store_vmscan_cancelled));
45863 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
45864 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
45865 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
45866 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
45867
45868 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
45869 - atomic_read(&fscache_n_op_pend),
45870 - atomic_read(&fscache_n_op_run),
45871 - atomic_read(&fscache_n_op_enqueue),
45872 - atomic_read(&fscache_n_op_cancelled),
45873 - atomic_read(&fscache_n_op_rejected));
45874 + atomic_read_unchecked(&fscache_n_op_pend),
45875 + atomic_read_unchecked(&fscache_n_op_run),
45876 + atomic_read_unchecked(&fscache_n_op_enqueue),
45877 + atomic_read_unchecked(&fscache_n_op_cancelled),
45878 + atomic_read_unchecked(&fscache_n_op_rejected));
45879 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
45880 - atomic_read(&fscache_n_op_deferred_release),
45881 - atomic_read(&fscache_n_op_release),
45882 - atomic_read(&fscache_n_op_gc));
45883 + atomic_read_unchecked(&fscache_n_op_deferred_release),
45884 + atomic_read_unchecked(&fscache_n_op_release),
45885 + atomic_read_unchecked(&fscache_n_op_gc));
45886
45887 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
45888 atomic_read(&fscache_n_cop_alloc_object),
45889 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
45890 index 3426521..3b75162 100644
45891 --- a/fs/fuse/cuse.c
45892 +++ b/fs/fuse/cuse.c
45893 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
45894 INIT_LIST_HEAD(&cuse_conntbl[i]);
45895
45896 /* inherit and extend fuse_dev_operations */
45897 - cuse_channel_fops = fuse_dev_operations;
45898 - cuse_channel_fops.owner = THIS_MODULE;
45899 - cuse_channel_fops.open = cuse_channel_open;
45900 - cuse_channel_fops.release = cuse_channel_release;
45901 + pax_open_kernel();
45902 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
45903 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
45904 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
45905 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
45906 + pax_close_kernel();
45907
45908 cuse_class = class_create(THIS_MODULE, "cuse");
45909 if (IS_ERR(cuse_class))
45910 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
45911 index 7df2b5e..5804aa7 100644
45912 --- a/fs/fuse/dev.c
45913 +++ b/fs/fuse/dev.c
45914 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
45915 ret = 0;
45916 pipe_lock(pipe);
45917
45918 - if (!pipe->readers) {
45919 + if (!atomic_read(&pipe->readers)) {
45920 send_sig(SIGPIPE, current, 0);
45921 if (!ret)
45922 ret = -EPIPE;
45923 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
45924 index df5ac04..08cee2a 100644
45925 --- a/fs/fuse/dir.c
45926 +++ b/fs/fuse/dir.c
45927 @@ -1180,7 +1180,7 @@ static char *read_link(struct dentry *dentry)
45928 return link;
45929 }
45930
45931 -static void free_link(char *link)
45932 +static void free_link(const char *link)
45933 {
45934 if (!IS_ERR(link))
45935 free_page((unsigned long) link);
45936 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
45937 index a9ba244..d9df391 100644
45938 --- a/fs/gfs2/inode.c
45939 +++ b/fs/gfs2/inode.c
45940 @@ -1496,7 +1496,7 @@ out:
45941
45942 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45943 {
45944 - char *s = nd_get_link(nd);
45945 + const char *s = nd_get_link(nd);
45946 if (!IS_ERR(s))
45947 kfree(s);
45948 }
45949 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
45950 index 001ef01..f7d5f07 100644
45951 --- a/fs/hugetlbfs/inode.c
45952 +++ b/fs/hugetlbfs/inode.c
45953 @@ -920,7 +920,7 @@ static struct file_system_type hugetlbfs_fs_type = {
45954 .kill_sb = kill_litter_super,
45955 };
45956
45957 -static struct vfsmount *hugetlbfs_vfsmount;
45958 +struct vfsmount *hugetlbfs_vfsmount;
45959
45960 static int can_do_hugetlb_shm(void)
45961 {
45962 diff --git a/fs/inode.c b/fs/inode.c
45963 index 9f4f5fe..6214688 100644
45964 --- a/fs/inode.c
45965 +++ b/fs/inode.c
45966 @@ -860,8 +860,8 @@ unsigned int get_next_ino(void)
45967
45968 #ifdef CONFIG_SMP
45969 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
45970 - static atomic_t shared_last_ino;
45971 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
45972 + static atomic_unchecked_t shared_last_ino;
45973 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
45974
45975 res = next - LAST_INO_BATCH;
45976 }
45977 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
45978 index 4a6cf28..d3a29d3 100644
45979 --- a/fs/jffs2/erase.c
45980 +++ b/fs/jffs2/erase.c
45981 @@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
45982 struct jffs2_unknown_node marker = {
45983 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
45984 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
45985 - .totlen = cpu_to_je32(c->cleanmarker_size)
45986 + .totlen = cpu_to_je32(c->cleanmarker_size),
45987 + .hdr_crc = cpu_to_je32(0)
45988 };
45989
45990 jffs2_prealloc_raw_node_refs(c, jeb, 1);
45991 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
45992 index 74d9be1..d5dd140 100644
45993 --- a/fs/jffs2/wbuf.c
45994 +++ b/fs/jffs2/wbuf.c
45995 @@ -1022,7 +1022,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
45996 {
45997 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
45998 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
45999 - .totlen = constant_cpu_to_je32(8)
46000 + .totlen = constant_cpu_to_je32(8),
46001 + .hdr_crc = constant_cpu_to_je32(0)
46002 };
46003
46004 /*
46005 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
46006 index 4a82950..bcaa0cb 100644
46007 --- a/fs/jfs/super.c
46008 +++ b/fs/jfs/super.c
46009 @@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
46010
46011 jfs_inode_cachep =
46012 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
46013 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
46014 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
46015 init_once);
46016 if (jfs_inode_cachep == NULL)
46017 return -ENOMEM;
46018 diff --git a/fs/libfs.c b/fs/libfs.c
46019 index 18d08f5..fe3dc64 100644
46020 --- a/fs/libfs.c
46021 +++ b/fs/libfs.c
46022 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46023
46024 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
46025 struct dentry *next;
46026 + char d_name[sizeof(next->d_iname)];
46027 + const unsigned char *name;
46028 +
46029 next = list_entry(p, struct dentry, d_u.d_child);
46030 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
46031 if (!simple_positive(next)) {
46032 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46033
46034 spin_unlock(&next->d_lock);
46035 spin_unlock(&dentry->d_lock);
46036 - if (filldir(dirent, next->d_name.name,
46037 + name = next->d_name.name;
46038 + if (name == next->d_iname) {
46039 + memcpy(d_name, name, next->d_name.len);
46040 + name = d_name;
46041 + }
46042 + if (filldir(dirent, name,
46043 next->d_name.len, filp->f_pos,
46044 next->d_inode->i_ino,
46045 dt_type(next->d_inode)) < 0)
46046 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
46047 index 8392cb8..80d6193 100644
46048 --- a/fs/lockd/clntproc.c
46049 +++ b/fs/lockd/clntproc.c
46050 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
46051 /*
46052 * Cookie counter for NLM requests
46053 */
46054 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
46055 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
46056
46057 void nlmclnt_next_cookie(struct nlm_cookie *c)
46058 {
46059 - u32 cookie = atomic_inc_return(&nlm_cookie);
46060 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
46061
46062 memcpy(c->data, &cookie, 4);
46063 c->len=4;
46064 diff --git a/fs/locks.c b/fs/locks.c
46065 index 0d68f1f..f216b79 100644
46066 --- a/fs/locks.c
46067 +++ b/fs/locks.c
46068 @@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *filp)
46069 return;
46070
46071 if (filp->f_op && filp->f_op->flock) {
46072 - struct file_lock fl = {
46073 + struct file_lock flock = {
46074 .fl_pid = current->tgid,
46075 .fl_file = filp,
46076 .fl_flags = FL_FLOCK,
46077 .fl_type = F_UNLCK,
46078 .fl_end = OFFSET_MAX,
46079 };
46080 - filp->f_op->flock(filp, F_SETLKW, &fl);
46081 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
46082 - fl.fl_ops->fl_release_private(&fl);
46083 + filp->f_op->flock(filp, F_SETLKW, &flock);
46084 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
46085 + flock.fl_ops->fl_release_private(&flock);
46086 }
46087
46088 lock_flocks();
46089 diff --git a/fs/namei.c b/fs/namei.c
46090 index c427919..e37fd3f 100644
46091 --- a/fs/namei.c
46092 +++ b/fs/namei.c
46093 @@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
46094 if (ret != -EACCES)
46095 return ret;
46096
46097 +#ifdef CONFIG_GRKERNSEC
46098 + /* we'll block if we have to log due to a denied capability use */
46099 + if (mask & MAY_NOT_BLOCK)
46100 + return -ECHILD;
46101 +#endif
46102 +
46103 if (S_ISDIR(inode->i_mode)) {
46104 /* DACs are overridable for directories */
46105 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46106 - return 0;
46107 if (!(mask & MAY_WRITE))
46108 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46109 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46110 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46111 return 0;
46112 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46113 + return 0;
46114 return -EACCES;
46115 }
46116 /*
46117 + * Searching includes executable on directories, else just read.
46118 + */
46119 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46120 + if (mask == MAY_READ)
46121 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46122 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46123 + return 0;
46124 +
46125 + /*
46126 * Read/write DACs are always overridable.
46127 * Executable DACs are overridable when there is
46128 * at least one exec bit set.
46129 @@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
46130 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46131 return 0;
46132
46133 - /*
46134 - * Searching includes executable on directories, else just read.
46135 - */
46136 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46137 - if (mask == MAY_READ)
46138 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46139 - return 0;
46140 -
46141 return -EACCES;
46142 }
46143
46144 @@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
46145 return error;
46146 }
46147
46148 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
46149 + dentry->d_inode, dentry, nd->path.mnt)) {
46150 + error = -EACCES;
46151 + *p = ERR_PTR(error); /* no ->put_link(), please */
46152 + path_put(&nd->path);
46153 + return error;
46154 + }
46155 +
46156 nd->last_type = LAST_BIND;
46157 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
46158 error = PTR_ERR(*p);
46159 if (!IS_ERR(*p)) {
46160 - char *s = nd_get_link(nd);
46161 + const char *s = nd_get_link(nd);
46162 error = 0;
46163 if (s)
46164 error = __vfs_follow_link(nd, s);
46165 @@ -1753,6 +1769,21 @@ static int path_lookupat(int dfd, const char *name,
46166 if (!err)
46167 err = complete_walk(nd);
46168
46169 + if (!(nd->flags & LOOKUP_PARENT)) {
46170 +#ifdef CONFIG_GRKERNSEC
46171 + if (flags & LOOKUP_RCU) {
46172 + if (!err)
46173 + path_put(&nd->path);
46174 + err = -ECHILD;
46175 + } else
46176 +#endif
46177 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46178 + if (!err)
46179 + path_put(&nd->path);
46180 + err = -ENOENT;
46181 + }
46182 + }
46183 +
46184 if (!err && nd->flags & LOOKUP_DIRECTORY) {
46185 if (!nd->inode->i_op->lookup) {
46186 path_put(&nd->path);
46187 @@ -1780,6 +1811,15 @@ static int do_path_lookup(int dfd, const char *name,
46188 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
46189
46190 if (likely(!retval)) {
46191 + if (*name != '/' && nd->path.dentry && nd->inode) {
46192 +#ifdef CONFIG_GRKERNSEC
46193 + if (flags & LOOKUP_RCU)
46194 + return -ECHILD;
46195 +#endif
46196 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
46197 + return -ENOENT;
46198 + }
46199 +
46200 if (unlikely(!audit_dummy_context())) {
46201 if (nd->path.dentry && nd->inode)
46202 audit_inode(name, nd->path.dentry);
46203 @@ -2126,6 +2166,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
46204 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
46205 return -EPERM;
46206
46207 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
46208 + return -EPERM;
46209 + if (gr_handle_rawio(inode))
46210 + return -EPERM;
46211 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
46212 + return -EACCES;
46213 +
46214 return 0;
46215 }
46216
46217 @@ -2187,6 +2234,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46218 error = complete_walk(nd);
46219 if (error)
46220 return ERR_PTR(error);
46221 +#ifdef CONFIG_GRKERNSEC
46222 + if (nd->flags & LOOKUP_RCU) {
46223 + error = -ECHILD;
46224 + goto exit;
46225 + }
46226 +#endif
46227 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46228 + error = -ENOENT;
46229 + goto exit;
46230 + }
46231 audit_inode(pathname, nd->path.dentry);
46232 if (open_flag & O_CREAT) {
46233 error = -EISDIR;
46234 @@ -2197,6 +2254,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46235 error = complete_walk(nd);
46236 if (error)
46237 return ERR_PTR(error);
46238 +#ifdef CONFIG_GRKERNSEC
46239 + if (nd->flags & LOOKUP_RCU) {
46240 + error = -ECHILD;
46241 + goto exit;
46242 + }
46243 +#endif
46244 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
46245 + error = -ENOENT;
46246 + goto exit;
46247 + }
46248 audit_inode(pathname, dir);
46249 goto ok;
46250 }
46251 @@ -2218,6 +2285,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46252 error = complete_walk(nd);
46253 if (error)
46254 return ERR_PTR(error);
46255 +#ifdef CONFIG_GRKERNSEC
46256 + if (nd->flags & LOOKUP_RCU) {
46257 + error = -ECHILD;
46258 + goto exit;
46259 + }
46260 +#endif
46261 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46262 + error = -ENOENT;
46263 + goto exit;
46264 + }
46265
46266 error = -ENOTDIR;
46267 if (nd->flags & LOOKUP_DIRECTORY) {
46268 @@ -2258,6 +2335,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46269 /* Negative dentry, just create the file */
46270 if (!dentry->d_inode) {
46271 umode_t mode = op->mode;
46272 +
46273 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
46274 + error = -EACCES;
46275 + goto exit_mutex_unlock;
46276 + }
46277 +
46278 if (!IS_POSIXACL(dir->d_inode))
46279 mode &= ~current_umask();
46280 /*
46281 @@ -2281,6 +2364,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46282 error = vfs_create(dir->d_inode, dentry, mode, nd);
46283 if (error)
46284 goto exit_mutex_unlock;
46285 + else
46286 + gr_handle_create(path->dentry, path->mnt);
46287 mutex_unlock(&dir->d_inode->i_mutex);
46288 dput(nd->path.dentry);
46289 nd->path.dentry = dentry;
46290 @@ -2290,6 +2375,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46291 /*
46292 * It already exists.
46293 */
46294 +
46295 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
46296 + error = -ENOENT;
46297 + goto exit_mutex_unlock;
46298 + }
46299 +
46300 + /* only check if O_CREAT is specified, all other checks need to go
46301 + into may_open */
46302 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
46303 + error = -EACCES;
46304 + goto exit_mutex_unlock;
46305 + }
46306 +
46307 mutex_unlock(&dir->d_inode->i_mutex);
46308 audit_inode(pathname, path->dentry);
46309
46310 @@ -2502,6 +2600,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
46311 *path = nd.path;
46312 return dentry;
46313 eexist:
46314 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
46315 + dput(dentry);
46316 + dentry = ERR_PTR(-ENOENT);
46317 + goto fail;
46318 + }
46319 dput(dentry);
46320 dentry = ERR_PTR(-EEXIST);
46321 fail:
46322 @@ -2524,6 +2627,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
46323 }
46324 EXPORT_SYMBOL(user_path_create);
46325
46326 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46327 +{
46328 + char *tmp = getname(pathname);
46329 + struct dentry *res;
46330 + if (IS_ERR(tmp))
46331 + return ERR_CAST(tmp);
46332 + res = kern_path_create(dfd, tmp, path, is_dir);
46333 + if (IS_ERR(res))
46334 + putname(tmp);
46335 + else
46336 + *to = tmp;
46337 + return res;
46338 +}
46339 +
46340 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
46341 {
46342 int error = may_create(dir, dentry);
46343 @@ -2591,6 +2708,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46344 error = mnt_want_write(path.mnt);
46345 if (error)
46346 goto out_dput;
46347 +
46348 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
46349 + error = -EPERM;
46350 + goto out_drop_write;
46351 + }
46352 +
46353 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
46354 + error = -EACCES;
46355 + goto out_drop_write;
46356 + }
46357 +
46358 error = security_path_mknod(&path, dentry, mode, dev);
46359 if (error)
46360 goto out_drop_write;
46361 @@ -2608,6 +2736,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46362 }
46363 out_drop_write:
46364 mnt_drop_write(path.mnt);
46365 +
46366 + if (!error)
46367 + gr_handle_create(dentry, path.mnt);
46368 out_dput:
46369 dput(dentry);
46370 mutex_unlock(&path.dentry->d_inode->i_mutex);
46371 @@ -2661,12 +2792,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
46372 error = mnt_want_write(path.mnt);
46373 if (error)
46374 goto out_dput;
46375 +
46376 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
46377 + error = -EACCES;
46378 + goto out_drop_write;
46379 + }
46380 +
46381 error = security_path_mkdir(&path, dentry, mode);
46382 if (error)
46383 goto out_drop_write;
46384 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
46385 out_drop_write:
46386 mnt_drop_write(path.mnt);
46387 +
46388 + if (!error)
46389 + gr_handle_create(dentry, path.mnt);
46390 out_dput:
46391 dput(dentry);
46392 mutex_unlock(&path.dentry->d_inode->i_mutex);
46393 @@ -2746,6 +2886,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46394 char * name;
46395 struct dentry *dentry;
46396 struct nameidata nd;
46397 + ino_t saved_ino = 0;
46398 + dev_t saved_dev = 0;
46399
46400 error = user_path_parent(dfd, pathname, &nd, &name);
46401 if (error)
46402 @@ -2774,6 +2916,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
46403 error = -ENOENT;
46404 goto exit3;
46405 }
46406 +
46407 + saved_ino = dentry->d_inode->i_ino;
46408 + saved_dev = gr_get_dev_from_dentry(dentry);
46409 +
46410 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
46411 + error = -EACCES;
46412 + goto exit3;
46413 + }
46414 +
46415 error = mnt_want_write(nd.path.mnt);
46416 if (error)
46417 goto exit3;
46418 @@ -2781,6 +2932,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46419 if (error)
46420 goto exit4;
46421 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
46422 + if (!error && (saved_dev || saved_ino))
46423 + gr_handle_delete(saved_ino, saved_dev);
46424 exit4:
46425 mnt_drop_write(nd.path.mnt);
46426 exit3:
46427 @@ -2843,6 +2996,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46428 struct dentry *dentry;
46429 struct nameidata nd;
46430 struct inode *inode = NULL;
46431 + ino_t saved_ino = 0;
46432 + dev_t saved_dev = 0;
46433
46434 error = user_path_parent(dfd, pathname, &nd, &name);
46435 if (error)
46436 @@ -2865,6 +3020,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46437 if (!inode)
46438 goto slashes;
46439 ihold(inode);
46440 +
46441 + if (inode->i_nlink <= 1) {
46442 + saved_ino = inode->i_ino;
46443 + saved_dev = gr_get_dev_from_dentry(dentry);
46444 + }
46445 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
46446 + error = -EACCES;
46447 + goto exit2;
46448 + }
46449 +
46450 error = mnt_want_write(nd.path.mnt);
46451 if (error)
46452 goto exit2;
46453 @@ -2872,6 +3037,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46454 if (error)
46455 goto exit3;
46456 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
46457 + if (!error && (saved_ino || saved_dev))
46458 + gr_handle_delete(saved_ino, saved_dev);
46459 exit3:
46460 mnt_drop_write(nd.path.mnt);
46461 exit2:
46462 @@ -2947,10 +3114,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
46463 error = mnt_want_write(path.mnt);
46464 if (error)
46465 goto out_dput;
46466 +
46467 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
46468 + error = -EACCES;
46469 + goto out_drop_write;
46470 + }
46471 +
46472 error = security_path_symlink(&path, dentry, from);
46473 if (error)
46474 goto out_drop_write;
46475 error = vfs_symlink(path.dentry->d_inode, dentry, from);
46476 + if (!error)
46477 + gr_handle_create(dentry, path.mnt);
46478 out_drop_write:
46479 mnt_drop_write(path.mnt);
46480 out_dput:
46481 @@ -3025,6 +3200,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46482 {
46483 struct dentry *new_dentry;
46484 struct path old_path, new_path;
46485 + char *to = NULL;
46486 int how = 0;
46487 int error;
46488
46489 @@ -3048,7 +3224,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46490 if (error)
46491 return error;
46492
46493 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
46494 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
46495 error = PTR_ERR(new_dentry);
46496 if (IS_ERR(new_dentry))
46497 goto out;
46498 @@ -3059,13 +3235,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46499 error = mnt_want_write(new_path.mnt);
46500 if (error)
46501 goto out_dput;
46502 +
46503 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
46504 + old_path.dentry->d_inode,
46505 + old_path.dentry->d_inode->i_mode, to)) {
46506 + error = -EACCES;
46507 + goto out_drop_write;
46508 + }
46509 +
46510 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
46511 + old_path.dentry, old_path.mnt, to)) {
46512 + error = -EACCES;
46513 + goto out_drop_write;
46514 + }
46515 +
46516 error = security_path_link(old_path.dentry, &new_path, new_dentry);
46517 if (error)
46518 goto out_drop_write;
46519 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
46520 + if (!error)
46521 + gr_handle_create(new_dentry, new_path.mnt);
46522 out_drop_write:
46523 mnt_drop_write(new_path.mnt);
46524 out_dput:
46525 + putname(to);
46526 dput(new_dentry);
46527 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
46528 path_put(&new_path);
46529 @@ -3299,6 +3492,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46530 if (new_dentry == trap)
46531 goto exit5;
46532
46533 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
46534 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
46535 + to);
46536 + if (error)
46537 + goto exit5;
46538 +
46539 error = mnt_want_write(oldnd.path.mnt);
46540 if (error)
46541 goto exit5;
46542 @@ -3308,6 +3507,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46543 goto exit6;
46544 error = vfs_rename(old_dir->d_inode, old_dentry,
46545 new_dir->d_inode, new_dentry);
46546 + if (!error)
46547 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
46548 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
46549 exit6:
46550 mnt_drop_write(oldnd.path.mnt);
46551 exit5:
46552 @@ -3333,6 +3535,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
46553
46554 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
46555 {
46556 + char tmpbuf[64];
46557 + const char *newlink;
46558 int len;
46559
46560 len = PTR_ERR(link);
46561 @@ -3342,7 +3546,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
46562 len = strlen(link);
46563 if (len > (unsigned) buflen)
46564 len = buflen;
46565 - if (copy_to_user(buffer, link, len))
46566 +
46567 + if (len < sizeof(tmpbuf)) {
46568 + memcpy(tmpbuf, link, len);
46569 + newlink = tmpbuf;
46570 + } else
46571 + newlink = link;
46572 +
46573 + if (copy_to_user(buffer, newlink, len))
46574 len = -EFAULT;
46575 out:
46576 return len;
46577 diff --git a/fs/namespace.c b/fs/namespace.c
46578 index 4e46539..b28253c 100644
46579 --- a/fs/namespace.c
46580 +++ b/fs/namespace.c
46581 @@ -1156,6 +1156,9 @@ static int do_umount(struct mount *mnt, int flags)
46582 if (!(sb->s_flags & MS_RDONLY))
46583 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
46584 up_write(&sb->s_umount);
46585 +
46586 + gr_log_remount(mnt->mnt_devname, retval);
46587 +
46588 return retval;
46589 }
46590
46591 @@ -1175,6 +1178,9 @@ static int do_umount(struct mount *mnt, int flags)
46592 br_write_unlock(vfsmount_lock);
46593 up_write(&namespace_sem);
46594 release_mounts(&umount_list);
46595 +
46596 + gr_log_unmount(mnt->mnt_devname, retval);
46597 +
46598 return retval;
46599 }
46600
46601 @@ -2176,6 +2182,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46602 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
46603 MS_STRICTATIME);
46604
46605 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
46606 + retval = -EPERM;
46607 + goto dput_out;
46608 + }
46609 +
46610 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
46611 + retval = -EPERM;
46612 + goto dput_out;
46613 + }
46614 +
46615 if (flags & MS_REMOUNT)
46616 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
46617 data_page);
46618 @@ -2190,6 +2206,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46619 dev_name, data_page);
46620 dput_out:
46621 path_put(&path);
46622 +
46623 + gr_log_mount(dev_name, dir_name, retval);
46624 +
46625 return retval;
46626 }
46627
46628 @@ -2471,6 +2490,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
46629 if (error)
46630 goto out2;
46631
46632 + if (gr_handle_chroot_pivot()) {
46633 + error = -EPERM;
46634 + goto out2;
46635 + }
46636 +
46637 get_fs_root(current->fs, &root);
46638 error = lock_mount(&old);
46639 if (error)
46640 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
46641 index e8bbfa5..864f936 100644
46642 --- a/fs/nfs/inode.c
46643 +++ b/fs/nfs/inode.c
46644 @@ -152,7 +152,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
46645 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
46646 nfsi->attrtimeo_timestamp = jiffies;
46647
46648 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
46649 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
46650 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
46651 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
46652 else
46653 @@ -1005,16 +1005,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
46654 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
46655 }
46656
46657 -static atomic_long_t nfs_attr_generation_counter;
46658 +static atomic_long_unchecked_t nfs_attr_generation_counter;
46659
46660 static unsigned long nfs_read_attr_generation_counter(void)
46661 {
46662 - return atomic_long_read(&nfs_attr_generation_counter);
46663 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
46664 }
46665
46666 unsigned long nfs_inc_attr_generation_counter(void)
46667 {
46668 - return atomic_long_inc_return(&nfs_attr_generation_counter);
46669 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
46670 }
46671
46672 void nfs_fattr_init(struct nfs_fattr *fattr)
46673 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
46674 index 5686661..80a9a3a 100644
46675 --- a/fs/nfsd/vfs.c
46676 +++ b/fs/nfsd/vfs.c
46677 @@ -933,7 +933,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46678 } else {
46679 oldfs = get_fs();
46680 set_fs(KERNEL_DS);
46681 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
46682 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
46683 set_fs(oldfs);
46684 }
46685
46686 @@ -1037,7 +1037,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46687
46688 /* Write the data. */
46689 oldfs = get_fs(); set_fs(KERNEL_DS);
46690 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
46691 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
46692 set_fs(oldfs);
46693 if (host_err < 0)
46694 goto out_nfserr;
46695 @@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
46696 */
46697
46698 oldfs = get_fs(); set_fs(KERNEL_DS);
46699 - host_err = inode->i_op->readlink(path.dentry, buf, *lenp);
46700 + host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
46701 set_fs(oldfs);
46702
46703 if (host_err < 0)
46704 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
46705 index 3568c8a..e0240d8 100644
46706 --- a/fs/notify/fanotify/fanotify_user.c
46707 +++ b/fs/notify/fanotify/fanotify_user.c
46708 @@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
46709 goto out_close_fd;
46710
46711 ret = -EFAULT;
46712 - if (copy_to_user(buf, &fanotify_event_metadata,
46713 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
46714 + copy_to_user(buf, &fanotify_event_metadata,
46715 fanotify_event_metadata.event_len))
46716 goto out_kill_access_response;
46717
46718 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
46719 index c887b13..0fdf472 100644
46720 --- a/fs/notify/notification.c
46721 +++ b/fs/notify/notification.c
46722 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
46723 * get set to 0 so it will never get 'freed'
46724 */
46725 static struct fsnotify_event *q_overflow_event;
46726 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46727 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46728
46729 /**
46730 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
46731 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46732 */
46733 u32 fsnotify_get_cookie(void)
46734 {
46735 - return atomic_inc_return(&fsnotify_sync_cookie);
46736 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
46737 }
46738 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
46739
46740 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
46741 index 99e3610..02c1068 100644
46742 --- a/fs/ntfs/dir.c
46743 +++ b/fs/ntfs/dir.c
46744 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
46745 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
46746 ~(s64)(ndir->itype.index.block_size - 1)));
46747 /* Bounds checks. */
46748 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46749 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46750 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
46751 "inode 0x%lx or driver bug.", vdir->i_ino);
46752 goto err_out;
46753 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
46754 index 8639169..76697aa 100644
46755 --- a/fs/ntfs/file.c
46756 +++ b/fs/ntfs/file.c
46757 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
46758 #endif /* NTFS_RW */
46759 };
46760
46761 -const struct file_operations ntfs_empty_file_ops = {};
46762 +const struct file_operations ntfs_empty_file_ops __read_only;
46763
46764 -const struct inode_operations ntfs_empty_inode_ops = {};
46765 +const struct inode_operations ntfs_empty_inode_ops __read_only;
46766 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
46767 index 210c352..a174f83 100644
46768 --- a/fs/ocfs2/localalloc.c
46769 +++ b/fs/ocfs2/localalloc.c
46770 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
46771 goto bail;
46772 }
46773
46774 - atomic_inc(&osb->alloc_stats.moves);
46775 + atomic_inc_unchecked(&osb->alloc_stats.moves);
46776
46777 bail:
46778 if (handle)
46779 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
46780 index d355e6e..578d905 100644
46781 --- a/fs/ocfs2/ocfs2.h
46782 +++ b/fs/ocfs2/ocfs2.h
46783 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
46784
46785 struct ocfs2_alloc_stats
46786 {
46787 - atomic_t moves;
46788 - atomic_t local_data;
46789 - atomic_t bitmap_data;
46790 - atomic_t bg_allocs;
46791 - atomic_t bg_extends;
46792 + atomic_unchecked_t moves;
46793 + atomic_unchecked_t local_data;
46794 + atomic_unchecked_t bitmap_data;
46795 + atomic_unchecked_t bg_allocs;
46796 + atomic_unchecked_t bg_extends;
46797 };
46798
46799 enum ocfs2_local_alloc_state
46800 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
46801 index f169da4..9112253 100644
46802 --- a/fs/ocfs2/suballoc.c
46803 +++ b/fs/ocfs2/suballoc.c
46804 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
46805 mlog_errno(status);
46806 goto bail;
46807 }
46808 - atomic_inc(&osb->alloc_stats.bg_extends);
46809 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
46810
46811 /* You should never ask for this much metadata */
46812 BUG_ON(bits_wanted >
46813 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
46814 mlog_errno(status);
46815 goto bail;
46816 }
46817 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46818 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46819
46820 *suballoc_loc = res.sr_bg_blkno;
46821 *suballoc_bit_start = res.sr_bit_offset;
46822 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
46823 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
46824 res->sr_bits);
46825
46826 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46827 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46828
46829 BUG_ON(res->sr_bits != 1);
46830
46831 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
46832 mlog_errno(status);
46833 goto bail;
46834 }
46835 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46836 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46837
46838 BUG_ON(res.sr_bits != 1);
46839
46840 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46841 cluster_start,
46842 num_clusters);
46843 if (!status)
46844 - atomic_inc(&osb->alloc_stats.local_data);
46845 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
46846 } else {
46847 if (min_clusters > (osb->bitmap_cpg - 1)) {
46848 /* The only paths asking for contiguousness
46849 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46850 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
46851 res.sr_bg_blkno,
46852 res.sr_bit_offset);
46853 - atomic_inc(&osb->alloc_stats.bitmap_data);
46854 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
46855 *num_clusters = res.sr_bits;
46856 }
46857 }
46858 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
46859 index 68f4541..89cfe6a 100644
46860 --- a/fs/ocfs2/super.c
46861 +++ b/fs/ocfs2/super.c
46862 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
46863 "%10s => GlobalAllocs: %d LocalAllocs: %d "
46864 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
46865 "Stats",
46866 - atomic_read(&osb->alloc_stats.bitmap_data),
46867 - atomic_read(&osb->alloc_stats.local_data),
46868 - atomic_read(&osb->alloc_stats.bg_allocs),
46869 - atomic_read(&osb->alloc_stats.moves),
46870 - atomic_read(&osb->alloc_stats.bg_extends));
46871 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
46872 + atomic_read_unchecked(&osb->alloc_stats.local_data),
46873 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
46874 + atomic_read_unchecked(&osb->alloc_stats.moves),
46875 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
46876
46877 out += snprintf(buf + out, len - out,
46878 "%10s => State: %u Descriptor: %llu Size: %u bits "
46879 @@ -2116,11 +2116,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
46880 spin_lock_init(&osb->osb_xattr_lock);
46881 ocfs2_init_steal_slots(osb);
46882
46883 - atomic_set(&osb->alloc_stats.moves, 0);
46884 - atomic_set(&osb->alloc_stats.local_data, 0);
46885 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
46886 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
46887 - atomic_set(&osb->alloc_stats.bg_extends, 0);
46888 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
46889 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
46890 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
46891 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
46892 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
46893
46894 /* Copy the blockcheck stats from the superblock probe */
46895 osb->osb_ecc_stats = *stats;
46896 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
46897 index 5d22872..523db20 100644
46898 --- a/fs/ocfs2/symlink.c
46899 +++ b/fs/ocfs2/symlink.c
46900 @@ -142,7 +142,7 @@ bail:
46901
46902 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46903 {
46904 - char *link = nd_get_link(nd);
46905 + const char *link = nd_get_link(nd);
46906 if (!IS_ERR(link))
46907 kfree(link);
46908 }
46909 diff --git a/fs/open.c b/fs/open.c
46910 index 5720854..ccfe124 100644
46911 --- a/fs/open.c
46912 +++ b/fs/open.c
46913 @@ -31,6 +31,8 @@
46914 #include <linux/ima.h>
46915 #include <linux/dnotify.h>
46916
46917 +#define CREATE_TRACE_POINTS
46918 +#include <trace/events/fs.h>
46919 #include "internal.h"
46920
46921 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
46922 @@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
46923 error = locks_verify_truncate(inode, NULL, length);
46924 if (!error)
46925 error = security_path_truncate(&path);
46926 +
46927 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
46928 + error = -EACCES;
46929 +
46930 if (!error)
46931 error = do_truncate(path.dentry, length, 0, NULL);
46932
46933 @@ -358,6 +364,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
46934 if (__mnt_is_readonly(path.mnt))
46935 res = -EROFS;
46936
46937 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
46938 + res = -EACCES;
46939 +
46940 out_path_release:
46941 path_put(&path);
46942 out:
46943 @@ -384,6 +393,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
46944 if (error)
46945 goto dput_and_out;
46946
46947 + gr_log_chdir(path.dentry, path.mnt);
46948 +
46949 set_fs_pwd(current->fs, &path);
46950
46951 dput_and_out:
46952 @@ -410,6 +421,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
46953 goto out_putf;
46954
46955 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
46956 +
46957 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
46958 + error = -EPERM;
46959 +
46960 + if (!error)
46961 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
46962 +
46963 if (!error)
46964 set_fs_pwd(current->fs, &file->f_path);
46965 out_putf:
46966 @@ -438,7 +456,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
46967 if (error)
46968 goto dput_and_out;
46969
46970 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
46971 + goto dput_and_out;
46972 +
46973 set_fs_root(current->fs, &path);
46974 +
46975 + gr_handle_chroot_chdir(&path);
46976 +
46977 error = 0;
46978 dput_and_out:
46979 path_put(&path);
46980 @@ -456,6 +480,16 @@ static int chmod_common(struct path *path, umode_t mode)
46981 if (error)
46982 return error;
46983 mutex_lock(&inode->i_mutex);
46984 +
46985 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
46986 + error = -EACCES;
46987 + goto out_unlock;
46988 + }
46989 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
46990 + error = -EACCES;
46991 + goto out_unlock;
46992 + }
46993 +
46994 error = security_path_chmod(path, mode);
46995 if (error)
46996 goto out_unlock;
46997 @@ -506,6 +540,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
46998 int error;
46999 struct iattr newattrs;
47000
47001 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
47002 + return -EACCES;
47003 +
47004 newattrs.ia_valid = ATTR_CTIME;
47005 if (user != (uid_t) -1) {
47006 newattrs.ia_valid |= ATTR_UID;
47007 @@ -987,6 +1024,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
47008 } else {
47009 fsnotify_open(f);
47010 fd_install(fd, f);
47011 + trace_do_sys_open(tmp, flags, mode);
47012 }
47013 }
47014 putname(tmp);
47015 diff --git a/fs/pipe.c b/fs/pipe.c
47016 index fec5e4a..f4210f9 100644
47017 --- a/fs/pipe.c
47018 +++ b/fs/pipe.c
47019 @@ -438,9 +438,9 @@ redo:
47020 }
47021 if (bufs) /* More to do? */
47022 continue;
47023 - if (!pipe->writers)
47024 + if (!atomic_read(&pipe->writers))
47025 break;
47026 - if (!pipe->waiting_writers) {
47027 + if (!atomic_read(&pipe->waiting_writers)) {
47028 /* syscall merging: Usually we must not sleep
47029 * if O_NONBLOCK is set, or if we got some data.
47030 * But if a writer sleeps in kernel space, then
47031 @@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
47032 mutex_lock(&inode->i_mutex);
47033 pipe = inode->i_pipe;
47034
47035 - if (!pipe->readers) {
47036 + if (!atomic_read(&pipe->readers)) {
47037 send_sig(SIGPIPE, current, 0);
47038 ret = -EPIPE;
47039 goto out;
47040 @@ -553,7 +553,7 @@ redo1:
47041 for (;;) {
47042 int bufs;
47043
47044 - if (!pipe->readers) {
47045 + if (!atomic_read(&pipe->readers)) {
47046 send_sig(SIGPIPE, current, 0);
47047 if (!ret)
47048 ret = -EPIPE;
47049 @@ -644,9 +644,9 @@ redo2:
47050 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
47051 do_wakeup = 0;
47052 }
47053 - pipe->waiting_writers++;
47054 + atomic_inc(&pipe->waiting_writers);
47055 pipe_wait(pipe);
47056 - pipe->waiting_writers--;
47057 + atomic_dec(&pipe->waiting_writers);
47058 }
47059 out:
47060 mutex_unlock(&inode->i_mutex);
47061 @@ -713,7 +713,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47062 mask = 0;
47063 if (filp->f_mode & FMODE_READ) {
47064 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
47065 - if (!pipe->writers && filp->f_version != pipe->w_counter)
47066 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
47067 mask |= POLLHUP;
47068 }
47069
47070 @@ -723,7 +723,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47071 * Most Unices do not set POLLERR for FIFOs but on Linux they
47072 * behave exactly like pipes for poll().
47073 */
47074 - if (!pipe->readers)
47075 + if (!atomic_read(&pipe->readers))
47076 mask |= POLLERR;
47077 }
47078
47079 @@ -737,10 +737,10 @@ pipe_release(struct inode *inode, int decr, int decw)
47080
47081 mutex_lock(&inode->i_mutex);
47082 pipe = inode->i_pipe;
47083 - pipe->readers -= decr;
47084 - pipe->writers -= decw;
47085 + atomic_sub(decr, &pipe->readers);
47086 + atomic_sub(decw, &pipe->writers);
47087
47088 - if (!pipe->readers && !pipe->writers) {
47089 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
47090 free_pipe_info(inode);
47091 } else {
47092 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
47093 @@ -830,7 +830,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
47094
47095 if (inode->i_pipe) {
47096 ret = 0;
47097 - inode->i_pipe->readers++;
47098 + atomic_inc(&inode->i_pipe->readers);
47099 }
47100
47101 mutex_unlock(&inode->i_mutex);
47102 @@ -847,7 +847,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
47103
47104 if (inode->i_pipe) {
47105 ret = 0;
47106 - inode->i_pipe->writers++;
47107 + atomic_inc(&inode->i_pipe->writers);
47108 }
47109
47110 mutex_unlock(&inode->i_mutex);
47111 @@ -865,9 +865,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
47112 if (inode->i_pipe) {
47113 ret = 0;
47114 if (filp->f_mode & FMODE_READ)
47115 - inode->i_pipe->readers++;
47116 + atomic_inc(&inode->i_pipe->readers);
47117 if (filp->f_mode & FMODE_WRITE)
47118 - inode->i_pipe->writers++;
47119 + atomic_inc(&inode->i_pipe->writers);
47120 }
47121
47122 mutex_unlock(&inode->i_mutex);
47123 @@ -959,7 +959,7 @@ void free_pipe_info(struct inode *inode)
47124 inode->i_pipe = NULL;
47125 }
47126
47127 -static struct vfsmount *pipe_mnt __read_mostly;
47128 +struct vfsmount *pipe_mnt __read_mostly;
47129
47130 /*
47131 * pipefs_dname() is called from d_path().
47132 @@ -989,7 +989,8 @@ static struct inode * get_pipe_inode(void)
47133 goto fail_iput;
47134 inode->i_pipe = pipe;
47135
47136 - pipe->readers = pipe->writers = 1;
47137 + atomic_set(&pipe->readers, 1);
47138 + atomic_set(&pipe->writers, 1);
47139 inode->i_fop = &rdwr_pipefifo_fops;
47140
47141 /*
47142 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
47143 index 15af622..0e9f4467 100644
47144 --- a/fs/proc/Kconfig
47145 +++ b/fs/proc/Kconfig
47146 @@ -30,12 +30,12 @@ config PROC_FS
47147
47148 config PROC_KCORE
47149 bool "/proc/kcore support" if !ARM
47150 - depends on PROC_FS && MMU
47151 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
47152
47153 config PROC_VMCORE
47154 bool "/proc/vmcore support"
47155 - depends on PROC_FS && CRASH_DUMP
47156 - default y
47157 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
47158 + default n
47159 help
47160 Exports the dump image of crashed kernel in ELF format.
47161
47162 @@ -59,8 +59,8 @@ config PROC_SYSCTL
47163 limited in memory.
47164
47165 config PROC_PAGE_MONITOR
47166 - default y
47167 - depends on PROC_FS && MMU
47168 + default n
47169 + depends on PROC_FS && MMU && !GRKERNSEC
47170 bool "Enable /proc page monitoring" if EXPERT
47171 help
47172 Various /proc files exist to monitor process memory utilization:
47173 diff --git a/fs/proc/array.c b/fs/proc/array.c
47174 index f9bd395..acb7847 100644
47175 --- a/fs/proc/array.c
47176 +++ b/fs/proc/array.c
47177 @@ -60,6 +60,7 @@
47178 #include <linux/tty.h>
47179 #include <linux/string.h>
47180 #include <linux/mman.h>
47181 +#include <linux/grsecurity.h>
47182 #include <linux/proc_fs.h>
47183 #include <linux/ioport.h>
47184 #include <linux/uaccess.h>
47185 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
47186 seq_putc(m, '\n');
47187 }
47188
47189 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47190 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
47191 +{
47192 + if (p->mm)
47193 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
47194 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
47195 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
47196 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
47197 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
47198 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
47199 + else
47200 + seq_printf(m, "PaX:\t-----\n");
47201 +}
47202 +#endif
47203 +
47204 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47205 struct pid *pid, struct task_struct *task)
47206 {
47207 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47208 task_cpus_allowed(m, task);
47209 cpuset_task_status_allowed(m, task);
47210 task_context_switch_counts(m, task);
47211 +
47212 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47213 + task_pax(m, task);
47214 +#endif
47215 +
47216 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
47217 + task_grsec_rbac(m, task);
47218 +#endif
47219 +
47220 return 0;
47221 }
47222
47223 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47224 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47225 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47226 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47227 +#endif
47228 +
47229 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47230 struct pid *pid, struct task_struct *task, int whole)
47231 {
47232 @@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47233 char tcomm[sizeof(task->comm)];
47234 unsigned long flags;
47235
47236 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47237 + if (current->exec_id != m->exec_id) {
47238 + gr_log_badprocpid("stat");
47239 + return 0;
47240 + }
47241 +#endif
47242 +
47243 state = *get_task_state(task);
47244 vsize = eip = esp = 0;
47245 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47246 @@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47247 gtime = task->gtime;
47248 }
47249
47250 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47251 + if (PAX_RAND_FLAGS(mm)) {
47252 + eip = 0;
47253 + esp = 0;
47254 + wchan = 0;
47255 + }
47256 +#endif
47257 +#ifdef CONFIG_GRKERNSEC_HIDESYM
47258 + wchan = 0;
47259 + eip =0;
47260 + esp =0;
47261 +#endif
47262 +
47263 /* scale priority and nice values from timeslices to -20..20 */
47264 /* to make it look like a "normal" Unix priority/nice value */
47265 priority = task_prio(task);
47266 @@ -485,9 +536,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47267 seq_put_decimal_ull(m, ' ', vsize);
47268 seq_put_decimal_ll(m, ' ', mm ? get_mm_rss(mm) : 0);
47269 seq_put_decimal_ull(m, ' ', rsslim);
47270 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47271 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
47272 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
47273 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
47274 +#else
47275 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
47276 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
47277 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
47278 +#endif
47279 seq_put_decimal_ull(m, ' ', esp);
47280 seq_put_decimal_ull(m, ' ', eip);
47281 /* The signal information here is obsolete.
47282 @@ -508,9 +565,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47283 seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
47284 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
47285 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
47286 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47287 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_data : 0));
47288 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->end_data : 0));
47289 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_brk : 0));
47290 +#else
47291 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_data : 0);
47292 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->end_data : 0);
47293 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_brk : 0);
47294 +#endif
47295 seq_putc(m, '\n');
47296 if (mm)
47297 mmput(mm);
47298 @@ -533,8 +596,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47299 struct pid *pid, struct task_struct *task)
47300 {
47301 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
47302 - struct mm_struct *mm = get_task_mm(task);
47303 + struct mm_struct *mm;
47304
47305 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47306 + if (current->exec_id != m->exec_id) {
47307 + gr_log_badprocpid("statm");
47308 + return 0;
47309 + }
47310 +#endif
47311 + mm = get_task_mm(task);
47312 if (mm) {
47313 size = task_statm(mm, &shared, &text, &data, &resident);
47314 mmput(mm);
47315 @@ -556,3 +626,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47316
47317 return 0;
47318 }
47319 +
47320 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47321 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
47322 +{
47323 + u32 curr_ip = 0;
47324 + unsigned long flags;
47325 +
47326 + if (lock_task_sighand(task, &flags)) {
47327 + curr_ip = task->signal->curr_ip;
47328 + unlock_task_sighand(task, &flags);
47329 + }
47330 +
47331 + return sprintf(buffer, "%pI4\n", &curr_ip);
47332 +}
47333 +#endif
47334 diff --git a/fs/proc/base.c b/fs/proc/base.c
47335 index 9fc77b4..04761b8 100644
47336 --- a/fs/proc/base.c
47337 +++ b/fs/proc/base.c
47338 @@ -109,6 +109,14 @@ struct pid_entry {
47339 union proc_op op;
47340 };
47341
47342 +struct getdents_callback {
47343 + struct linux_dirent __user * current_dir;
47344 + struct linux_dirent __user * previous;
47345 + struct file * file;
47346 + int count;
47347 + int error;
47348 +};
47349 +
47350 #define NOD(NAME, MODE, IOP, FOP, OP) { \
47351 .name = (NAME), \
47352 .len = sizeof(NAME) - 1, \
47353 @@ -213,6 +221,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47354 if (!mm->arg_end)
47355 goto out_mm; /* Shh! No looking before we're done */
47356
47357 + if (gr_acl_handle_procpidmem(task))
47358 + goto out_mm;
47359 +
47360 len = mm->arg_end - mm->arg_start;
47361
47362 if (len > PAGE_SIZE)
47363 @@ -240,12 +251,28 @@ out:
47364 return res;
47365 }
47366
47367 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47368 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47369 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47370 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47371 +#endif
47372 +
47373 static int proc_pid_auxv(struct task_struct *task, char *buffer)
47374 {
47375 struct mm_struct *mm = mm_for_maps(task);
47376 int res = PTR_ERR(mm);
47377 if (mm && !IS_ERR(mm)) {
47378 unsigned int nwords = 0;
47379 +
47380 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47381 + /* allow if we're currently ptracing this task */
47382 + if (PAX_RAND_FLAGS(mm) &&
47383 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
47384 + mmput(mm);
47385 + return 0;
47386 + }
47387 +#endif
47388 +
47389 do {
47390 nwords += 2;
47391 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
47392 @@ -259,7 +286,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
47393 }
47394
47395
47396 -#ifdef CONFIG_KALLSYMS
47397 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47398 /*
47399 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
47400 * Returns the resolved symbol. If that fails, simply return the address.
47401 @@ -298,7 +325,7 @@ static void unlock_trace(struct task_struct *task)
47402 mutex_unlock(&task->signal->cred_guard_mutex);
47403 }
47404
47405 -#ifdef CONFIG_STACKTRACE
47406 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47407
47408 #define MAX_STACK_TRACE_DEPTH 64
47409
47410 @@ -489,7 +516,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
47411 return count;
47412 }
47413
47414 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47415 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47416 static int proc_pid_syscall(struct task_struct *task, char *buffer)
47417 {
47418 long nr;
47419 @@ -518,7 +545,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
47420 /************************************************************************/
47421
47422 /* permission checks */
47423 -static int proc_fd_access_allowed(struct inode *inode)
47424 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
47425 {
47426 struct task_struct *task;
47427 int allowed = 0;
47428 @@ -528,7 +555,10 @@ static int proc_fd_access_allowed(struct inode *inode)
47429 */
47430 task = get_proc_task(inode);
47431 if (task) {
47432 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47433 + if (log)
47434 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47435 + else
47436 + allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47437 put_task_struct(task);
47438 }
47439 return allowed;
47440 @@ -566,10 +596,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
47441 struct task_struct *task,
47442 int hide_pid_min)
47443 {
47444 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47445 + return false;
47446 +
47447 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47448 + rcu_read_lock();
47449 + {
47450 + const struct cred *tmpcred = current_cred();
47451 + const struct cred *cred = __task_cred(task);
47452 +
47453 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
47454 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47455 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
47456 +#endif
47457 + ) {
47458 + rcu_read_unlock();
47459 + return true;
47460 + }
47461 + }
47462 + rcu_read_unlock();
47463 +
47464 + if (!pid->hide_pid)
47465 + return false;
47466 +#endif
47467 +
47468 if (pid->hide_pid < hide_pid_min)
47469 return true;
47470 if (in_group_p(pid->pid_gid))
47471 return true;
47472 +
47473 return ptrace_may_access(task, PTRACE_MODE_READ);
47474 }
47475
47476 @@ -587,7 +642,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
47477 put_task_struct(task);
47478
47479 if (!has_perms) {
47480 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47481 + {
47482 +#else
47483 if (pid->hide_pid == 2) {
47484 +#endif
47485 /*
47486 * Let's make getdents(), stat(), and open()
47487 * consistent with each other. If a process
47488 @@ -702,6 +761,10 @@ static int mem_open(struct inode* inode, struct file* file)
47489 file->f_mode |= FMODE_UNSIGNED_OFFSET;
47490 file->private_data = mm;
47491
47492 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47493 + file->f_version = current->exec_id;
47494 +#endif
47495 +
47496 return 0;
47497 }
47498
47499 @@ -713,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
47500 ssize_t copied;
47501 char *page;
47502
47503 +#ifdef CONFIG_GRKERNSEC
47504 + if (write)
47505 + return -EPERM;
47506 +#endif
47507 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47508 + if (file->f_version != current->exec_id) {
47509 + gr_log_badprocpid("mem");
47510 + return 0;
47511 + }
47512 +#endif
47513 +
47514 if (!mm)
47515 return 0;
47516
47517 @@ -813,6 +887,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
47518 if (!task)
47519 goto out_no_task;
47520
47521 + if (gr_acl_handle_procpidmem(task))
47522 + goto out;
47523 +
47524 ret = -ENOMEM;
47525 page = (char *)__get_free_page(GFP_TEMPORARY);
47526 if (!page)
47527 @@ -1433,7 +1510,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
47528 path_put(&nd->path);
47529
47530 /* Are we allowed to snoop on the tasks file descriptors? */
47531 - if (!proc_fd_access_allowed(inode))
47532 + if (!proc_fd_access_allowed(inode, 0))
47533 goto out;
47534
47535 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
47536 @@ -1472,8 +1549,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
47537 struct path path;
47538
47539 /* Are we allowed to snoop on the tasks file descriptors? */
47540 - if (!proc_fd_access_allowed(inode))
47541 - goto out;
47542 + /* logging this is needed for learning on chromium to work properly,
47543 + but we don't want to flood the logs from 'ps' which does a readlink
47544 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
47545 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
47546 + */
47547 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
47548 + if (!proc_fd_access_allowed(inode,0))
47549 + goto out;
47550 + } else {
47551 + if (!proc_fd_access_allowed(inode,1))
47552 + goto out;
47553 + }
47554
47555 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
47556 if (error)
47557 @@ -1538,7 +1625,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
47558 rcu_read_lock();
47559 cred = __task_cred(task);
47560 inode->i_uid = cred->euid;
47561 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47562 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47563 +#else
47564 inode->i_gid = cred->egid;
47565 +#endif
47566 rcu_read_unlock();
47567 }
47568 security_task_to_inode(task, inode);
47569 @@ -1574,10 +1665,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
47570 return -ENOENT;
47571 }
47572 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47573 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47574 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47575 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47576 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47577 +#endif
47578 task_dumpable(task)) {
47579 cred = __task_cred(task);
47580 stat->uid = cred->euid;
47581 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47582 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
47583 +#else
47584 stat->gid = cred->egid;
47585 +#endif
47586 }
47587 }
47588 rcu_read_unlock();
47589 @@ -1615,11 +1715,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
47590
47591 if (task) {
47592 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47593 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47594 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47595 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47596 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47597 +#endif
47598 task_dumpable(task)) {
47599 rcu_read_lock();
47600 cred = __task_cred(task);
47601 inode->i_uid = cred->euid;
47602 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47603 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47604 +#else
47605 inode->i_gid = cred->egid;
47606 +#endif
47607 rcu_read_unlock();
47608 } else {
47609 inode->i_uid = 0;
47610 @@ -1737,7 +1846,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
47611 int fd = proc_fd(inode);
47612
47613 if (task) {
47614 - files = get_files_struct(task);
47615 + if (!gr_acl_handle_procpidmem(task))
47616 + files = get_files_struct(task);
47617 put_task_struct(task);
47618 }
47619 if (files) {
47620 @@ -2338,11 +2448,21 @@ static const struct file_operations proc_map_files_operations = {
47621 */
47622 static int proc_fd_permission(struct inode *inode, int mask)
47623 {
47624 + struct task_struct *task;
47625 int rv = generic_permission(inode, mask);
47626 - if (rv == 0)
47627 - return 0;
47628 +
47629 if (task_pid(current) == proc_pid(inode))
47630 rv = 0;
47631 +
47632 + task = get_proc_task(inode);
47633 + if (task == NULL)
47634 + return rv;
47635 +
47636 + if (gr_acl_handle_procpidmem(task))
47637 + rv = -EACCES;
47638 +
47639 + put_task_struct(task);
47640 +
47641 return rv;
47642 }
47643
47644 @@ -2452,6 +2572,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
47645 if (!task)
47646 goto out_no_task;
47647
47648 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47649 + goto out;
47650 +
47651 /*
47652 * Yes, it does not scale. And it should not. Don't add
47653 * new entries into /proc/<tgid>/ without very good reasons.
47654 @@ -2496,6 +2619,9 @@ static int proc_pident_readdir(struct file *filp,
47655 if (!task)
47656 goto out_no_task;
47657
47658 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47659 + goto out;
47660 +
47661 ret = 0;
47662 i = filp->f_pos;
47663 switch (i) {
47664 @@ -2766,7 +2892,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
47665 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
47666 void *cookie)
47667 {
47668 - char *s = nd_get_link(nd);
47669 + const char *s = nd_get_link(nd);
47670 if (!IS_ERR(s))
47671 __putname(s);
47672 }
47673 @@ -2967,7 +3093,7 @@ static const struct pid_entry tgid_base_stuff[] = {
47674 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
47675 #endif
47676 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47677 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47678 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47679 INF("syscall", S_IRUGO, proc_pid_syscall),
47680 #endif
47681 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47682 @@ -2992,10 +3118,10 @@ static const struct pid_entry tgid_base_stuff[] = {
47683 #ifdef CONFIG_SECURITY
47684 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47685 #endif
47686 -#ifdef CONFIG_KALLSYMS
47687 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47688 INF("wchan", S_IRUGO, proc_pid_wchan),
47689 #endif
47690 -#ifdef CONFIG_STACKTRACE
47691 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47692 ONE("stack", S_IRUGO, proc_pid_stack),
47693 #endif
47694 #ifdef CONFIG_SCHEDSTATS
47695 @@ -3029,6 +3155,9 @@ static const struct pid_entry tgid_base_stuff[] = {
47696 #ifdef CONFIG_HARDWALL
47697 INF("hardwall", S_IRUGO, proc_pid_hardwall),
47698 #endif
47699 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47700 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
47701 +#endif
47702 };
47703
47704 static int proc_tgid_base_readdir(struct file * filp,
47705 @@ -3155,7 +3284,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
47706 if (!inode)
47707 goto out;
47708
47709 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47710 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
47711 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47712 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47713 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
47714 +#else
47715 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
47716 +#endif
47717 inode->i_op = &proc_tgid_base_inode_operations;
47718 inode->i_fop = &proc_tgid_base_operations;
47719 inode->i_flags|=S_IMMUTABLE;
47720 @@ -3197,7 +3333,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
47721 if (!task)
47722 goto out;
47723
47724 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47725 + goto out_put_task;
47726 +
47727 result = proc_pid_instantiate(dir, dentry, task, NULL);
47728 +out_put_task:
47729 put_task_struct(task);
47730 out:
47731 return result;
47732 @@ -3260,6 +3400,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
47733 static int fake_filldir(void *buf, const char *name, int namelen,
47734 loff_t offset, u64 ino, unsigned d_type)
47735 {
47736 + struct getdents_callback * __buf = (struct getdents_callback *) buf;
47737 + __buf->error = -EINVAL;
47738 return 0;
47739 }
47740
47741 @@ -3326,7 +3468,7 @@ static const struct pid_entry tid_base_stuff[] = {
47742 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
47743 #endif
47744 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47745 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47746 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47747 INF("syscall", S_IRUGO, proc_pid_syscall),
47748 #endif
47749 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47750 @@ -3350,10 +3492,10 @@ static const struct pid_entry tid_base_stuff[] = {
47751 #ifdef CONFIG_SECURITY
47752 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47753 #endif
47754 -#ifdef CONFIG_KALLSYMS
47755 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47756 INF("wchan", S_IRUGO, proc_pid_wchan),
47757 #endif
47758 -#ifdef CONFIG_STACKTRACE
47759 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47760 ONE("stack", S_IRUGO, proc_pid_stack),
47761 #endif
47762 #ifdef CONFIG_SCHEDSTATS
47763 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
47764 index 82676e3..5f8518a 100644
47765 --- a/fs/proc/cmdline.c
47766 +++ b/fs/proc/cmdline.c
47767 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
47768
47769 static int __init proc_cmdline_init(void)
47770 {
47771 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47772 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
47773 +#else
47774 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
47775 +#endif
47776 return 0;
47777 }
47778 module_init(proc_cmdline_init);
47779 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
47780 index b143471..bb105e5 100644
47781 --- a/fs/proc/devices.c
47782 +++ b/fs/proc/devices.c
47783 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
47784
47785 static int __init proc_devices_init(void)
47786 {
47787 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47788 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
47789 +#else
47790 proc_create("devices", 0, NULL, &proc_devinfo_operations);
47791 +#endif
47792 return 0;
47793 }
47794 module_init(proc_devices_init);
47795 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
47796 index 205c922..2ee4c57 100644
47797 --- a/fs/proc/inode.c
47798 +++ b/fs/proc/inode.c
47799 @@ -21,11 +21,17 @@
47800 #include <linux/seq_file.h>
47801 #include <linux/slab.h>
47802 #include <linux/mount.h>
47803 +#include <linux/grsecurity.h>
47804
47805 #include <asm/uaccess.h>
47806
47807 #include "internal.h"
47808
47809 +#ifdef CONFIG_PROC_SYSCTL
47810 +extern const struct inode_operations proc_sys_inode_operations;
47811 +extern const struct inode_operations proc_sys_dir_operations;
47812 +#endif
47813 +
47814 static void proc_evict_inode(struct inode *inode)
47815 {
47816 struct proc_dir_entry *de;
47817 @@ -51,6 +57,13 @@ static void proc_evict_inode(struct inode *inode)
47818 ns_ops = PROC_I(inode)->ns_ops;
47819 if (ns_ops && ns_ops->put)
47820 ns_ops->put(PROC_I(inode)->ns);
47821 +
47822 +#ifdef CONFIG_PROC_SYSCTL
47823 + if (inode->i_op == &proc_sys_inode_operations ||
47824 + inode->i_op == &proc_sys_dir_operations)
47825 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
47826 +#endif
47827 +
47828 }
47829
47830 static struct kmem_cache * proc_inode_cachep;
47831 @@ -456,7 +469,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
47832 if (de->mode) {
47833 inode->i_mode = de->mode;
47834 inode->i_uid = de->uid;
47835 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47836 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47837 +#else
47838 inode->i_gid = de->gid;
47839 +#endif
47840 }
47841 if (de->size)
47842 inode->i_size = de->size;
47843 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
47844 index 5f79bb8..eeccee4 100644
47845 --- a/fs/proc/internal.h
47846 +++ b/fs/proc/internal.h
47847 @@ -54,6 +54,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47848 struct pid *pid, struct task_struct *task);
47849 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47850 struct pid *pid, struct task_struct *task);
47851 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47852 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
47853 +#endif
47854 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
47855
47856 extern const struct file_operations proc_pid_maps_operations;
47857 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
47858 index 86c67ee..cdca321 100644
47859 --- a/fs/proc/kcore.c
47860 +++ b/fs/proc/kcore.c
47861 @@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47862 * the addresses in the elf_phdr on our list.
47863 */
47864 start = kc_offset_to_vaddr(*fpos - elf_buflen);
47865 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
47866 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
47867 + if (tsz > buflen)
47868 tsz = buflen;
47869 -
47870 +
47871 while (buflen) {
47872 struct kcore_list *m;
47873
47874 @@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47875 kfree(elf_buf);
47876 } else {
47877 if (kern_addr_valid(start)) {
47878 - unsigned long n;
47879 + char *elf_buf;
47880 + mm_segment_t oldfs;
47881
47882 - n = copy_to_user(buffer, (char *)start, tsz);
47883 - /*
47884 - * We cannot distinguish between fault on source
47885 - * and fault on destination. When this happens
47886 - * we clear too and hope it will trigger the
47887 - * EFAULT again.
47888 - */
47889 - if (n) {
47890 - if (clear_user(buffer + tsz - n,
47891 - n))
47892 + elf_buf = kmalloc(tsz, GFP_KERNEL);
47893 + if (!elf_buf)
47894 + return -ENOMEM;
47895 + oldfs = get_fs();
47896 + set_fs(KERNEL_DS);
47897 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
47898 + set_fs(oldfs);
47899 + if (copy_to_user(buffer, elf_buf, tsz)) {
47900 + kfree(elf_buf);
47901 return -EFAULT;
47902 + }
47903 }
47904 + set_fs(oldfs);
47905 + kfree(elf_buf);
47906 } else {
47907 if (clear_user(buffer, tsz))
47908 return -EFAULT;
47909 @@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47910
47911 static int open_kcore(struct inode *inode, struct file *filp)
47912 {
47913 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
47914 + return -EPERM;
47915 +#endif
47916 if (!capable(CAP_SYS_RAWIO))
47917 return -EPERM;
47918 if (kcore_need_update)
47919 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
47920 index 80e4645..53e5fcf 100644
47921 --- a/fs/proc/meminfo.c
47922 +++ b/fs/proc/meminfo.c
47923 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
47924 vmi.used >> 10,
47925 vmi.largest_chunk >> 10
47926 #ifdef CONFIG_MEMORY_FAILURE
47927 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
47928 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
47929 #endif
47930 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
47931 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
47932 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
47933 index b1822dd..df622cb 100644
47934 --- a/fs/proc/nommu.c
47935 +++ b/fs/proc/nommu.c
47936 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
47937 if (len < 1)
47938 len = 1;
47939 seq_printf(m, "%*c", len, ' ');
47940 - seq_path(m, &file->f_path, "");
47941 + seq_path(m, &file->f_path, "\n\\");
47942 }
47943
47944 seq_putc(m, '\n');
47945 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
47946 index 06e1cc1..177cd98 100644
47947 --- a/fs/proc/proc_net.c
47948 +++ b/fs/proc/proc_net.c
47949 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
47950 struct task_struct *task;
47951 struct nsproxy *ns;
47952 struct net *net = NULL;
47953 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47954 + const struct cred *cred = current_cred();
47955 +#endif
47956 +
47957 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47958 + if (cred->fsuid)
47959 + return net;
47960 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47961 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
47962 + return net;
47963 +#endif
47964
47965 rcu_read_lock();
47966 task = pid_task(proc_pid(dir), PIDTYPE_PID);
47967 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
47968 index 21d836f..bebf3ee 100644
47969 --- a/fs/proc/proc_sysctl.c
47970 +++ b/fs/proc/proc_sysctl.c
47971 @@ -12,11 +12,15 @@
47972 #include <linux/module.h>
47973 #include "internal.h"
47974
47975 +extern int gr_handle_chroot_sysctl(const int op);
47976 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
47977 + const int op);
47978 +
47979 static const struct dentry_operations proc_sys_dentry_operations;
47980 static const struct file_operations proc_sys_file_operations;
47981 -static const struct inode_operations proc_sys_inode_operations;
47982 +const struct inode_operations proc_sys_inode_operations;
47983 static const struct file_operations proc_sys_dir_file_operations;
47984 -static const struct inode_operations proc_sys_dir_operations;
47985 +const struct inode_operations proc_sys_dir_operations;
47986
47987 void proc_sys_poll_notify(struct ctl_table_poll *poll)
47988 {
47989 @@ -470,8 +474,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
47990
47991 err = NULL;
47992 d_set_d_op(dentry, &proc_sys_dentry_operations);
47993 +
47994 + gr_handle_proc_create(dentry, inode);
47995 +
47996 d_add(dentry, inode);
47997
47998 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt))
47999 + err = ERR_PTR(-ENOENT);
48000 +
48001 out:
48002 sysctl_head_finish(head);
48003 return err;
48004 @@ -483,18 +493,20 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
48005 struct inode *inode = filp->f_path.dentry->d_inode;
48006 struct ctl_table_header *head = grab_header(inode);
48007 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
48008 + int op = write ? MAY_WRITE : MAY_READ;
48009 ssize_t error;
48010 size_t res;
48011
48012 if (IS_ERR(head))
48013 return PTR_ERR(head);
48014
48015 +
48016 /*
48017 * At this point we know that the sysctl was not unregistered
48018 * and won't be until we finish.
48019 */
48020 error = -EPERM;
48021 - if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
48022 + if (sysctl_perm(head->root, table, op))
48023 goto out;
48024
48025 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
48026 @@ -502,6 +514,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
48027 if (!table->proc_handler)
48028 goto out;
48029
48030 +#ifdef CONFIG_GRKERNSEC
48031 + error = -EPERM;
48032 + if (gr_handle_chroot_sysctl(op))
48033 + goto out;
48034 + dget(filp->f_path.dentry);
48035 + if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
48036 + dput(filp->f_path.dentry);
48037 + goto out;
48038 + }
48039 + dput(filp->f_path.dentry);
48040 + if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
48041 + goto out;
48042 + if (write && !capable(CAP_SYS_ADMIN))
48043 + goto out;
48044 +#endif
48045 +
48046 /* careful: calling conventions are nasty here */
48047 res = count;
48048 error = table->proc_handler(table, write, buf, &res, ppos);
48049 @@ -599,6 +627,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
48050 return -ENOMEM;
48051 } else {
48052 d_set_d_op(child, &proc_sys_dentry_operations);
48053 +
48054 + gr_handle_proc_create(child, inode);
48055 +
48056 d_add(child, inode);
48057 }
48058 } else {
48059 @@ -642,6 +673,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
48060 if ((*pos)++ < file->f_pos)
48061 return 0;
48062
48063 + if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
48064 + return 0;
48065 +
48066 if (unlikely(S_ISLNK(table->mode)))
48067 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
48068 else
48069 @@ -759,6 +793,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
48070 if (IS_ERR(head))
48071 return PTR_ERR(head);
48072
48073 + if (table && !gr_acl_handle_hidden_file(dentry, mnt))
48074 + return -ENOENT;
48075 +
48076 generic_fillattr(inode, stat);
48077 if (table)
48078 stat->mode = (stat->mode & S_IFMT) | table->mode;
48079 @@ -781,13 +818,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
48080 .llseek = generic_file_llseek,
48081 };
48082
48083 -static const struct inode_operations proc_sys_inode_operations = {
48084 +const struct inode_operations proc_sys_inode_operations = {
48085 .permission = proc_sys_permission,
48086 .setattr = proc_sys_setattr,
48087 .getattr = proc_sys_getattr,
48088 };
48089
48090 -static const struct inode_operations proc_sys_dir_operations = {
48091 +const struct inode_operations proc_sys_dir_operations = {
48092 .lookup = proc_sys_lookup,
48093 .permission = proc_sys_permission,
48094 .setattr = proc_sys_setattr,
48095 diff --git a/fs/proc/root.c b/fs/proc/root.c
48096 index eed44bf..abeb499 100644
48097 --- a/fs/proc/root.c
48098 +++ b/fs/proc/root.c
48099 @@ -188,7 +188,15 @@ void __init proc_root_init(void)
48100 #ifdef CONFIG_PROC_DEVICETREE
48101 proc_device_tree_init();
48102 #endif
48103 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48104 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48105 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
48106 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48107 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
48108 +#endif
48109 +#else
48110 proc_mkdir("bus", NULL);
48111 +#endif
48112 proc_sys_init();
48113 }
48114
48115 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
48116 index 7faaf2a..096c28b 100644
48117 --- a/fs/proc/task_mmu.c
48118 +++ b/fs/proc/task_mmu.c
48119 @@ -11,12 +11,19 @@
48120 #include <linux/rmap.h>
48121 #include <linux/swap.h>
48122 #include <linux/swapops.h>
48123 +#include <linux/grsecurity.h>
48124
48125 #include <asm/elf.h>
48126 #include <asm/uaccess.h>
48127 #include <asm/tlbflush.h>
48128 #include "internal.h"
48129
48130 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48131 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48132 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
48133 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48134 +#endif
48135 +
48136 void task_mem(struct seq_file *m, struct mm_struct *mm)
48137 {
48138 unsigned long data, text, lib, swap;
48139 @@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48140 "VmExe:\t%8lu kB\n"
48141 "VmLib:\t%8lu kB\n"
48142 "VmPTE:\t%8lu kB\n"
48143 - "VmSwap:\t%8lu kB\n",
48144 - hiwater_vm << (PAGE_SHIFT-10),
48145 + "VmSwap:\t%8lu kB\n"
48146 +
48147 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48148 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
48149 +#endif
48150 +
48151 + ,hiwater_vm << (PAGE_SHIFT-10),
48152 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
48153 mm->locked_vm << (PAGE_SHIFT-10),
48154 mm->pinned_vm << (PAGE_SHIFT-10),
48155 @@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48156 data << (PAGE_SHIFT-10),
48157 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
48158 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
48159 - swap << (PAGE_SHIFT-10));
48160 + swap << (PAGE_SHIFT-10)
48161 +
48162 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48163 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48164 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
48165 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
48166 +#else
48167 + , mm->context.user_cs_base
48168 + , mm->context.user_cs_limit
48169 +#endif
48170 +#endif
48171 +
48172 + );
48173 }
48174
48175 unsigned long task_vsize(struct mm_struct *mm)
48176 @@ -231,13 +255,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48177 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
48178 }
48179
48180 - /* We don't show the stack guard page in /proc/maps */
48181 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48182 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
48183 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
48184 +#else
48185 start = vma->vm_start;
48186 - if (stack_guard_page_start(vma, start))
48187 - start += PAGE_SIZE;
48188 end = vma->vm_end;
48189 - if (stack_guard_page_end(vma, end))
48190 - end -= PAGE_SIZE;
48191 +#endif
48192
48193 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
48194 start,
48195 @@ -246,7 +270,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48196 flags & VM_WRITE ? 'w' : '-',
48197 flags & VM_EXEC ? 'x' : '-',
48198 flags & VM_MAYSHARE ? 's' : 'p',
48199 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48200 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
48201 +#else
48202 pgoff,
48203 +#endif
48204 MAJOR(dev), MINOR(dev), ino, &len);
48205
48206 /*
48207 @@ -255,7 +283,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48208 */
48209 if (file) {
48210 pad_len_spaces(m, len);
48211 - seq_path(m, &file->f_path, "\n");
48212 + seq_path(m, &file->f_path, "\n\\");
48213 goto done;
48214 }
48215
48216 @@ -281,8 +309,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48217 * Thread stack in /proc/PID/task/TID/maps or
48218 * the main process stack.
48219 */
48220 - if (!is_pid || (vma->vm_start <= mm->start_stack &&
48221 - vma->vm_end >= mm->start_stack)) {
48222 + if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
48223 + (vma->vm_start <= mm->start_stack &&
48224 + vma->vm_end >= mm->start_stack)) {
48225 name = "[stack]";
48226 } else {
48227 /* Thread stack in /proc/PID/maps */
48228 @@ -306,6 +335,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
48229 struct proc_maps_private *priv = m->private;
48230 struct task_struct *task = priv->task;
48231
48232 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48233 + if (current->exec_id != m->exec_id) {
48234 + gr_log_badprocpid("maps");
48235 + return 0;
48236 + }
48237 +#endif
48238 +
48239 show_map_vma(m, vma, is_pid);
48240
48241 if (m->count < m->size) /* vma is copied successfully */
48242 @@ -482,12 +518,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
48243 .private = &mss,
48244 };
48245
48246 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48247 + if (current->exec_id != m->exec_id) {
48248 + gr_log_badprocpid("smaps");
48249 + return 0;
48250 + }
48251 +#endif
48252 memset(&mss, 0, sizeof mss);
48253 - mss.vma = vma;
48254 - /* mmap_sem is held in m_start */
48255 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48256 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48257 -
48258 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48259 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
48260 +#endif
48261 + mss.vma = vma;
48262 + /* mmap_sem is held in m_start */
48263 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48264 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48265 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48266 + }
48267 +#endif
48268 show_map_vma(m, vma, is_pid);
48269
48270 seq_printf(m,
48271 @@ -505,7 +552,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
48272 "KernelPageSize: %8lu kB\n"
48273 "MMUPageSize: %8lu kB\n"
48274 "Locked: %8lu kB\n",
48275 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48276 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
48277 +#else
48278 (vma->vm_end - vma->vm_start) >> 10,
48279 +#endif
48280 mss.resident >> 10,
48281 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
48282 mss.shared_clean >> 10,
48283 @@ -1138,6 +1189,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
48284 int n;
48285 char buffer[50];
48286
48287 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48288 + if (current->exec_id != m->exec_id) {
48289 + gr_log_badprocpid("numa_maps");
48290 + return 0;
48291 + }
48292 +#endif
48293 +
48294 if (!mm)
48295 return 0;
48296
48297 @@ -1155,11 +1213,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
48298 mpol_to_str(buffer, sizeof(buffer), pol, 0);
48299 mpol_cond_put(pol);
48300
48301 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48302 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
48303 +#else
48304 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
48305 +#endif
48306
48307 if (file) {
48308 seq_printf(m, " file=");
48309 - seq_path(m, &file->f_path, "\n\t= ");
48310 + seq_path(m, &file->f_path, "\n\t\\= ");
48311 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48312 seq_printf(m, " heap");
48313 } else {
48314 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
48315 index 74fe164..899e77b 100644
48316 --- a/fs/proc/task_nommu.c
48317 +++ b/fs/proc/task_nommu.c
48318 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48319 else
48320 bytes += kobjsize(mm);
48321
48322 - if (current->fs && current->fs->users > 1)
48323 + if (current->fs && atomic_read(&current->fs->users) > 1)
48324 sbytes += kobjsize(current->fs);
48325 else
48326 bytes += kobjsize(current->fs);
48327 @@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
48328
48329 if (file) {
48330 pad_len_spaces(m, len);
48331 - seq_path(m, &file->f_path, "");
48332 + seq_path(m, &file->f_path, "\n\\");
48333 } else if (mm) {
48334 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
48335
48336 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48337 index d67908b..d13f6a6 100644
48338 --- a/fs/quota/netlink.c
48339 +++ b/fs/quota/netlink.c
48340 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
48341 void quota_send_warning(short type, unsigned int id, dev_t dev,
48342 const char warntype)
48343 {
48344 - static atomic_t seq;
48345 + static atomic_unchecked_t seq;
48346 struct sk_buff *skb;
48347 void *msg_head;
48348 int ret;
48349 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
48350 "VFS: Not enough memory to send quota warning.\n");
48351 return;
48352 }
48353 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
48354 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
48355 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
48356 if (!msg_head) {
48357 printk(KERN_ERR
48358 diff --git a/fs/readdir.c b/fs/readdir.c
48359 index cc0a822..43cb195 100644
48360 --- a/fs/readdir.c
48361 +++ b/fs/readdir.c
48362 @@ -17,6 +17,7 @@
48363 #include <linux/security.h>
48364 #include <linux/syscalls.h>
48365 #include <linux/unistd.h>
48366 +#include <linux/namei.h>
48367
48368 #include <asm/uaccess.h>
48369
48370 @@ -67,6 +68,7 @@ struct old_linux_dirent {
48371
48372 struct readdir_callback {
48373 struct old_linux_dirent __user * dirent;
48374 + struct file * file;
48375 int result;
48376 };
48377
48378 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
48379 buf->result = -EOVERFLOW;
48380 return -EOVERFLOW;
48381 }
48382 +
48383 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48384 + return 0;
48385 +
48386 buf->result++;
48387 dirent = buf->dirent;
48388 if (!access_ok(VERIFY_WRITE, dirent,
48389 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
48390
48391 buf.result = 0;
48392 buf.dirent = dirent;
48393 + buf.file = file;
48394
48395 error = vfs_readdir(file, fillonedir, &buf);
48396 if (buf.result)
48397 @@ -142,6 +149,7 @@ struct linux_dirent {
48398 struct getdents_callback {
48399 struct linux_dirent __user * current_dir;
48400 struct linux_dirent __user * previous;
48401 + struct file * file;
48402 int count;
48403 int error;
48404 };
48405 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
48406 buf->error = -EOVERFLOW;
48407 return -EOVERFLOW;
48408 }
48409 +
48410 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48411 + return 0;
48412 +
48413 dirent = buf->previous;
48414 if (dirent) {
48415 if (__put_user(offset, &dirent->d_off))
48416 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
48417 buf.previous = NULL;
48418 buf.count = count;
48419 buf.error = 0;
48420 + buf.file = file;
48421
48422 error = vfs_readdir(file, filldir, &buf);
48423 if (error >= 0)
48424 @@ -229,6 +242,7 @@ out:
48425 struct getdents_callback64 {
48426 struct linux_dirent64 __user * current_dir;
48427 struct linux_dirent64 __user * previous;
48428 + struct file *file;
48429 int count;
48430 int error;
48431 };
48432 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
48433 buf->error = -EINVAL; /* only used if we fail.. */
48434 if (reclen > buf->count)
48435 return -EINVAL;
48436 +
48437 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48438 + return 0;
48439 +
48440 dirent = buf->previous;
48441 if (dirent) {
48442 if (__put_user(offset, &dirent->d_off))
48443 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48444
48445 buf.current_dir = dirent;
48446 buf.previous = NULL;
48447 + buf.file = file;
48448 buf.count = count;
48449 buf.error = 0;
48450
48451 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48452 error = buf.error;
48453 lastdirent = buf.previous;
48454 if (lastdirent) {
48455 - typeof(lastdirent->d_off) d_off = file->f_pos;
48456 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48457 if (__put_user(d_off, &lastdirent->d_off))
48458 error = -EFAULT;
48459 else
48460 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
48461 index 2b7882b..1c5ef48 100644
48462 --- a/fs/reiserfs/do_balan.c
48463 +++ b/fs/reiserfs/do_balan.c
48464 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
48465 return;
48466 }
48467
48468 - atomic_inc(&(fs_generation(tb->tb_sb)));
48469 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
48470 do_balance_starts(tb);
48471
48472 /* balance leaf returns 0 except if combining L R and S into
48473 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
48474 index 2c1ade6..8c59d8d 100644
48475 --- a/fs/reiserfs/procfs.c
48476 +++ b/fs/reiserfs/procfs.c
48477 @@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
48478 "SMALL_TAILS " : "NO_TAILS ",
48479 replay_only(sb) ? "REPLAY_ONLY " : "",
48480 convert_reiserfs(sb) ? "CONV " : "",
48481 - atomic_read(&r->s_generation_counter),
48482 + atomic_read_unchecked(&r->s_generation_counter),
48483 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
48484 SF(s_do_balance), SF(s_unneeded_left_neighbor),
48485 SF(s_good_search_by_key_reada), SF(s_bmaps),
48486 diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
48487 index a59d271..e12d1cf 100644
48488 --- a/fs/reiserfs/reiserfs.h
48489 +++ b/fs/reiserfs/reiserfs.h
48490 @@ -453,7 +453,7 @@ struct reiserfs_sb_info {
48491 /* Comment? -Hans */
48492 wait_queue_head_t s_wait;
48493 /* To be obsoleted soon by per buffer seals.. -Hans */
48494 - atomic_t s_generation_counter; // increased by one every time the
48495 + atomic_unchecked_t s_generation_counter; // increased by one every time the
48496 // tree gets re-balanced
48497 unsigned long s_properties; /* File system properties. Currently holds
48498 on-disk FS format */
48499 @@ -1973,7 +1973,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
48500 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
48501
48502 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
48503 -#define get_generation(s) atomic_read (&fs_generation(s))
48504 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
48505 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
48506 #define __fs_changed(gen,s) (gen != get_generation (s))
48507 #define fs_changed(gen,s) \
48508 diff --git a/fs/select.c b/fs/select.c
48509 index 17d33d0..da0bf5c 100644
48510 --- a/fs/select.c
48511 +++ b/fs/select.c
48512 @@ -20,6 +20,7 @@
48513 #include <linux/export.h>
48514 #include <linux/slab.h>
48515 #include <linux/poll.h>
48516 +#include <linux/security.h>
48517 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
48518 #include <linux/file.h>
48519 #include <linux/fdtable.h>
48520 @@ -833,6 +834,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
48521 struct poll_list *walk = head;
48522 unsigned long todo = nfds;
48523
48524 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
48525 if (nfds > rlimit(RLIMIT_NOFILE))
48526 return -EINVAL;
48527
48528 diff --git a/fs/seq_file.c b/fs/seq_file.c
48529 index 0cbd049..cab1127 100644
48530 --- a/fs/seq_file.c
48531 +++ b/fs/seq_file.c
48532 @@ -9,6 +9,7 @@
48533 #include <linux/export.h>
48534 #include <linux/seq_file.h>
48535 #include <linux/slab.h>
48536 +#include <linux/sched.h>
48537
48538 #include <asm/uaccess.h>
48539 #include <asm/page.h>
48540 @@ -56,6 +57,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
48541 memset(p, 0, sizeof(*p));
48542 mutex_init(&p->lock);
48543 p->op = op;
48544 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48545 + p->exec_id = current->exec_id;
48546 +#endif
48547
48548 /*
48549 * Wrappers around seq_open(e.g. swaps_open) need to be
48550 @@ -567,7 +571,7 @@ static void single_stop(struct seq_file *p, void *v)
48551 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
48552 void *data)
48553 {
48554 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
48555 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
48556 int res = -ENOMEM;
48557
48558 if (op) {
48559 diff --git a/fs/splice.c b/fs/splice.c
48560 index f847684..156619e 100644
48561 --- a/fs/splice.c
48562 +++ b/fs/splice.c
48563 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48564 pipe_lock(pipe);
48565
48566 for (;;) {
48567 - if (!pipe->readers) {
48568 + if (!atomic_read(&pipe->readers)) {
48569 send_sig(SIGPIPE, current, 0);
48570 if (!ret)
48571 ret = -EPIPE;
48572 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48573 do_wakeup = 0;
48574 }
48575
48576 - pipe->waiting_writers++;
48577 + atomic_inc(&pipe->waiting_writers);
48578 pipe_wait(pipe);
48579 - pipe->waiting_writers--;
48580 + atomic_dec(&pipe->waiting_writers);
48581 }
48582
48583 pipe_unlock(pipe);
48584 @@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
48585 old_fs = get_fs();
48586 set_fs(get_ds());
48587 /* The cast to a user pointer is valid due to the set_fs() */
48588 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
48589 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
48590 set_fs(old_fs);
48591
48592 return res;
48593 @@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
48594 old_fs = get_fs();
48595 set_fs(get_ds());
48596 /* The cast to a user pointer is valid due to the set_fs() */
48597 - res = vfs_write(file, (const char __user *)buf, count, &pos);
48598 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
48599 set_fs(old_fs);
48600
48601 return res;
48602 @@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
48603 goto err;
48604
48605 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
48606 - vec[i].iov_base = (void __user *) page_address(page);
48607 + vec[i].iov_base = (void __force_user *) page_address(page);
48608 vec[i].iov_len = this_len;
48609 spd.pages[i] = page;
48610 spd.nr_pages++;
48611 @@ -845,10 +845,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
48612 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
48613 {
48614 while (!pipe->nrbufs) {
48615 - if (!pipe->writers)
48616 + if (!atomic_read(&pipe->writers))
48617 return 0;
48618
48619 - if (!pipe->waiting_writers && sd->num_spliced)
48620 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
48621 return 0;
48622
48623 if (sd->flags & SPLICE_F_NONBLOCK)
48624 @@ -1181,7 +1181,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
48625 * out of the pipe right after the splice_to_pipe(). So set
48626 * PIPE_READERS appropriately.
48627 */
48628 - pipe->readers = 1;
48629 + atomic_set(&pipe->readers, 1);
48630
48631 current->splice_pipe = pipe;
48632 }
48633 @@ -1733,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48634 ret = -ERESTARTSYS;
48635 break;
48636 }
48637 - if (!pipe->writers)
48638 + if (!atomic_read(&pipe->writers))
48639 break;
48640 - if (!pipe->waiting_writers) {
48641 + if (!atomic_read(&pipe->waiting_writers)) {
48642 if (flags & SPLICE_F_NONBLOCK) {
48643 ret = -EAGAIN;
48644 break;
48645 @@ -1767,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48646 pipe_lock(pipe);
48647
48648 while (pipe->nrbufs >= pipe->buffers) {
48649 - if (!pipe->readers) {
48650 + if (!atomic_read(&pipe->readers)) {
48651 send_sig(SIGPIPE, current, 0);
48652 ret = -EPIPE;
48653 break;
48654 @@ -1780,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48655 ret = -ERESTARTSYS;
48656 break;
48657 }
48658 - pipe->waiting_writers++;
48659 + atomic_inc(&pipe->waiting_writers);
48660 pipe_wait(pipe);
48661 - pipe->waiting_writers--;
48662 + atomic_dec(&pipe->waiting_writers);
48663 }
48664
48665 pipe_unlock(pipe);
48666 @@ -1818,14 +1818,14 @@ retry:
48667 pipe_double_lock(ipipe, opipe);
48668
48669 do {
48670 - if (!opipe->readers) {
48671 + if (!atomic_read(&opipe->readers)) {
48672 send_sig(SIGPIPE, current, 0);
48673 if (!ret)
48674 ret = -EPIPE;
48675 break;
48676 }
48677
48678 - if (!ipipe->nrbufs && !ipipe->writers)
48679 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
48680 break;
48681
48682 /*
48683 @@ -1922,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48684 pipe_double_lock(ipipe, opipe);
48685
48686 do {
48687 - if (!opipe->readers) {
48688 + if (!atomic_read(&opipe->readers)) {
48689 send_sig(SIGPIPE, current, 0);
48690 if (!ret)
48691 ret = -EPIPE;
48692 @@ -1967,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48693 * return EAGAIN if we have the potential of some data in the
48694 * future, otherwise just return 0
48695 */
48696 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
48697 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
48698 ret = -EAGAIN;
48699
48700 pipe_unlock(ipipe);
48701 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
48702 index 35a36d3..23424b2 100644
48703 --- a/fs/sysfs/dir.c
48704 +++ b/fs/sysfs/dir.c
48705 @@ -657,6 +657,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
48706 struct sysfs_dirent *sd;
48707 int rc;
48708
48709 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48710 + const char *parent_name = parent_sd->s_name;
48711 +
48712 + mode = S_IFDIR | S_IRWXU;
48713 +
48714 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
48715 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
48716 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
48717 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
48718 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
48719 +#endif
48720 +
48721 /* allocate */
48722 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
48723 if (!sd)
48724 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
48725 index 00012e3..8392349 100644
48726 --- a/fs/sysfs/file.c
48727 +++ b/fs/sysfs/file.c
48728 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
48729
48730 struct sysfs_open_dirent {
48731 atomic_t refcnt;
48732 - atomic_t event;
48733 + atomic_unchecked_t event;
48734 wait_queue_head_t poll;
48735 struct list_head buffers; /* goes through sysfs_buffer.list */
48736 };
48737 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
48738 if (!sysfs_get_active(attr_sd))
48739 return -ENODEV;
48740
48741 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
48742 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
48743 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
48744
48745 sysfs_put_active(attr_sd);
48746 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
48747 return -ENOMEM;
48748
48749 atomic_set(&new_od->refcnt, 0);
48750 - atomic_set(&new_od->event, 1);
48751 + atomic_set_unchecked(&new_od->event, 1);
48752 init_waitqueue_head(&new_od->poll);
48753 INIT_LIST_HEAD(&new_od->buffers);
48754 goto retry;
48755 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
48756
48757 sysfs_put_active(attr_sd);
48758
48759 - if (buffer->event != atomic_read(&od->event))
48760 + if (buffer->event != atomic_read_unchecked(&od->event))
48761 goto trigger;
48762
48763 return DEFAULT_POLLMASK;
48764 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
48765
48766 od = sd->s_attr.open;
48767 if (od) {
48768 - atomic_inc(&od->event);
48769 + atomic_inc_unchecked(&od->event);
48770 wake_up_interruptible(&od->poll);
48771 }
48772
48773 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
48774 index a7ac78f..02158e1 100644
48775 --- a/fs/sysfs/symlink.c
48776 +++ b/fs/sysfs/symlink.c
48777 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48778
48779 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
48780 {
48781 - char *page = nd_get_link(nd);
48782 + const char *page = nd_get_link(nd);
48783 if (!IS_ERR(page))
48784 free_page((unsigned long)page);
48785 }
48786 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
48787 index c175b4d..8f36a16 100644
48788 --- a/fs/udf/misc.c
48789 +++ b/fs/udf/misc.c
48790 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
48791
48792 u8 udf_tag_checksum(const struct tag *t)
48793 {
48794 - u8 *data = (u8 *)t;
48795 + const u8 *data = (const u8 *)t;
48796 u8 checksum = 0;
48797 int i;
48798 for (i = 0; i < sizeof(struct tag); ++i)
48799 diff --git a/fs/utimes.c b/fs/utimes.c
48800 index ba653f3..06ea4b1 100644
48801 --- a/fs/utimes.c
48802 +++ b/fs/utimes.c
48803 @@ -1,6 +1,7 @@
48804 #include <linux/compiler.h>
48805 #include <linux/file.h>
48806 #include <linux/fs.h>
48807 +#include <linux/security.h>
48808 #include <linux/linkage.h>
48809 #include <linux/mount.h>
48810 #include <linux/namei.h>
48811 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
48812 goto mnt_drop_write_and_out;
48813 }
48814 }
48815 +
48816 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
48817 + error = -EACCES;
48818 + goto mnt_drop_write_and_out;
48819 + }
48820 +
48821 mutex_lock(&inode->i_mutex);
48822 error = notify_change(path->dentry, &newattrs);
48823 mutex_unlock(&inode->i_mutex);
48824 diff --git a/fs/xattr.c b/fs/xattr.c
48825 index 3c8c1cc..a83c398 100644
48826 --- a/fs/xattr.c
48827 +++ b/fs/xattr.c
48828 @@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
48829 * Extended attribute SET operations
48830 */
48831 static long
48832 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
48833 +setxattr(struct path *path, const char __user *name, const void __user *value,
48834 size_t size, int flags)
48835 {
48836 int error;
48837 @@ -349,7 +349,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
48838 }
48839 }
48840
48841 - error = vfs_setxattr(d, kname, kvalue, size, flags);
48842 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
48843 + error = -EACCES;
48844 + goto out;
48845 + }
48846 +
48847 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
48848 out:
48849 if (vvalue)
48850 vfree(vvalue);
48851 @@ -370,7 +375,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
48852 return error;
48853 error = mnt_want_write(path.mnt);
48854 if (!error) {
48855 - error = setxattr(path.dentry, name, value, size, flags);
48856 + error = setxattr(&path, name, value, size, flags);
48857 mnt_drop_write(path.mnt);
48858 }
48859 path_put(&path);
48860 @@ -389,7 +394,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
48861 return error;
48862 error = mnt_want_write(path.mnt);
48863 if (!error) {
48864 - error = setxattr(path.dentry, name, value, size, flags);
48865 + error = setxattr(&path, name, value, size, flags);
48866 mnt_drop_write(path.mnt);
48867 }
48868 path_put(&path);
48869 @@ -400,17 +405,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
48870 const void __user *,value, size_t, size, int, flags)
48871 {
48872 struct file *f;
48873 - struct dentry *dentry;
48874 int error = -EBADF;
48875
48876 f = fget(fd);
48877 if (!f)
48878 return error;
48879 - dentry = f->f_path.dentry;
48880 - audit_inode(NULL, dentry);
48881 + audit_inode(NULL, f->f_path.dentry);
48882 error = mnt_want_write_file(f);
48883 if (!error) {
48884 - error = setxattr(dentry, name, value, size, flags);
48885 + error = setxattr(&f->f_path, name, value, size, flags);
48886 mnt_drop_write_file(f);
48887 }
48888 fput(f);
48889 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
48890 index 69d06b0..c0996e5 100644
48891 --- a/fs/xattr_acl.c
48892 +++ b/fs/xattr_acl.c
48893 @@ -17,8 +17,8 @@
48894 struct posix_acl *
48895 posix_acl_from_xattr(const void *value, size_t size)
48896 {
48897 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
48898 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
48899 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
48900 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
48901 int count;
48902 struct posix_acl *acl;
48903 struct posix_acl_entry *acl_e;
48904 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
48905 index 85e7e32..5344e52 100644
48906 --- a/fs/xfs/xfs_bmap.c
48907 +++ b/fs/xfs/xfs_bmap.c
48908 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
48909 int nmap,
48910 int ret_nmap);
48911 #else
48912 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
48913 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
48914 #endif /* DEBUG */
48915
48916 STATIC int
48917 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
48918 index 79d05e8..e3e5861 100644
48919 --- a/fs/xfs/xfs_dir2_sf.c
48920 +++ b/fs/xfs/xfs_dir2_sf.c
48921 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
48922 }
48923
48924 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
48925 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48926 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
48927 + char name[sfep->namelen];
48928 + memcpy(name, sfep->name, sfep->namelen);
48929 + if (filldir(dirent, name, sfep->namelen,
48930 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
48931 + *offset = off & 0x7fffffff;
48932 + return 0;
48933 + }
48934 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48935 off & 0x7fffffff, ino, DT_UNKNOWN)) {
48936 *offset = off & 0x7fffffff;
48937 return 0;
48938 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
48939 index 91f8ff5..0ce68f9 100644
48940 --- a/fs/xfs/xfs_ioctl.c
48941 +++ b/fs/xfs/xfs_ioctl.c
48942 @@ -128,7 +128,7 @@ xfs_find_handle(
48943 }
48944
48945 error = -EFAULT;
48946 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
48947 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
48948 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
48949 goto out_put;
48950
48951 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
48952 index 3011b87..1ab03e9 100644
48953 --- a/fs/xfs/xfs_iops.c
48954 +++ b/fs/xfs/xfs_iops.c
48955 @@ -397,7 +397,7 @@ xfs_vn_put_link(
48956 struct nameidata *nd,
48957 void *p)
48958 {
48959 - char *s = nd_get_link(nd);
48960 + const char *s = nd_get_link(nd);
48961
48962 if (!IS_ERR(s))
48963 kfree(s);
48964 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
48965 new file mode 100644
48966 index 0000000..2645296
48967 --- /dev/null
48968 +++ b/grsecurity/Kconfig
48969 @@ -0,0 +1,1079 @@
48970 +#
48971 +# grecurity configuration
48972 +#
48973 +
48974 +menu "Grsecurity"
48975 +
48976 +config GRKERNSEC
48977 + bool "Grsecurity"
48978 + select CRYPTO
48979 + select CRYPTO_SHA256
48980 + help
48981 + If you say Y here, you will be able to configure many features
48982 + that will enhance the security of your system. It is highly
48983 + recommended that you say Y here and read through the help
48984 + for each option so that you fully understand the features and
48985 + can evaluate their usefulness for your machine.
48986 +
48987 +choice
48988 + prompt "Security Level"
48989 + depends on GRKERNSEC
48990 + default GRKERNSEC_CUSTOM
48991 +
48992 +config GRKERNSEC_LOW
48993 + bool "Low"
48994 + select GRKERNSEC_LINK
48995 + select GRKERNSEC_FIFO
48996 + select GRKERNSEC_RANDNET
48997 + select GRKERNSEC_DMESG
48998 + select GRKERNSEC_CHROOT
48999 + select GRKERNSEC_CHROOT_CHDIR
49000 +
49001 + help
49002 + If you choose this option, several of the grsecurity options will
49003 + be enabled that will give you greater protection against a number
49004 + of attacks, while assuring that none of your software will have any
49005 + conflicts with the additional security measures. If you run a lot
49006 + of unusual software, or you are having problems with the higher
49007 + security levels, you should say Y here. With this option, the
49008 + following features are enabled:
49009 +
49010 + - Linking restrictions
49011 + - FIFO restrictions
49012 + - Restricted dmesg
49013 + - Enforced chdir("/") on chroot
49014 + - Runtime module disabling
49015 +
49016 +config GRKERNSEC_MEDIUM
49017 + bool "Medium"
49018 + select PAX
49019 + select PAX_EI_PAX
49020 + select PAX_PT_PAX_FLAGS
49021 + select PAX_HAVE_ACL_FLAGS
49022 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49023 + select GRKERNSEC_CHROOT
49024 + select GRKERNSEC_CHROOT_SYSCTL
49025 + select GRKERNSEC_LINK
49026 + select GRKERNSEC_FIFO
49027 + select GRKERNSEC_DMESG
49028 + select GRKERNSEC_RANDNET
49029 + select GRKERNSEC_FORKFAIL
49030 + select GRKERNSEC_TIME
49031 + select GRKERNSEC_SIGNAL
49032 + select GRKERNSEC_CHROOT
49033 + select GRKERNSEC_CHROOT_UNIX
49034 + select GRKERNSEC_CHROOT_MOUNT
49035 + select GRKERNSEC_CHROOT_PIVOT
49036 + select GRKERNSEC_CHROOT_DOUBLE
49037 + select GRKERNSEC_CHROOT_CHDIR
49038 + select GRKERNSEC_CHROOT_MKNOD
49039 + select GRKERNSEC_PROC
49040 + select GRKERNSEC_PROC_USERGROUP
49041 + select PAX_RANDUSTACK
49042 + select PAX_ASLR
49043 + select PAX_RANDMMAP
49044 + select PAX_REFCOUNT if (X86 || SPARC64)
49045 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49046 +
49047 + help
49048 + If you say Y here, several features in addition to those included
49049 + in the low additional security level will be enabled. These
49050 + features provide even more security to your system, though in rare
49051 + cases they may be incompatible with very old or poorly written
49052 + software. If you enable this option, make sure that your auth
49053 + service (identd) is running as gid 1001. With this option,
49054 + the following features (in addition to those provided in the
49055 + low additional security level) will be enabled:
49056 +
49057 + - Failed fork logging
49058 + - Time change logging
49059 + - Signal logging
49060 + - Deny mounts in chroot
49061 + - Deny double chrooting
49062 + - Deny sysctl writes in chroot
49063 + - Deny mknod in chroot
49064 + - Deny access to abstract AF_UNIX sockets out of chroot
49065 + - Deny pivot_root in chroot
49066 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
49067 + - /proc restrictions with special GID set to 10 (usually wheel)
49068 + - Address Space Layout Randomization (ASLR)
49069 + - Prevent exploitation of most refcount overflows
49070 + - Bounds checking of copying between the kernel and userland
49071 +
49072 +config GRKERNSEC_HIGH
49073 + bool "High"
49074 + select GRKERNSEC_LINK
49075 + select GRKERNSEC_FIFO
49076 + select GRKERNSEC_DMESG
49077 + select GRKERNSEC_FORKFAIL
49078 + select GRKERNSEC_TIME
49079 + select GRKERNSEC_SIGNAL
49080 + select GRKERNSEC_CHROOT
49081 + select GRKERNSEC_CHROOT_SHMAT
49082 + select GRKERNSEC_CHROOT_UNIX
49083 + select GRKERNSEC_CHROOT_MOUNT
49084 + select GRKERNSEC_CHROOT_FCHDIR
49085 + select GRKERNSEC_CHROOT_PIVOT
49086 + select GRKERNSEC_CHROOT_DOUBLE
49087 + select GRKERNSEC_CHROOT_CHDIR
49088 + select GRKERNSEC_CHROOT_MKNOD
49089 + select GRKERNSEC_CHROOT_CAPS
49090 + select GRKERNSEC_CHROOT_SYSCTL
49091 + select GRKERNSEC_CHROOT_FINDTASK
49092 + select GRKERNSEC_SYSFS_RESTRICT
49093 + select GRKERNSEC_PROC
49094 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49095 + select GRKERNSEC_HIDESYM
49096 + select GRKERNSEC_BRUTE
49097 + select GRKERNSEC_PROC_USERGROUP
49098 + select GRKERNSEC_KMEM
49099 + select GRKERNSEC_RESLOG
49100 + select GRKERNSEC_RANDNET
49101 + select GRKERNSEC_PROC_ADD
49102 + select GRKERNSEC_CHROOT_CHMOD
49103 + select GRKERNSEC_CHROOT_NICE
49104 + select GRKERNSEC_SETXID if (X86 || SPARC64 || PPC || ARM || MIPS)
49105 + select GRKERNSEC_AUDIT_MOUNT
49106 + select GRKERNSEC_MODHARDEN if (MODULES)
49107 + select GRKERNSEC_HARDEN_PTRACE
49108 + select GRKERNSEC_PTRACE_READEXEC
49109 + select GRKERNSEC_VM86 if (X86_32)
49110 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
49111 + select PAX
49112 + select PAX_RANDUSTACK
49113 + select PAX_ASLR
49114 + select PAX_RANDMMAP
49115 + select PAX_NOEXEC
49116 + select PAX_MPROTECT
49117 + select PAX_EI_PAX
49118 + select PAX_PT_PAX_FLAGS
49119 + select PAX_HAVE_ACL_FLAGS
49120 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
49121 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
49122 + select PAX_RANDKSTACK if (X86_TSC && X86)
49123 + select PAX_SEGMEXEC if (X86_32)
49124 + select PAX_PAGEEXEC
49125 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
49126 + select PAX_EMUTRAMP if (PARISC)
49127 + select PAX_EMUSIGRT if (PARISC)
49128 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
49129 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
49130 + select PAX_REFCOUNT if (X86 || SPARC64)
49131 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
49132 + help
49133 + If you say Y here, many of the features of grsecurity will be
49134 + enabled, which will protect you against many kinds of attacks
49135 + against your system. The heightened security comes at a cost
49136 + of an increased chance of incompatibilities with rare software
49137 + on your machine. Since this security level enables PaX, you should
49138 + view <http://pax.grsecurity.net> and read about the PaX
49139 + project. While you are there, download chpax and run it on
49140 + binaries that cause problems with PaX. Also remember that
49141 + since the /proc restrictions are enabled, you must run your
49142 + identd as gid 1001. This security level enables the following
49143 + features in addition to those listed in the low and medium
49144 + security levels:
49145 +
49146 + - Additional /proc restrictions
49147 + - Chmod restrictions in chroot
49148 + - No signals, ptrace, or viewing of processes outside of chroot
49149 + - Capability restrictions in chroot
49150 + - Deny fchdir out of chroot
49151 + - Priority restrictions in chroot
49152 + - Segmentation-based implementation of PaX
49153 + - Mprotect restrictions
49154 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
49155 + - Kernel stack randomization
49156 + - Mount/unmount/remount logging
49157 + - Kernel symbol hiding
49158 + - Hardening of module auto-loading
49159 + - Ptrace restrictions
49160 + - Restricted vm86 mode
49161 + - Restricted sysfs/debugfs
49162 + - Active kernel exploit response
49163 +
49164 +config GRKERNSEC_CUSTOM
49165 + bool "Custom"
49166 + help
49167 + If you say Y here, you will be able to configure every grsecurity
49168 + option, which allows you to enable many more features that aren't
49169 + covered in the basic security levels. These additional features
49170 + include TPE, socket restrictions, and the sysctl system for
49171 + grsecurity. It is advised that you read through the help for
49172 + each option to determine its usefulness in your situation.
49173 +
49174 +endchoice
49175 +
49176 +menu "Memory Protections"
49177 +depends on GRKERNSEC
49178 +
49179 +config GRKERNSEC_KMEM
49180 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
49181 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49182 + help
49183 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49184 + be written to or read from to modify or leak the contents of the running
49185 + kernel. /dev/port will also not be allowed to be opened. If you have module
49186 + support disabled, enabling this will close up four ways that are
49187 + currently used to insert malicious code into the running kernel.
49188 + Even with all these features enabled, we still highly recommend that
49189 + you use the RBAC system, as it is still possible for an attacker to
49190 + modify the running kernel through privileged I/O granted by ioperm/iopl.
49191 + If you are not using XFree86, you may be able to stop this additional
49192 + case by enabling the 'Disable privileged I/O' option. Though nothing
49193 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
49194 + but only to video memory, which is the only writing we allow in this
49195 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
49196 + not be allowed to mprotect it with PROT_WRITE later.
49197 + It is highly recommended that you say Y here if you meet all the
49198 + conditions above.
49199 +
49200 +config GRKERNSEC_VM86
49201 + bool "Restrict VM86 mode"
49202 + depends on X86_32
49203 +
49204 + help
49205 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
49206 + make use of a special execution mode on 32bit x86 processors called
49207 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
49208 + video cards and will still work with this option enabled. The purpose
49209 + of the option is to prevent exploitation of emulation errors in
49210 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
49211 + Nearly all users should be able to enable this option.
49212 +
49213 +config GRKERNSEC_IO
49214 + bool "Disable privileged I/O"
49215 + depends on X86
49216 + select RTC_CLASS
49217 + select RTC_INTF_DEV
49218 + select RTC_DRV_CMOS
49219 +
49220 + help
49221 + If you say Y here, all ioperm and iopl calls will return an error.
49222 + Ioperm and iopl can be used to modify the running kernel.
49223 + Unfortunately, some programs need this access to operate properly,
49224 + the most notable of which are XFree86 and hwclock. hwclock can be
49225 + remedied by having RTC support in the kernel, so real-time
49226 + clock support is enabled if this option is enabled, to ensure
49227 + that hwclock operates correctly. XFree86 still will not
49228 + operate correctly with this option enabled, so DO NOT CHOOSE Y
49229 + IF YOU USE XFree86. If you use XFree86 and you still want to
49230 + protect your kernel against modification, use the RBAC system.
49231 +
49232 +config GRKERNSEC_PROC_MEMMAP
49233 + bool "Harden ASLR against information leaks and entropy reduction"
49234 + default y if (PAX_NOEXEC || PAX_ASLR)
49235 + depends on PAX_NOEXEC || PAX_ASLR
49236 + help
49237 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
49238 + give no information about the addresses of its mappings if
49239 + PaX features that rely on random addresses are enabled on the task.
49240 + In addition to sanitizing this information and disabling other
49241 + dangerous sources of information, this option causes reads of sensitive
49242 + /proc/<pid> entries where the file descriptor was opened in a different
49243 + task than the one performing the read. Such attempts are logged.
49244 + This option also limits argv/env strings for suid/sgid binaries
49245 + to 512KB to prevent a complete exhaustion of the stack entropy provided
49246 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
49247 + binaries to prevent alternative mmap layouts from being abused.
49248 +
49249 + If you use PaX it is essential that you say Y here as it closes up
49250 + several holes that make full ASLR useless locally.
49251 +
49252 +config GRKERNSEC_BRUTE
49253 + bool "Deter exploit bruteforcing"
49254 + help
49255 + If you say Y here, attempts to bruteforce exploits against forking
49256 + daemons such as apache or sshd, as well as against suid/sgid binaries
49257 + will be deterred. When a child of a forking daemon is killed by PaX
49258 + or crashes due to an illegal instruction or other suspicious signal,
49259 + the parent process will be delayed 30 seconds upon every subsequent
49260 + fork until the administrator is able to assess the situation and
49261 + restart the daemon.
49262 + In the suid/sgid case, the attempt is logged, the user has all their
49263 + processes terminated, and they are prevented from executing any further
49264 + processes for 15 minutes.
49265 + It is recommended that you also enable signal logging in the auditing
49266 + section so that logs are generated when a process triggers a suspicious
49267 + signal.
49268 + If the sysctl option is enabled, a sysctl option with name
49269 + "deter_bruteforce" is created.
49270 +
49271 +
49272 +config GRKERNSEC_MODHARDEN
49273 + bool "Harden module auto-loading"
49274 + depends on MODULES
49275 + help
49276 + If you say Y here, module auto-loading in response to use of some
49277 + feature implemented by an unloaded module will be restricted to
49278 + root users. Enabling this option helps defend against attacks
49279 + by unprivileged users who abuse the auto-loading behavior to
49280 + cause a vulnerable module to load that is then exploited.
49281 +
49282 + If this option prevents a legitimate use of auto-loading for a
49283 + non-root user, the administrator can execute modprobe manually
49284 + with the exact name of the module mentioned in the alert log.
49285 + Alternatively, the administrator can add the module to the list
49286 + of modules loaded at boot by modifying init scripts.
49287 +
49288 + Modification of init scripts will most likely be needed on
49289 + Ubuntu servers with encrypted home directory support enabled,
49290 + as the first non-root user logging in will cause the ecb(aes),
49291 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
49292 +
49293 +config GRKERNSEC_HIDESYM
49294 + bool "Hide kernel symbols"
49295 + help
49296 + If you say Y here, getting information on loaded modules, and
49297 + displaying all kernel symbols through a syscall will be restricted
49298 + to users with CAP_SYS_MODULE. For software compatibility reasons,
49299 + /proc/kallsyms will be restricted to the root user. The RBAC
49300 + system can hide that entry even from root.
49301 +
49302 + This option also prevents leaking of kernel addresses through
49303 + several /proc entries.
49304 +
49305 + Note that this option is only effective provided the following
49306 + conditions are met:
49307 + 1) The kernel using grsecurity is not precompiled by some distribution
49308 + 2) You have also enabled GRKERNSEC_DMESG
49309 + 3) You are using the RBAC system and hiding other files such as your
49310 + kernel image and System.map. Alternatively, enabling this option
49311 + causes the permissions on /boot, /lib/modules, and the kernel
49312 + source directory to change at compile time to prevent
49313 + reading by non-root users.
49314 + If the above conditions are met, this option will aid in providing a
49315 + useful protection against local kernel exploitation of overflows
49316 + and arbitrary read/write vulnerabilities.
49317 +
49318 +config GRKERNSEC_KERN_LOCKOUT
49319 + bool "Active kernel exploit response"
49320 + depends on X86 || ARM || PPC || SPARC
49321 + help
49322 + If you say Y here, when a PaX alert is triggered due to suspicious
49323 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
49324 + or an OOPs occurs due to bad memory accesses, instead of just
49325 + terminating the offending process (and potentially allowing
49326 + a subsequent exploit from the same user), we will take one of two
49327 + actions:
49328 + If the user was root, we will panic the system
49329 + If the user was non-root, we will log the attempt, terminate
49330 + all processes owned by the user, then prevent them from creating
49331 + any new processes until the system is restarted
49332 + This deters repeated kernel exploitation/bruteforcing attempts
49333 + and is useful for later forensics.
49334 +
49335 +endmenu
49336 +menu "Role Based Access Control Options"
49337 +depends on GRKERNSEC
49338 +
49339 +config GRKERNSEC_RBAC_DEBUG
49340 + bool
49341 +
49342 +config GRKERNSEC_NO_RBAC
49343 + bool "Disable RBAC system"
49344 + help
49345 + If you say Y here, the /dev/grsec device will be removed from the kernel,
49346 + preventing the RBAC system from being enabled. You should only say Y
49347 + here if you have no intention of using the RBAC system, so as to prevent
49348 + an attacker with root access from misusing the RBAC system to hide files
49349 + and processes when loadable module support and /dev/[k]mem have been
49350 + locked down.
49351 +
49352 +config GRKERNSEC_ACL_HIDEKERN
49353 + bool "Hide kernel processes"
49354 + help
49355 + If you say Y here, all kernel threads will be hidden to all
49356 + processes but those whose subject has the "view hidden processes"
49357 + flag.
49358 +
49359 +config GRKERNSEC_ACL_MAXTRIES
49360 + int "Maximum tries before password lockout"
49361 + default 3
49362 + help
49363 + This option enforces the maximum number of times a user can attempt
49364 + to authorize themselves with the grsecurity RBAC system before being
49365 + denied the ability to attempt authorization again for a specified time.
49366 + The lower the number, the harder it will be to brute-force a password.
49367 +
49368 +config GRKERNSEC_ACL_TIMEOUT
49369 + int "Time to wait after max password tries, in seconds"
49370 + default 30
49371 + help
49372 + This option specifies the time the user must wait after attempting to
49373 + authorize to the RBAC system with the maximum number of invalid
49374 + passwords. The higher the number, the harder it will be to brute-force
49375 + a password.
49376 +
49377 +endmenu
49378 +menu "Filesystem Protections"
49379 +depends on GRKERNSEC
49380 +
49381 +config GRKERNSEC_PROC
49382 + bool "Proc restrictions"
49383 + help
49384 + If you say Y here, the permissions of the /proc filesystem
49385 + will be altered to enhance system security and privacy. You MUST
49386 + choose either a user only restriction or a user and group restriction.
49387 + Depending upon the option you choose, you can either restrict users to
49388 + see only the processes they themselves run, or choose a group that can
49389 + view all processes and files normally restricted to root if you choose
49390 + the "restrict to user only" option. NOTE: If you're running identd or
49391 + ntpd as a non-root user, you will have to run it as the group you
49392 + specify here.
49393 +
49394 +config GRKERNSEC_PROC_USER
49395 + bool "Restrict /proc to user only"
49396 + depends on GRKERNSEC_PROC
49397 + help
49398 + If you say Y here, non-root users will only be able to view their own
49399 + processes, and restricts them from viewing network-related information,
49400 + and viewing kernel symbol and module information.
49401 +
49402 +config GRKERNSEC_PROC_USERGROUP
49403 + bool "Allow special group"
49404 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
49405 + help
49406 + If you say Y here, you will be able to select a group that will be
49407 + able to view all processes and network-related information. If you've
49408 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
49409 + remain hidden. This option is useful if you want to run identd as
49410 + a non-root user.
49411 +
49412 +config GRKERNSEC_PROC_GID
49413 + int "GID for special group"
49414 + depends on GRKERNSEC_PROC_USERGROUP
49415 + default 1001
49416 +
49417 +config GRKERNSEC_PROC_ADD
49418 + bool "Additional restrictions"
49419 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
49420 + help
49421 + If you say Y here, additional restrictions will be placed on
49422 + /proc that keep normal users from viewing device information and
49423 + slabinfo information that could be useful for exploits.
49424 +
49425 +config GRKERNSEC_LINK
49426 + bool "Linking restrictions"
49427 + help
49428 + If you say Y here, /tmp race exploits will be prevented, since users
49429 + will no longer be able to follow symlinks owned by other users in
49430 + world-writable +t directories (e.g. /tmp), unless the owner of the
49431 + symlink is the owner of the directory. Users will also not be
49432 + able to hardlink to files they do not own. If the sysctl option is
49433 + enabled, a sysctl option with name "linking_restrictions" is created.
49434 +
49435 +config GRKERNSEC_FIFO
49436 + bool "FIFO restrictions"
49437 + help
49438 + If you say Y here, users will not be able to write to FIFOs they don't
49439 + own in world-writable +t directories (e.g. /tmp), unless the owner of
49440 + the FIFO is the same owner of the directory it's held in. If the sysctl
49441 + option is enabled, a sysctl option with name "fifo_restrictions" is
49442 + created.
49443 +
49444 +config GRKERNSEC_SYSFS_RESTRICT
49445 + bool "Sysfs/debugfs restriction"
49446 + depends on SYSFS
49447 + help
49448 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
49449 + any filesystem normally mounted under it (e.g. debugfs) will be
49450 + mostly accessible only by root. These filesystems generally provide access
49451 + to hardware and debug information that isn't appropriate for unprivileged
49452 + users of the system. Sysfs and debugfs have also become a large source
49453 + of new vulnerabilities, ranging from infoleaks to local compromise.
49454 + There has been very little oversight with an eye toward security involved
49455 + in adding new exporters of information to these filesystems, so their
49456 + use is discouraged.
49457 + For reasons of compatibility, a few directories have been whitelisted
49458 + for access by non-root users:
49459 + /sys/fs/selinux
49460 + /sys/fs/fuse
49461 + /sys/devices/system/cpu
49462 +
49463 +config GRKERNSEC_ROFS
49464 + bool "Runtime read-only mount protection"
49465 + help
49466 + If you say Y here, a sysctl option with name "romount_protect" will
49467 + be created. By setting this option to 1 at runtime, filesystems
49468 + will be protected in the following ways:
49469 + * No new writable mounts will be allowed
49470 + * Existing read-only mounts won't be able to be remounted read/write
49471 + * Write operations will be denied on all block devices
49472 + This option acts independently of grsec_lock: once it is set to 1,
49473 + it cannot be turned off. Therefore, please be mindful of the resulting
49474 + behavior if this option is enabled in an init script on a read-only
49475 + filesystem. This feature is mainly intended for secure embedded systems.
49476 +
49477 +config GRKERNSEC_CHROOT
49478 + bool "Chroot jail restrictions"
49479 + help
49480 + If you say Y here, you will be able to choose several options that will
49481 + make breaking out of a chrooted jail much more difficult. If you
49482 + encounter no software incompatibilities with the following options, it
49483 + is recommended that you enable each one.
49484 +
49485 +config GRKERNSEC_CHROOT_MOUNT
49486 + bool "Deny mounts"
49487 + depends on GRKERNSEC_CHROOT
49488 + help
49489 + If you say Y here, processes inside a chroot will not be able to
49490 + mount or remount filesystems. If the sysctl option is enabled, a
49491 + sysctl option with name "chroot_deny_mount" is created.
49492 +
49493 +config GRKERNSEC_CHROOT_DOUBLE
49494 + bool "Deny double-chroots"
49495 + depends on GRKERNSEC_CHROOT
49496 + help
49497 + If you say Y here, processes inside a chroot will not be able to chroot
49498 + again outside the chroot. This is a widely used method of breaking
49499 + out of a chroot jail and should not be allowed. If the sysctl
49500 + option is enabled, a sysctl option with name
49501 + "chroot_deny_chroot" is created.
49502 +
49503 +config GRKERNSEC_CHROOT_PIVOT
49504 + bool "Deny pivot_root in chroot"
49505 + depends on GRKERNSEC_CHROOT
49506 + help
49507 + If you say Y here, processes inside a chroot will not be able to use
49508 + a function called pivot_root() that was introduced in Linux 2.3.41. It
49509 + works similarly to chroot in that it changes the root filesystem. This
49510 + function could be misused in a chrooted process to attempt to break out
49511 + of the chroot, and therefore should not be allowed. If the sysctl
49512 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
49513 + created.
49514 +
49515 +config GRKERNSEC_CHROOT_CHDIR
49516 + bool "Enforce chdir(\"/\") on all chroots"
49517 + depends on GRKERNSEC_CHROOT
49518 + help
49519 + If you say Y here, the current working directory of all newly-chrooted
49520 + applications will be set to the root directory of the chroot.
49521 + The man page on chroot(2) states:
49522 + Note that this call does not change the current working
49523 + directory, so that `.' can be outside the tree rooted at
49524 + `/'. In particular, the super-user can escape from a
49525 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
49526 +
49527 + It is recommended that you say Y here, since it's not known to break
49528 + any software. If the sysctl option is enabled, a sysctl option with
49529 + name "chroot_enforce_chdir" is created.
49530 +
49531 +config GRKERNSEC_CHROOT_CHMOD
49532 + bool "Deny (f)chmod +s"
49533 + depends on GRKERNSEC_CHROOT
49534 + help
49535 + If you say Y here, processes inside a chroot will not be able to chmod
49536 + or fchmod files to make them have suid or sgid bits. This protects
49537 + against another published method of breaking a chroot. If the sysctl
49538 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
49539 + created.
49540 +
49541 +config GRKERNSEC_CHROOT_FCHDIR
49542 + bool "Deny fchdir out of chroot"
49543 + depends on GRKERNSEC_CHROOT
49544 + help
49545 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
49546 + to a file descriptor of the chrooting process that points to a directory
49547 + outside the filesystem will be stopped. If the sysctl option
49548 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
49549 +
49550 +config GRKERNSEC_CHROOT_MKNOD
49551 + bool "Deny mknod"
49552 + depends on GRKERNSEC_CHROOT
49553 + help
49554 + If you say Y here, processes inside a chroot will not be allowed to
49555 + mknod. The problem with using mknod inside a chroot is that it
49556 + would allow an attacker to create a device entry that is the same
49557 + as one on the physical root of your system, which could range from
49558 + anything from the console device to a device for your harddrive (which
49559 + they could then use to wipe the drive or steal data). It is recommended
49560 + that you say Y here, unless you run into software incompatibilities.
49561 + If the sysctl option is enabled, a sysctl option with name
49562 + "chroot_deny_mknod" is created.
49563 +
49564 +config GRKERNSEC_CHROOT_SHMAT
49565 + bool "Deny shmat() out of chroot"
49566 + depends on GRKERNSEC_CHROOT
49567 + help
49568 + If you say Y here, processes inside a chroot will not be able to attach
49569 + to shared memory segments that were created outside of the chroot jail.
49570 + It is recommended that you say Y here. If the sysctl option is enabled,
49571 + a sysctl option with name "chroot_deny_shmat" is created.
49572 +
49573 +config GRKERNSEC_CHROOT_UNIX
49574 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
49575 + depends on GRKERNSEC_CHROOT
49576 + help
49577 + If you say Y here, processes inside a chroot will not be able to
49578 + connect to abstract (meaning not belonging to a filesystem) Unix
49579 + domain sockets that were bound outside of a chroot. It is recommended
49580 + that you say Y here. If the sysctl option is enabled, a sysctl option
49581 + with name "chroot_deny_unix" is created.
49582 +
49583 +config GRKERNSEC_CHROOT_FINDTASK
49584 + bool "Protect outside processes"
49585 + depends on GRKERNSEC_CHROOT
49586 + help
49587 + If you say Y here, processes inside a chroot will not be able to
49588 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
49589 + getsid, or view any process outside of the chroot. If the sysctl
49590 + option is enabled, a sysctl option with name "chroot_findtask" is
49591 + created.
49592 +
49593 +config GRKERNSEC_CHROOT_NICE
49594 + bool "Restrict priority changes"
49595 + depends on GRKERNSEC_CHROOT
49596 + help
49597 + If you say Y here, processes inside a chroot will not be able to raise
49598 + the priority of processes in the chroot, or alter the priority of
49599 + processes outside the chroot. This provides more security than simply
49600 + removing CAP_SYS_NICE from the process' capability set. If the
49601 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
49602 + is created.
49603 +
49604 +config GRKERNSEC_CHROOT_SYSCTL
49605 + bool "Deny sysctl writes"
49606 + depends on GRKERNSEC_CHROOT
49607 + help
49608 + If you say Y here, an attacker in a chroot will not be able to
49609 + write to sysctl entries, either by sysctl(2) or through a /proc
49610 + interface. It is strongly recommended that you say Y here. If the
49611 + sysctl option is enabled, a sysctl option with name
49612 + "chroot_deny_sysctl" is created.
49613 +
49614 +config GRKERNSEC_CHROOT_CAPS
49615 + bool "Capability restrictions"
49616 + depends on GRKERNSEC_CHROOT
49617 + help
49618 + If you say Y here, the capabilities on all processes within a
49619 + chroot jail will be lowered to stop module insertion, raw i/o,
49620 + system and net admin tasks, rebooting the system, modifying immutable
49621 + files, modifying IPC owned by another, and changing the system time.
49622 + This is left an option because it can break some apps. Disable this
49623 + if your chrooted apps are having problems performing those kinds of
49624 + tasks. If the sysctl option is enabled, a sysctl option with
49625 + name "chroot_caps" is created.
49626 +
49627 +endmenu
49628 +menu "Kernel Auditing"
49629 +depends on GRKERNSEC
49630 +
49631 +config GRKERNSEC_AUDIT_GROUP
49632 + bool "Single group for auditing"
49633 + help
49634 + If you say Y here, the exec, chdir, and (un)mount logging features
49635 + will only operate on a group you specify. This option is recommended
49636 + if you only want to watch certain users instead of having a large
49637 + amount of logs from the entire system. If the sysctl option is enabled,
49638 + a sysctl option with name "audit_group" is created.
49639 +
49640 +config GRKERNSEC_AUDIT_GID
49641 + int "GID for auditing"
49642 + depends on GRKERNSEC_AUDIT_GROUP
49643 + default 1007
49644 +
49645 +config GRKERNSEC_EXECLOG
49646 + bool "Exec logging"
49647 + help
49648 + If you say Y here, all execve() calls will be logged (since the
49649 + other exec*() calls are frontends to execve(), all execution
49650 + will be logged). Useful for shell-servers that like to keep track
49651 + of their users. If the sysctl option is enabled, a sysctl option with
49652 + name "exec_logging" is created.
49653 + WARNING: This option when enabled will produce a LOT of logs, especially
49654 + on an active system.
49655 +
49656 +config GRKERNSEC_RESLOG
49657 + bool "Resource logging"
49658 + help
49659 + If you say Y here, all attempts to overstep resource limits will
49660 + be logged with the resource name, the requested size, and the current
49661 + limit. It is highly recommended that you say Y here. If the sysctl
49662 + option is enabled, a sysctl option with name "resource_logging" is
49663 + created. If the RBAC system is enabled, the sysctl value is ignored.
49664 +
49665 +config GRKERNSEC_CHROOT_EXECLOG
49666 + bool "Log execs within chroot"
49667 + help
49668 + If you say Y here, all executions inside a chroot jail will be logged
49669 + to syslog. This can cause a large amount of logs if certain
49670 + applications (eg. djb's daemontools) are installed on the system, and
49671 + is therefore left as an option. If the sysctl option is enabled, a
49672 + sysctl option with name "chroot_execlog" is created.
49673 +
49674 +config GRKERNSEC_AUDIT_PTRACE
49675 + bool "Ptrace logging"
49676 + help
49677 + If you say Y here, all attempts to attach to a process via ptrace
49678 + will be logged. If the sysctl option is enabled, a sysctl option
49679 + with name "audit_ptrace" is created.
49680 +
49681 +config GRKERNSEC_AUDIT_CHDIR
49682 + bool "Chdir logging"
49683 + help
49684 + If you say Y here, all chdir() calls will be logged. If the sysctl
49685 + option is enabled, a sysctl option with name "audit_chdir" is created.
49686 +
49687 +config GRKERNSEC_AUDIT_MOUNT
49688 + bool "(Un)Mount logging"
49689 + help
49690 + If you say Y here, all mounts and unmounts will be logged. If the
49691 + sysctl option is enabled, a sysctl option with name "audit_mount" is
49692 + created.
49693 +
49694 +config GRKERNSEC_SIGNAL
49695 + bool "Signal logging"
49696 + help
49697 + If you say Y here, certain important signals will be logged, such as
49698 + SIGSEGV, which will as a result inform you of when an error in a program
49699 + occurred, which in some cases could mean a possible exploit attempt.
49700 + If the sysctl option is enabled, a sysctl option with name
49701 + "signal_logging" is created.
49702 +
49703 +config GRKERNSEC_FORKFAIL
49704 + bool "Fork failure logging"
49705 + help
49706 + If you say Y here, all failed fork() attempts will be logged.
49707 + This could suggest a fork bomb, or someone attempting to overstep
49708 + their process limit. If the sysctl option is enabled, a sysctl option
49709 + with name "forkfail_logging" is created.
49710 +
49711 +config GRKERNSEC_TIME
49712 + bool "Time change logging"
49713 + help
49714 + If you say Y here, any changes of the system clock will be logged.
49715 + If the sysctl option is enabled, a sysctl option with name
49716 + "timechange_logging" is created.
49717 +
49718 +config GRKERNSEC_PROC_IPADDR
49719 + bool "/proc/<pid>/ipaddr support"
49720 + help
49721 + If you say Y here, a new entry will be added to each /proc/<pid>
49722 + directory that contains the IP address of the person using the task.
49723 + The IP is carried across local TCP and AF_UNIX stream sockets.
49724 + This information can be useful for IDS/IPSes to perform remote response
49725 + to a local attack. The entry is readable by only the owner of the
49726 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
49727 + the RBAC system), and thus does not create privacy concerns.
49728 +
49729 +config GRKERNSEC_RWXMAP_LOG
49730 + bool 'Denied RWX mmap/mprotect logging'
49731 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
49732 + help
49733 + If you say Y here, calls to mmap() and mprotect() with explicit
49734 + usage of PROT_WRITE and PROT_EXEC together will be logged when
49735 + denied by the PAX_MPROTECT feature. If the sysctl option is
49736 + enabled, a sysctl option with name "rwxmap_logging" is created.
49737 +
49738 +config GRKERNSEC_AUDIT_TEXTREL
49739 + bool 'ELF text relocations logging (READ HELP)'
49740 + depends on PAX_MPROTECT
49741 + help
49742 + If you say Y here, text relocations will be logged with the filename
49743 + of the offending library or binary. The purpose of the feature is
49744 + to help Linux distribution developers get rid of libraries and
49745 + binaries that need text relocations which hinder the future progress
49746 + of PaX. Only Linux distribution developers should say Y here, and
49747 + never on a production machine, as this option creates an information
49748 + leak that could aid an attacker in defeating the randomization of
49749 + a single memory region. If the sysctl option is enabled, a sysctl
49750 + option with name "audit_textrel" is created.
49751 +
49752 +endmenu
49753 +
49754 +menu "Executable Protections"
49755 +depends on GRKERNSEC
49756 +
49757 +config GRKERNSEC_DMESG
49758 + bool "Dmesg(8) restriction"
49759 + help
49760 + If you say Y here, non-root users will not be able to use dmesg(8)
49761 + to view up to the last 4kb of messages in the kernel's log buffer.
49762 + The kernel's log buffer often contains kernel addresses and other
49763 + identifying information useful to an attacker in fingerprinting a
49764 + system for a targeted exploit.
49765 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
49766 + created.
49767 +
49768 +config GRKERNSEC_HARDEN_PTRACE
49769 + bool "Deter ptrace-based process snooping"
49770 + help
49771 + If you say Y here, TTY sniffers and other malicious monitoring
49772 + programs implemented through ptrace will be defeated. If you
49773 + have been using the RBAC system, this option has already been
49774 + enabled for several years for all users, with the ability to make
49775 + fine-grained exceptions.
49776 +
49777 + This option only affects the ability of non-root users to ptrace
49778 + processes that are not a descendent of the ptracing process.
49779 + This means that strace ./binary and gdb ./binary will still work,
49780 + but attaching to arbitrary processes will not. If the sysctl
49781 + option is enabled, a sysctl option with name "harden_ptrace" is
49782 + created.
49783 +
49784 +config GRKERNSEC_PTRACE_READEXEC
49785 + bool "Require read access to ptrace sensitive binaries"
49786 + help
49787 + If you say Y here, unprivileged users will not be able to ptrace unreadable
49788 + binaries. This option is useful in environments that
49789 + remove the read bits (e.g. file mode 4711) from suid binaries to
49790 + prevent infoleaking of their contents. This option adds
49791 + consistency to the use of that file mode, as the binary could normally
49792 + be read out when run without privileges while ptracing.
49793 +
49794 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
49795 + is created.
49796 +
49797 +config GRKERNSEC_SETXID
49798 + bool "Enforce consistent multithreaded privileges"
49799 + depends on (X86 || SPARC64 || PPC || ARM || MIPS)
49800 + help
49801 + If you say Y here, a change from a root uid to a non-root uid
49802 + in a multithreaded application will cause the resulting uids,
49803 + gids, supplementary groups, and capabilities in that thread
49804 + to be propagated to the other threads of the process. In most
49805 + cases this is unnecessary, as glibc will emulate this behavior
49806 + on behalf of the application. Other libcs do not act in the
49807 + same way, allowing the other threads of the process to continue
49808 + running with root privileges. If the sysctl option is enabled,
49809 + a sysctl option with name "consistent_setxid" is created.
49810 +
49811 +config GRKERNSEC_TPE
49812 + bool "Trusted Path Execution (TPE)"
49813 + help
49814 + If you say Y here, you will be able to choose a gid to add to the
49815 + supplementary groups of users you want to mark as "untrusted."
49816 + These users will not be able to execute any files that are not in
49817 + root-owned directories writable only by root. If the sysctl option
49818 + is enabled, a sysctl option with name "tpe" is created.
49819 +
49820 +config GRKERNSEC_TPE_ALL
49821 + bool "Partially restrict all non-root users"
49822 + depends on GRKERNSEC_TPE
49823 + help
49824 + If you say Y here, all non-root users will be covered under
49825 + a weaker TPE restriction. This is separate from, and in addition to,
49826 + the main TPE options that you have selected elsewhere. Thus, if a
49827 + "trusted" GID is chosen, this restriction applies to even that GID.
49828 + Under this restriction, all non-root users will only be allowed to
49829 + execute files in directories they own that are not group or
49830 + world-writable, or in directories owned by root and writable only by
49831 + root. If the sysctl option is enabled, a sysctl option with name
49832 + "tpe_restrict_all" is created.
49833 +
49834 +config GRKERNSEC_TPE_INVERT
49835 + bool "Invert GID option"
49836 + depends on GRKERNSEC_TPE
49837 + help
49838 + If you say Y here, the group you specify in the TPE configuration will
49839 + decide what group TPE restrictions will be *disabled* for. This
49840 + option is useful if you want TPE restrictions to be applied to most
49841 + users on the system. If the sysctl option is enabled, a sysctl option
49842 + with name "tpe_invert" is created. Unlike other sysctl options, this
49843 + entry will default to on for backward-compatibility.
49844 +
49845 +config GRKERNSEC_TPE_GID
49846 + int "GID for untrusted users"
49847 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
49848 + default 1005
49849 + help
49850 + Setting this GID determines what group TPE restrictions will be
49851 + *enabled* for. If the sysctl option is enabled, a sysctl option
49852 + with name "tpe_gid" is created.
49853 +
49854 +config GRKERNSEC_TPE_GID
49855 + int "GID for trusted users"
49856 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
49857 + default 1005
49858 + help
49859 + Setting this GID determines what group TPE restrictions will be
49860 + *disabled* for. If the sysctl option is enabled, a sysctl option
49861 + with name "tpe_gid" is created.
49862 +
49863 +endmenu
49864 +menu "Network Protections"
49865 +depends on GRKERNSEC
49866 +
49867 +config GRKERNSEC_RANDNET
49868 + bool "Larger entropy pools"
49869 + help
49870 + If you say Y here, the entropy pools used for many features of Linux
49871 + and grsecurity will be doubled in size. Since several grsecurity
49872 + features use additional randomness, it is recommended that you say Y
49873 + here. Saying Y here has a similar effect as modifying
49874 + /proc/sys/kernel/random/poolsize.
49875 +
49876 +config GRKERNSEC_BLACKHOLE
49877 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
49878 + depends on NET
49879 + help
49880 + If you say Y here, neither TCP resets nor ICMP
49881 + destination-unreachable packets will be sent in response to packets
49882 + sent to ports for which no associated listening process exists.
49883 + This feature supports both IPV4 and IPV6 and exempts the
49884 + loopback interface from blackholing. Enabling this feature
49885 + makes a host more resilient to DoS attacks and reduces network
49886 + visibility against scanners.
49887 +
49888 + The blackhole feature as-implemented is equivalent to the FreeBSD
49889 + blackhole feature, as it prevents RST responses to all packets, not
49890 + just SYNs. Under most application behavior this causes no
49891 + problems, but applications (like haproxy) may not close certain
49892 + connections in a way that cleanly terminates them on the remote
49893 + end, leaving the remote host in LAST_ACK state. Because of this
49894 + side-effect and to prevent intentional LAST_ACK DoSes, this
49895 + feature also adds automatic mitigation against such attacks.
49896 + The mitigation drastically reduces the amount of time a socket
49897 + can spend in LAST_ACK state. If you're using haproxy and not
49898 + all servers it connects to have this option enabled, consider
49899 + disabling this feature on the haproxy host.
49900 +
49901 + If the sysctl option is enabled, two sysctl options with names
49902 + "ip_blackhole" and "lastack_retries" will be created.
49903 + While "ip_blackhole" takes the standard zero/non-zero on/off
49904 + toggle, "lastack_retries" uses the same kinds of values as
49905 + "tcp_retries1" and "tcp_retries2". The default value of 4
49906 + prevents a socket from lasting more than 45 seconds in LAST_ACK
49907 + state.
49908 +
49909 +config GRKERNSEC_SOCKET
49910 + bool "Socket restrictions"
49911 + depends on NET
49912 + help
49913 + If you say Y here, you will be able to choose from several options.
49914 + If you assign a GID on your system and add it to the supplementary
49915 + groups of users you want to restrict socket access to, this patch
49916 + will perform up to three things, based on the option(s) you choose.
49917 +
49918 +config GRKERNSEC_SOCKET_ALL
49919 + bool "Deny any sockets to group"
49920 + depends on GRKERNSEC_SOCKET
49921 + help
49922 + If you say Y here, you will be able to choose a GID whose users will
49923 + be unable to connect to other hosts from your machine or run server
49924 + applications from your machine. If the sysctl option is enabled, a
49925 + sysctl option with name "socket_all" is created.
49926 +
49927 +config GRKERNSEC_SOCKET_ALL_GID
49928 + int "GID to deny all sockets for"
49929 + depends on GRKERNSEC_SOCKET_ALL
49930 + default 1004
49931 + help
49932 + Here you can choose the GID to disable socket access for. Remember to
49933 + add the users you want socket access disabled for to the GID
49934 + specified here. If the sysctl option is enabled, a sysctl option
49935 + with name "socket_all_gid" is created.
49936 +
49937 +config GRKERNSEC_SOCKET_CLIENT
49938 + bool "Deny client sockets to group"
49939 + depends on GRKERNSEC_SOCKET
49940 + help
49941 + If you say Y here, you will be able to choose a GID whose users will
49942 + be unable to connect to other hosts from your machine, but will be
49943 + able to run servers. If this option is enabled, all users in the group
49944 + you specify will have to use passive mode when initiating ftp transfers
49945 + from the shell on your machine. If the sysctl option is enabled, a
49946 + sysctl option with name "socket_client" is created.
49947 +
49948 +config GRKERNSEC_SOCKET_CLIENT_GID
49949 + int "GID to deny client sockets for"
49950 + depends on GRKERNSEC_SOCKET_CLIENT
49951 + default 1003
49952 + help
49953 + Here you can choose the GID to disable client socket access for.
49954 + Remember to add the users you want client socket access disabled for to
49955 + the GID specified here. If the sysctl option is enabled, a sysctl
49956 + option with name "socket_client_gid" is created.
49957 +
49958 +config GRKERNSEC_SOCKET_SERVER
49959 + bool "Deny server sockets to group"
49960 + depends on GRKERNSEC_SOCKET
49961 + help
49962 + If you say Y here, you will be able to choose a GID whose users will
49963 + be unable to run server applications from your machine. If the sysctl
49964 + option is enabled, a sysctl option with name "socket_server" is created.
49965 +
49966 +config GRKERNSEC_SOCKET_SERVER_GID
49967 + int "GID to deny server sockets for"
49968 + depends on GRKERNSEC_SOCKET_SERVER
49969 + default 1002
49970 + help
49971 + Here you can choose the GID to disable server socket access for.
49972 + Remember to add the users you want server socket access disabled for to
49973 + the GID specified here. If the sysctl option is enabled, a sysctl
49974 + option with name "socket_server_gid" is created.
49975 +
49976 +endmenu
49977 +menu "Sysctl support"
49978 +depends on GRKERNSEC && SYSCTL
49979 +
49980 +config GRKERNSEC_SYSCTL
49981 + bool "Sysctl support"
49982 + help
49983 + If you say Y here, you will be able to change the options that
49984 + grsecurity runs with at bootup, without having to recompile your
49985 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
49986 + to enable (1) or disable (0) various features. All the sysctl entries
49987 + are mutable until the "grsec_lock" entry is set to a non-zero value.
49988 + All features enabled in the kernel configuration are disabled at boot
49989 + if you do not say Y to the "Turn on features by default" option.
49990 + All options should be set at startup, and the grsec_lock entry should
49991 + be set to a non-zero value after all the options are set.
49992 + *THIS IS EXTREMELY IMPORTANT*
49993 +
49994 +config GRKERNSEC_SYSCTL_DISTRO
49995 + bool "Extra sysctl support for distro makers (READ HELP)"
49996 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
49997 + help
49998 + If you say Y here, additional sysctl options will be created
49999 + for features that affect processes running as root. Therefore,
50000 + it is critical when using this option that the grsec_lock entry be
50001 + enabled after boot. Only distros with prebuilt kernel packages
50002 + with this option enabled that can ensure grsec_lock is enabled
50003 + after boot should use this option.
50004 + *Failure to set grsec_lock after boot makes all grsec features
50005 + this option covers useless*
50006 +
50007 + Currently this option creates the following sysctl entries:
50008 + "Disable Privileged I/O": "disable_priv_io"
50009 +
50010 +config GRKERNSEC_SYSCTL_ON
50011 + bool "Turn on features by default"
50012 + depends on GRKERNSEC_SYSCTL
50013 + help
50014 + If you say Y here, instead of having all features enabled in the
50015 + kernel configuration disabled at boot time, the features will be
50016 + enabled at boot time. It is recommended you say Y here unless
50017 + there is some reason you would want all sysctl-tunable features to
50018 + be disabled by default. As mentioned elsewhere, it is important
50019 + to enable the grsec_lock entry once you have finished modifying
50020 + the sysctl entries.
50021 +
50022 +endmenu
50023 +menu "Logging Options"
50024 +depends on GRKERNSEC
50025 +
50026 +config GRKERNSEC_FLOODTIME
50027 + int "Seconds in between log messages (minimum)"
50028 + default 10
50029 + help
50030 + This option allows you to enforce the number of seconds between
50031 + grsecurity log messages. The default should be suitable for most
50032 + people, however, if you choose to change it, choose a value small enough
50033 + to allow informative logs to be produced, but large enough to
50034 + prevent flooding.
50035 +
50036 +config GRKERNSEC_FLOODBURST
50037 + int "Number of messages in a burst (maximum)"
50038 + default 6
50039 + help
50040 + This option allows you to choose the maximum number of messages allowed
50041 + within the flood time interval you chose in a separate option. The
50042 + default should be suitable for most people, however if you find that
50043 + many of your logs are being interpreted as flooding, you may want to
50044 + raise this value.
50045 +
50046 +endmenu
50047 +
50048 +endmenu
50049 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
50050 new file mode 100644
50051 index 0000000..1b9afa9
50052 --- /dev/null
50053 +++ b/grsecurity/Makefile
50054 @@ -0,0 +1,38 @@
50055 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50056 +# during 2001-2009 it has been completely redesigned by Brad Spengler
50057 +# into an RBAC system
50058 +#
50059 +# All code in this directory and various hooks inserted throughout the kernel
50060 +# are copyright Brad Spengler - Open Source Security, Inc., and released
50061 +# under the GPL v2 or higher
50062 +
50063 +KBUILD_CFLAGS += -Werror
50064 +
50065 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50066 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
50067 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50068 +
50069 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50070 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50071 + gracl_learn.o grsec_log.o
50072 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50073 +
50074 +ifdef CONFIG_NET
50075 +obj-y += grsec_sock.o
50076 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50077 +endif
50078 +
50079 +ifndef CONFIG_GRKERNSEC
50080 +obj-y += grsec_disabled.o
50081 +endif
50082 +
50083 +ifdef CONFIG_GRKERNSEC_HIDESYM
50084 +extra-y := grsec_hidesym.o
50085 +$(obj)/grsec_hidesym.o:
50086 + @-chmod -f 500 /boot
50087 + @-chmod -f 500 /lib/modules
50088 + @-chmod -f 500 /lib64/modules
50089 + @-chmod -f 500 /lib32/modules
50090 + @-chmod -f 700 .
50091 + @echo ' grsec: protected kernel image paths'
50092 +endif
50093 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
50094 new file mode 100644
50095 index 0000000..00b6c54
50096 --- /dev/null
50097 +++ b/grsecurity/gracl.c
50098 @@ -0,0 +1,4012 @@
50099 +#include <linux/kernel.h>
50100 +#include <linux/module.h>
50101 +#include <linux/sched.h>
50102 +#include <linux/mm.h>
50103 +#include <linux/file.h>
50104 +#include <linux/fs.h>
50105 +#include <linux/namei.h>
50106 +#include <linux/mount.h>
50107 +#include <linux/tty.h>
50108 +#include <linux/proc_fs.h>
50109 +#include <linux/lglock.h>
50110 +#include <linux/slab.h>
50111 +#include <linux/vmalloc.h>
50112 +#include <linux/types.h>
50113 +#include <linux/sysctl.h>
50114 +#include <linux/netdevice.h>
50115 +#include <linux/ptrace.h>
50116 +#include <linux/gracl.h>
50117 +#include <linux/gralloc.h>
50118 +#include <linux/security.h>
50119 +#include <linux/grinternal.h>
50120 +#include <linux/pid_namespace.h>
50121 +#include <linux/fdtable.h>
50122 +#include <linux/percpu.h>
50123 +#include "../fs/mount.h"
50124 +
50125 +#include <asm/uaccess.h>
50126 +#include <asm/errno.h>
50127 +#include <asm/mman.h>
50128 +
50129 +static struct acl_role_db acl_role_set;
50130 +static struct name_db name_set;
50131 +static struct inodev_db inodev_set;
50132 +
50133 +/* for keeping track of userspace pointers used for subjects, so we
50134 + can share references in the kernel as well
50135 +*/
50136 +
50137 +static struct path real_root;
50138 +
50139 +static struct acl_subj_map_db subj_map_set;
50140 +
50141 +static struct acl_role_label *default_role;
50142 +
50143 +static struct acl_role_label *role_list;
50144 +
50145 +static u16 acl_sp_role_value;
50146 +
50147 +extern char *gr_shared_page[4];
50148 +static DEFINE_MUTEX(gr_dev_mutex);
50149 +DEFINE_RWLOCK(gr_inode_lock);
50150 +
50151 +struct gr_arg *gr_usermode;
50152 +
50153 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
50154 +
50155 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
50156 +extern void gr_clear_learn_entries(void);
50157 +
50158 +#ifdef CONFIG_GRKERNSEC_RESLOG
50159 +extern void gr_log_resource(const struct task_struct *task,
50160 + const int res, const unsigned long wanted, const int gt);
50161 +#endif
50162 +
50163 +unsigned char *gr_system_salt;
50164 +unsigned char *gr_system_sum;
50165 +
50166 +static struct sprole_pw **acl_special_roles = NULL;
50167 +static __u16 num_sprole_pws = 0;
50168 +
50169 +static struct acl_role_label *kernel_role = NULL;
50170 +
50171 +static unsigned int gr_auth_attempts = 0;
50172 +static unsigned long gr_auth_expires = 0UL;
50173 +
50174 +#ifdef CONFIG_NET
50175 +extern struct vfsmount *sock_mnt;
50176 +#endif
50177 +
50178 +extern struct vfsmount *pipe_mnt;
50179 +extern struct vfsmount *shm_mnt;
50180 +#ifdef CONFIG_HUGETLBFS
50181 +extern struct vfsmount *hugetlbfs_vfsmount;
50182 +#endif
50183 +
50184 +static struct acl_object_label *fakefs_obj_rw;
50185 +static struct acl_object_label *fakefs_obj_rwx;
50186 +
50187 +extern int gr_init_uidset(void);
50188 +extern void gr_free_uidset(void);
50189 +extern void gr_remove_uid(uid_t uid);
50190 +extern int gr_find_uid(uid_t uid);
50191 +
50192 +DECLARE_BRLOCK(vfsmount_lock);
50193 +
50194 +__inline__ int
50195 +gr_acl_is_enabled(void)
50196 +{
50197 + return (gr_status & GR_READY);
50198 +}
50199 +
50200 +#ifdef CONFIG_BTRFS_FS
50201 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50202 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50203 +#endif
50204 +
50205 +static inline dev_t __get_dev(const struct dentry *dentry)
50206 +{
50207 +#ifdef CONFIG_BTRFS_FS
50208 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50209 + return get_btrfs_dev_from_inode(dentry->d_inode);
50210 + else
50211 +#endif
50212 + return dentry->d_inode->i_sb->s_dev;
50213 +}
50214 +
50215 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50216 +{
50217 + return __get_dev(dentry);
50218 +}
50219 +
50220 +static char gr_task_roletype_to_char(struct task_struct *task)
50221 +{
50222 + switch (task->role->roletype &
50223 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
50224 + GR_ROLE_SPECIAL)) {
50225 + case GR_ROLE_DEFAULT:
50226 + return 'D';
50227 + case GR_ROLE_USER:
50228 + return 'U';
50229 + case GR_ROLE_GROUP:
50230 + return 'G';
50231 + case GR_ROLE_SPECIAL:
50232 + return 'S';
50233 + }
50234 +
50235 + return 'X';
50236 +}
50237 +
50238 +char gr_roletype_to_char(void)
50239 +{
50240 + return gr_task_roletype_to_char(current);
50241 +}
50242 +
50243 +__inline__ int
50244 +gr_acl_tpe_check(void)
50245 +{
50246 + if (unlikely(!(gr_status & GR_READY)))
50247 + return 0;
50248 + if (current->role->roletype & GR_ROLE_TPE)
50249 + return 1;
50250 + else
50251 + return 0;
50252 +}
50253 +
50254 +int
50255 +gr_handle_rawio(const struct inode *inode)
50256 +{
50257 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50258 + if (inode && S_ISBLK(inode->i_mode) &&
50259 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50260 + !capable(CAP_SYS_RAWIO))
50261 + return 1;
50262 +#endif
50263 + return 0;
50264 +}
50265 +
50266 +static int
50267 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50268 +{
50269 + if (likely(lena != lenb))
50270 + return 0;
50271 +
50272 + return !memcmp(a, b, lena);
50273 +}
50274 +
50275 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
50276 +{
50277 + *buflen -= namelen;
50278 + if (*buflen < 0)
50279 + return -ENAMETOOLONG;
50280 + *buffer -= namelen;
50281 + memcpy(*buffer, str, namelen);
50282 + return 0;
50283 +}
50284 +
50285 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
50286 +{
50287 + return prepend(buffer, buflen, name->name, name->len);
50288 +}
50289 +
50290 +static int prepend_path(const struct path *path, struct path *root,
50291 + char **buffer, int *buflen)
50292 +{
50293 + struct dentry *dentry = path->dentry;
50294 + struct vfsmount *vfsmnt = path->mnt;
50295 + struct mount *mnt = real_mount(vfsmnt);
50296 + bool slash = false;
50297 + int error = 0;
50298 +
50299 + while (dentry != root->dentry || vfsmnt != root->mnt) {
50300 + struct dentry * parent;
50301 +
50302 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50303 + /* Global root? */
50304 + if (!mnt_has_parent(mnt)) {
50305 + goto out;
50306 + }
50307 + dentry = mnt->mnt_mountpoint;
50308 + mnt = mnt->mnt_parent;
50309 + vfsmnt = &mnt->mnt;
50310 + continue;
50311 + }
50312 + parent = dentry->d_parent;
50313 + prefetch(parent);
50314 + spin_lock(&dentry->d_lock);
50315 + error = prepend_name(buffer, buflen, &dentry->d_name);
50316 + spin_unlock(&dentry->d_lock);
50317 + if (!error)
50318 + error = prepend(buffer, buflen, "/", 1);
50319 + if (error)
50320 + break;
50321 +
50322 + slash = true;
50323 + dentry = parent;
50324 + }
50325 +
50326 +out:
50327 + if (!error && !slash)
50328 + error = prepend(buffer, buflen, "/", 1);
50329 +
50330 + return error;
50331 +}
50332 +
50333 +/* this must be called with vfsmount_lock and rename_lock held */
50334 +
50335 +static char *__our_d_path(const struct path *path, struct path *root,
50336 + char *buf, int buflen)
50337 +{
50338 + char *res = buf + buflen;
50339 + int error;
50340 +
50341 + prepend(&res, &buflen, "\0", 1);
50342 + error = prepend_path(path, root, &res, &buflen);
50343 + if (error)
50344 + return ERR_PTR(error);
50345 +
50346 + return res;
50347 +}
50348 +
50349 +static char *
50350 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
50351 +{
50352 + char *retval;
50353 +
50354 + retval = __our_d_path(path, root, buf, buflen);
50355 + if (unlikely(IS_ERR(retval)))
50356 + retval = strcpy(buf, "<path too long>");
50357 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50358 + retval[1] = '\0';
50359 +
50360 + return retval;
50361 +}
50362 +
50363 +static char *
50364 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50365 + char *buf, int buflen)
50366 +{
50367 + struct path path;
50368 + char *res;
50369 +
50370 + path.dentry = (struct dentry *)dentry;
50371 + path.mnt = (struct vfsmount *)vfsmnt;
50372 +
50373 + /* we can use real_root.dentry, real_root.mnt, because this is only called
50374 + by the RBAC system */
50375 + res = gen_full_path(&path, &real_root, buf, buflen);
50376 +
50377 + return res;
50378 +}
50379 +
50380 +static char *
50381 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50382 + char *buf, int buflen)
50383 +{
50384 + char *res;
50385 + struct path path;
50386 + struct path root;
50387 + struct task_struct *reaper = init_pid_ns.child_reaper;
50388 +
50389 + path.dentry = (struct dentry *)dentry;
50390 + path.mnt = (struct vfsmount *)vfsmnt;
50391 +
50392 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
50393 + get_fs_root(reaper->fs, &root);
50394 +
50395 + write_seqlock(&rename_lock);
50396 + br_read_lock(vfsmount_lock);
50397 + res = gen_full_path(&path, &root, buf, buflen);
50398 + br_read_unlock(vfsmount_lock);
50399 + write_sequnlock(&rename_lock);
50400 +
50401 + path_put(&root);
50402 + return res;
50403 +}
50404 +
50405 +static char *
50406 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50407 +{
50408 + char *ret;
50409 + write_seqlock(&rename_lock);
50410 + br_read_lock(vfsmount_lock);
50411 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50412 + PAGE_SIZE);
50413 + br_read_unlock(vfsmount_lock);
50414 + write_sequnlock(&rename_lock);
50415 + return ret;
50416 +}
50417 +
50418 +static char *
50419 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50420 +{
50421 + char *ret;
50422 + char *buf;
50423 + int buflen;
50424 +
50425 + write_seqlock(&rename_lock);
50426 + br_read_lock(vfsmount_lock);
50427 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50428 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
50429 + buflen = (int)(ret - buf);
50430 + if (buflen >= 5)
50431 + prepend(&ret, &buflen, "/proc", 5);
50432 + else
50433 + ret = strcpy(buf, "<path too long>");
50434 + br_read_unlock(vfsmount_lock);
50435 + write_sequnlock(&rename_lock);
50436 + return ret;
50437 +}
50438 +
50439 +char *
50440 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
50441 +{
50442 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50443 + PAGE_SIZE);
50444 +}
50445 +
50446 +char *
50447 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
50448 +{
50449 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50450 + PAGE_SIZE);
50451 +}
50452 +
50453 +char *
50454 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
50455 +{
50456 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
50457 + PAGE_SIZE);
50458 +}
50459 +
50460 +char *
50461 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
50462 +{
50463 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
50464 + PAGE_SIZE);
50465 +}
50466 +
50467 +char *
50468 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
50469 +{
50470 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
50471 + PAGE_SIZE);
50472 +}
50473 +
50474 +__inline__ __u32
50475 +to_gr_audit(const __u32 reqmode)
50476 +{
50477 + /* masks off auditable permission flags, then shifts them to create
50478 + auditing flags, and adds the special case of append auditing if
50479 + we're requesting write */
50480 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
50481 +}
50482 +
50483 +struct acl_subject_label *
50484 +lookup_subject_map(const struct acl_subject_label *userp)
50485 +{
50486 + unsigned int index = shash(userp, subj_map_set.s_size);
50487 + struct subject_map *match;
50488 +
50489 + match = subj_map_set.s_hash[index];
50490 +
50491 + while (match && match->user != userp)
50492 + match = match->next;
50493 +
50494 + if (match != NULL)
50495 + return match->kernel;
50496 + else
50497 + return NULL;
50498 +}
50499 +
50500 +static void
50501 +insert_subj_map_entry(struct subject_map *subjmap)
50502 +{
50503 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
50504 + struct subject_map **curr;
50505 +
50506 + subjmap->prev = NULL;
50507 +
50508 + curr = &subj_map_set.s_hash[index];
50509 + if (*curr != NULL)
50510 + (*curr)->prev = subjmap;
50511 +
50512 + subjmap->next = *curr;
50513 + *curr = subjmap;
50514 +
50515 + return;
50516 +}
50517 +
50518 +static struct acl_role_label *
50519 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
50520 + const gid_t gid)
50521 +{
50522 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
50523 + struct acl_role_label *match;
50524 + struct role_allowed_ip *ipp;
50525 + unsigned int x;
50526 + u32 curr_ip = task->signal->curr_ip;
50527 +
50528 + task->signal->saved_ip = curr_ip;
50529 +
50530 + match = acl_role_set.r_hash[index];
50531 +
50532 + while (match) {
50533 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
50534 + for (x = 0; x < match->domain_child_num; x++) {
50535 + if (match->domain_children[x] == uid)
50536 + goto found;
50537 + }
50538 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
50539 + break;
50540 + match = match->next;
50541 + }
50542 +found:
50543 + if (match == NULL) {
50544 + try_group:
50545 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
50546 + match = acl_role_set.r_hash[index];
50547 +
50548 + while (match) {
50549 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
50550 + for (x = 0; x < match->domain_child_num; x++) {
50551 + if (match->domain_children[x] == gid)
50552 + goto found2;
50553 + }
50554 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
50555 + break;
50556 + match = match->next;
50557 + }
50558 +found2:
50559 + if (match == NULL)
50560 + match = default_role;
50561 + if (match->allowed_ips == NULL)
50562 + return match;
50563 + else {
50564 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50565 + if (likely
50566 + ((ntohl(curr_ip) & ipp->netmask) ==
50567 + (ntohl(ipp->addr) & ipp->netmask)))
50568 + return match;
50569 + }
50570 + match = default_role;
50571 + }
50572 + } else if (match->allowed_ips == NULL) {
50573 + return match;
50574 + } else {
50575 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50576 + if (likely
50577 + ((ntohl(curr_ip) & ipp->netmask) ==
50578 + (ntohl(ipp->addr) & ipp->netmask)))
50579 + return match;
50580 + }
50581 + goto try_group;
50582 + }
50583 +
50584 + return match;
50585 +}
50586 +
50587 +struct acl_subject_label *
50588 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
50589 + const struct acl_role_label *role)
50590 +{
50591 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
50592 + struct acl_subject_label *match;
50593 +
50594 + match = role->subj_hash[index];
50595 +
50596 + while (match && (match->inode != ino || match->device != dev ||
50597 + (match->mode & GR_DELETED))) {
50598 + match = match->next;
50599 + }
50600 +
50601 + if (match && !(match->mode & GR_DELETED))
50602 + return match;
50603 + else
50604 + return NULL;
50605 +}
50606 +
50607 +struct acl_subject_label *
50608 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
50609 + const struct acl_role_label *role)
50610 +{
50611 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
50612 + struct acl_subject_label *match;
50613 +
50614 + match = role->subj_hash[index];
50615 +
50616 + while (match && (match->inode != ino || match->device != dev ||
50617 + !(match->mode & GR_DELETED))) {
50618 + match = match->next;
50619 + }
50620 +
50621 + if (match && (match->mode & GR_DELETED))
50622 + return match;
50623 + else
50624 + return NULL;
50625 +}
50626 +
50627 +static struct acl_object_label *
50628 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
50629 + const struct acl_subject_label *subj)
50630 +{
50631 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50632 + struct acl_object_label *match;
50633 +
50634 + match = subj->obj_hash[index];
50635 +
50636 + while (match && (match->inode != ino || match->device != dev ||
50637 + (match->mode & GR_DELETED))) {
50638 + match = match->next;
50639 + }
50640 +
50641 + if (match && !(match->mode & GR_DELETED))
50642 + return match;
50643 + else
50644 + return NULL;
50645 +}
50646 +
50647 +static struct acl_object_label *
50648 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
50649 + const struct acl_subject_label *subj)
50650 +{
50651 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50652 + struct acl_object_label *match;
50653 +
50654 + match = subj->obj_hash[index];
50655 +
50656 + while (match && (match->inode != ino || match->device != dev ||
50657 + !(match->mode & GR_DELETED))) {
50658 + match = match->next;
50659 + }
50660 +
50661 + if (match && (match->mode & GR_DELETED))
50662 + return match;
50663 +
50664 + match = subj->obj_hash[index];
50665 +
50666 + while (match && (match->inode != ino || match->device != dev ||
50667 + (match->mode & GR_DELETED))) {
50668 + match = match->next;
50669 + }
50670 +
50671 + if (match && !(match->mode & GR_DELETED))
50672 + return match;
50673 + else
50674 + return NULL;
50675 +}
50676 +
50677 +static struct name_entry *
50678 +lookup_name_entry(const char *name)
50679 +{
50680 + unsigned int len = strlen(name);
50681 + unsigned int key = full_name_hash(name, len);
50682 + unsigned int index = key % name_set.n_size;
50683 + struct name_entry *match;
50684 +
50685 + match = name_set.n_hash[index];
50686 +
50687 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
50688 + match = match->next;
50689 +
50690 + return match;
50691 +}
50692 +
50693 +static struct name_entry *
50694 +lookup_name_entry_create(const char *name)
50695 +{
50696 + unsigned int len = strlen(name);
50697 + unsigned int key = full_name_hash(name, len);
50698 + unsigned int index = key % name_set.n_size;
50699 + struct name_entry *match;
50700 +
50701 + match = name_set.n_hash[index];
50702 +
50703 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50704 + !match->deleted))
50705 + match = match->next;
50706 +
50707 + if (match && match->deleted)
50708 + return match;
50709 +
50710 + match = name_set.n_hash[index];
50711 +
50712 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50713 + match->deleted))
50714 + match = match->next;
50715 +
50716 + if (match && !match->deleted)
50717 + return match;
50718 + else
50719 + return NULL;
50720 +}
50721 +
50722 +static struct inodev_entry *
50723 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
50724 +{
50725 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
50726 + struct inodev_entry *match;
50727 +
50728 + match = inodev_set.i_hash[index];
50729 +
50730 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
50731 + match = match->next;
50732 +
50733 + return match;
50734 +}
50735 +
50736 +static void
50737 +insert_inodev_entry(struct inodev_entry *entry)
50738 +{
50739 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
50740 + inodev_set.i_size);
50741 + struct inodev_entry **curr;
50742 +
50743 + entry->prev = NULL;
50744 +
50745 + curr = &inodev_set.i_hash[index];
50746 + if (*curr != NULL)
50747 + (*curr)->prev = entry;
50748 +
50749 + entry->next = *curr;
50750 + *curr = entry;
50751 +
50752 + return;
50753 +}
50754 +
50755 +static void
50756 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
50757 +{
50758 + unsigned int index =
50759 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
50760 + struct acl_role_label **curr;
50761 + struct acl_role_label *tmp, *tmp2;
50762 +
50763 + curr = &acl_role_set.r_hash[index];
50764 +
50765 + /* simple case, slot is empty, just set it to our role */
50766 + if (*curr == NULL) {
50767 + *curr = role;
50768 + } else {
50769 + /* example:
50770 + 1 -> 2 -> 3 (adding 2 -> 3 to here)
50771 + 2 -> 3
50772 + */
50773 + /* first check to see if we can already be reached via this slot */
50774 + tmp = *curr;
50775 + while (tmp && tmp != role)
50776 + tmp = tmp->next;
50777 + if (tmp == role) {
50778 + /* we don't need to add ourselves to this slot's chain */
50779 + return;
50780 + }
50781 + /* we need to add ourselves to this chain, two cases */
50782 + if (role->next == NULL) {
50783 + /* simple case, append the current chain to our role */
50784 + role->next = *curr;
50785 + *curr = role;
50786 + } else {
50787 + /* 1 -> 2 -> 3 -> 4
50788 + 2 -> 3 -> 4
50789 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
50790 + */
50791 + /* trickier case: walk our role's chain until we find
50792 + the role for the start of the current slot's chain */
50793 + tmp = role;
50794 + tmp2 = *curr;
50795 + while (tmp->next && tmp->next != tmp2)
50796 + tmp = tmp->next;
50797 + if (tmp->next == tmp2) {
50798 + /* from example above, we found 3, so just
50799 + replace this slot's chain with ours */
50800 + *curr = role;
50801 + } else {
50802 + /* we didn't find a subset of our role's chain
50803 + in the current slot's chain, so append their
50804 + chain to ours, and set us as the first role in
50805 + the slot's chain
50806 +
50807 + we could fold this case with the case above,
50808 + but making it explicit for clarity
50809 + */
50810 + tmp->next = tmp2;
50811 + *curr = role;
50812 + }
50813 + }
50814 + }
50815 +
50816 + return;
50817 +}
50818 +
50819 +static void
50820 +insert_acl_role_label(struct acl_role_label *role)
50821 +{
50822 + int i;
50823 +
50824 + if (role_list == NULL) {
50825 + role_list = role;
50826 + role->prev = NULL;
50827 + } else {
50828 + role->prev = role_list;
50829 + role_list = role;
50830 + }
50831 +
50832 + /* used for hash chains */
50833 + role->next = NULL;
50834 +
50835 + if (role->roletype & GR_ROLE_DOMAIN) {
50836 + for (i = 0; i < role->domain_child_num; i++)
50837 + __insert_acl_role_label(role, role->domain_children[i]);
50838 + } else
50839 + __insert_acl_role_label(role, role->uidgid);
50840 +}
50841 +
50842 +static int
50843 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
50844 +{
50845 + struct name_entry **curr, *nentry;
50846 + struct inodev_entry *ientry;
50847 + unsigned int len = strlen(name);
50848 + unsigned int key = full_name_hash(name, len);
50849 + unsigned int index = key % name_set.n_size;
50850 +
50851 + curr = &name_set.n_hash[index];
50852 +
50853 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
50854 + curr = &((*curr)->next);
50855 +
50856 + if (*curr != NULL)
50857 + return 1;
50858 +
50859 + nentry = acl_alloc(sizeof (struct name_entry));
50860 + if (nentry == NULL)
50861 + return 0;
50862 + ientry = acl_alloc(sizeof (struct inodev_entry));
50863 + if (ientry == NULL)
50864 + return 0;
50865 + ientry->nentry = nentry;
50866 +
50867 + nentry->key = key;
50868 + nentry->name = name;
50869 + nentry->inode = inode;
50870 + nentry->device = device;
50871 + nentry->len = len;
50872 + nentry->deleted = deleted;
50873 +
50874 + nentry->prev = NULL;
50875 + curr = &name_set.n_hash[index];
50876 + if (*curr != NULL)
50877 + (*curr)->prev = nentry;
50878 + nentry->next = *curr;
50879 + *curr = nentry;
50880 +
50881 + /* insert us into the table searchable by inode/dev */
50882 + insert_inodev_entry(ientry);
50883 +
50884 + return 1;
50885 +}
50886 +
50887 +static void
50888 +insert_acl_obj_label(struct acl_object_label *obj,
50889 + struct acl_subject_label *subj)
50890 +{
50891 + unsigned int index =
50892 + fhash(obj->inode, obj->device, subj->obj_hash_size);
50893 + struct acl_object_label **curr;
50894 +
50895 +
50896 + obj->prev = NULL;
50897 +
50898 + curr = &subj->obj_hash[index];
50899 + if (*curr != NULL)
50900 + (*curr)->prev = obj;
50901 +
50902 + obj->next = *curr;
50903 + *curr = obj;
50904 +
50905 + return;
50906 +}
50907 +
50908 +static void
50909 +insert_acl_subj_label(struct acl_subject_label *obj,
50910 + struct acl_role_label *role)
50911 +{
50912 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
50913 + struct acl_subject_label **curr;
50914 +
50915 + obj->prev = NULL;
50916 +
50917 + curr = &role->subj_hash[index];
50918 + if (*curr != NULL)
50919 + (*curr)->prev = obj;
50920 +
50921 + obj->next = *curr;
50922 + *curr = obj;
50923 +
50924 + return;
50925 +}
50926 +
50927 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
50928 +
50929 +static void *
50930 +create_table(__u32 * len, int elementsize)
50931 +{
50932 + unsigned int table_sizes[] = {
50933 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
50934 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
50935 + 4194301, 8388593, 16777213, 33554393, 67108859
50936 + };
50937 + void *newtable = NULL;
50938 + unsigned int pwr = 0;
50939 +
50940 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
50941 + table_sizes[pwr] <= *len)
50942 + pwr++;
50943 +
50944 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
50945 + return newtable;
50946 +
50947 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
50948 + newtable =
50949 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
50950 + else
50951 + newtable = vmalloc(table_sizes[pwr] * elementsize);
50952 +
50953 + *len = table_sizes[pwr];
50954 +
50955 + return newtable;
50956 +}
50957 +
50958 +static int
50959 +init_variables(const struct gr_arg *arg)
50960 +{
50961 + struct task_struct *reaper = init_pid_ns.child_reaper;
50962 + unsigned int stacksize;
50963 +
50964 + subj_map_set.s_size = arg->role_db.num_subjects;
50965 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
50966 + name_set.n_size = arg->role_db.num_objects;
50967 + inodev_set.i_size = arg->role_db.num_objects;
50968 +
50969 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
50970 + !name_set.n_size || !inodev_set.i_size)
50971 + return 1;
50972 +
50973 + if (!gr_init_uidset())
50974 + return 1;
50975 +
50976 + /* set up the stack that holds allocation info */
50977 +
50978 + stacksize = arg->role_db.num_pointers + 5;
50979 +
50980 + if (!acl_alloc_stack_init(stacksize))
50981 + return 1;
50982 +
50983 + /* grab reference for the real root dentry and vfsmount */
50984 + get_fs_root(reaper->fs, &real_root);
50985 +
50986 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50987 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
50988 +#endif
50989 +
50990 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
50991 + if (fakefs_obj_rw == NULL)
50992 + return 1;
50993 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
50994 +
50995 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
50996 + if (fakefs_obj_rwx == NULL)
50997 + return 1;
50998 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
50999 +
51000 + subj_map_set.s_hash =
51001 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
51002 + acl_role_set.r_hash =
51003 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
51004 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
51005 + inodev_set.i_hash =
51006 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
51007 +
51008 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
51009 + !name_set.n_hash || !inodev_set.i_hash)
51010 + return 1;
51011 +
51012 + memset(subj_map_set.s_hash, 0,
51013 + sizeof(struct subject_map *) * subj_map_set.s_size);
51014 + memset(acl_role_set.r_hash, 0,
51015 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
51016 + memset(name_set.n_hash, 0,
51017 + sizeof (struct name_entry *) * name_set.n_size);
51018 + memset(inodev_set.i_hash, 0,
51019 + sizeof (struct inodev_entry *) * inodev_set.i_size);
51020 +
51021 + return 0;
51022 +}
51023 +
51024 +/* free information not needed after startup
51025 + currently contains user->kernel pointer mappings for subjects
51026 +*/
51027 +
51028 +static void
51029 +free_init_variables(void)
51030 +{
51031 + __u32 i;
51032 +
51033 + if (subj_map_set.s_hash) {
51034 + for (i = 0; i < subj_map_set.s_size; i++) {
51035 + if (subj_map_set.s_hash[i]) {
51036 + kfree(subj_map_set.s_hash[i]);
51037 + subj_map_set.s_hash[i] = NULL;
51038 + }
51039 + }
51040 +
51041 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51042 + PAGE_SIZE)
51043 + kfree(subj_map_set.s_hash);
51044 + else
51045 + vfree(subj_map_set.s_hash);
51046 + }
51047 +
51048 + return;
51049 +}
51050 +
51051 +static void
51052 +free_variables(void)
51053 +{
51054 + struct acl_subject_label *s;
51055 + struct acl_role_label *r;
51056 + struct task_struct *task, *task2;
51057 + unsigned int x;
51058 +
51059 + gr_clear_learn_entries();
51060 +
51061 + read_lock(&tasklist_lock);
51062 + do_each_thread(task2, task) {
51063 + task->acl_sp_role = 0;
51064 + task->acl_role_id = 0;
51065 + task->acl = NULL;
51066 + task->role = NULL;
51067 + } while_each_thread(task2, task);
51068 + read_unlock(&tasklist_lock);
51069 +
51070 + /* release the reference to the real root dentry and vfsmount */
51071 + path_put(&real_root);
51072 + memset(&real_root, 0, sizeof(real_root));
51073 +
51074 + /* free all object hash tables */
51075 +
51076 + FOR_EACH_ROLE_START(r)
51077 + if (r->subj_hash == NULL)
51078 + goto next_role;
51079 + FOR_EACH_SUBJECT_START(r, s, x)
51080 + if (s->obj_hash == NULL)
51081 + break;
51082 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51083 + kfree(s->obj_hash);
51084 + else
51085 + vfree(s->obj_hash);
51086 + FOR_EACH_SUBJECT_END(s, x)
51087 + FOR_EACH_NESTED_SUBJECT_START(r, s)
51088 + if (s->obj_hash == NULL)
51089 + break;
51090 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51091 + kfree(s->obj_hash);
51092 + else
51093 + vfree(s->obj_hash);
51094 + FOR_EACH_NESTED_SUBJECT_END(s)
51095 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
51096 + kfree(r->subj_hash);
51097 + else
51098 + vfree(r->subj_hash);
51099 + r->subj_hash = NULL;
51100 +next_role:
51101 + FOR_EACH_ROLE_END(r)
51102 +
51103 + acl_free_all();
51104 +
51105 + if (acl_role_set.r_hash) {
51106 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
51107 + PAGE_SIZE)
51108 + kfree(acl_role_set.r_hash);
51109 + else
51110 + vfree(acl_role_set.r_hash);
51111 + }
51112 + if (name_set.n_hash) {
51113 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
51114 + PAGE_SIZE)
51115 + kfree(name_set.n_hash);
51116 + else
51117 + vfree(name_set.n_hash);
51118 + }
51119 +
51120 + if (inodev_set.i_hash) {
51121 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
51122 + PAGE_SIZE)
51123 + kfree(inodev_set.i_hash);
51124 + else
51125 + vfree(inodev_set.i_hash);
51126 + }
51127 +
51128 + gr_free_uidset();
51129 +
51130 + memset(&name_set, 0, sizeof (struct name_db));
51131 + memset(&inodev_set, 0, sizeof (struct inodev_db));
51132 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
51133 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
51134 +
51135 + default_role = NULL;
51136 + kernel_role = NULL;
51137 + role_list = NULL;
51138 +
51139 + return;
51140 +}
51141 +
51142 +static __u32
51143 +count_user_objs(struct acl_object_label *userp)
51144 +{
51145 + struct acl_object_label o_tmp;
51146 + __u32 num = 0;
51147 +
51148 + while (userp) {
51149 + if (copy_from_user(&o_tmp, userp,
51150 + sizeof (struct acl_object_label)))
51151 + break;
51152 +
51153 + userp = o_tmp.prev;
51154 + num++;
51155 + }
51156 +
51157 + return num;
51158 +}
51159 +
51160 +static struct acl_subject_label *
51161 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
51162 +
51163 +static int
51164 +copy_user_glob(struct acl_object_label *obj)
51165 +{
51166 + struct acl_object_label *g_tmp, **guser;
51167 + unsigned int len;
51168 + char *tmp;
51169 +
51170 + if (obj->globbed == NULL)
51171 + return 0;
51172 +
51173 + guser = &obj->globbed;
51174 + while (*guser) {
51175 + g_tmp = (struct acl_object_label *)
51176 + acl_alloc(sizeof (struct acl_object_label));
51177 + if (g_tmp == NULL)
51178 + return -ENOMEM;
51179 +
51180 + if (copy_from_user(g_tmp, *guser,
51181 + sizeof (struct acl_object_label)))
51182 + return -EFAULT;
51183 +
51184 + len = strnlen_user(g_tmp->filename, PATH_MAX);
51185 +
51186 + if (!len || len >= PATH_MAX)
51187 + return -EINVAL;
51188 +
51189 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51190 + return -ENOMEM;
51191 +
51192 + if (copy_from_user(tmp, g_tmp->filename, len))
51193 + return -EFAULT;
51194 + tmp[len-1] = '\0';
51195 + g_tmp->filename = tmp;
51196 +
51197 + *guser = g_tmp;
51198 + guser = &(g_tmp->next);
51199 + }
51200 +
51201 + return 0;
51202 +}
51203 +
51204 +static int
51205 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
51206 + struct acl_role_label *role)
51207 +{
51208 + struct acl_object_label *o_tmp;
51209 + unsigned int len;
51210 + int ret;
51211 + char *tmp;
51212 +
51213 + while (userp) {
51214 + if ((o_tmp = (struct acl_object_label *)
51215 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
51216 + return -ENOMEM;
51217 +
51218 + if (copy_from_user(o_tmp, userp,
51219 + sizeof (struct acl_object_label)))
51220 + return -EFAULT;
51221 +
51222 + userp = o_tmp->prev;
51223 +
51224 + len = strnlen_user(o_tmp->filename, PATH_MAX);
51225 +
51226 + if (!len || len >= PATH_MAX)
51227 + return -EINVAL;
51228 +
51229 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51230 + return -ENOMEM;
51231 +
51232 + if (copy_from_user(tmp, o_tmp->filename, len))
51233 + return -EFAULT;
51234 + tmp[len-1] = '\0';
51235 + o_tmp->filename = tmp;
51236 +
51237 + insert_acl_obj_label(o_tmp, subj);
51238 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51239 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51240 + return -ENOMEM;
51241 +
51242 + ret = copy_user_glob(o_tmp);
51243 + if (ret)
51244 + return ret;
51245 +
51246 + if (o_tmp->nested) {
51247 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
51248 + if (IS_ERR(o_tmp->nested))
51249 + return PTR_ERR(o_tmp->nested);
51250 +
51251 + /* insert into nested subject list */
51252 + o_tmp->nested->next = role->hash->first;
51253 + role->hash->first = o_tmp->nested;
51254 + }
51255 + }
51256 +
51257 + return 0;
51258 +}
51259 +
51260 +static __u32
51261 +count_user_subjs(struct acl_subject_label *userp)
51262 +{
51263 + struct acl_subject_label s_tmp;
51264 + __u32 num = 0;
51265 +
51266 + while (userp) {
51267 + if (copy_from_user(&s_tmp, userp,
51268 + sizeof (struct acl_subject_label)))
51269 + break;
51270 +
51271 + userp = s_tmp.prev;
51272 + /* do not count nested subjects against this count, since
51273 + they are not included in the hash table, but are
51274 + attached to objects. We have already counted
51275 + the subjects in userspace for the allocation
51276 + stack
51277 + */
51278 + if (!(s_tmp.mode & GR_NESTED))
51279 + num++;
51280 + }
51281 +
51282 + return num;
51283 +}
51284 +
51285 +static int
51286 +copy_user_allowedips(struct acl_role_label *rolep)
51287 +{
51288 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
51289 +
51290 + ruserip = rolep->allowed_ips;
51291 +
51292 + while (ruserip) {
51293 + rlast = rtmp;
51294 +
51295 + if ((rtmp = (struct role_allowed_ip *)
51296 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
51297 + return -ENOMEM;
51298 +
51299 + if (copy_from_user(rtmp, ruserip,
51300 + sizeof (struct role_allowed_ip)))
51301 + return -EFAULT;
51302 +
51303 + ruserip = rtmp->prev;
51304 +
51305 + if (!rlast) {
51306 + rtmp->prev = NULL;
51307 + rolep->allowed_ips = rtmp;
51308 + } else {
51309 + rlast->next = rtmp;
51310 + rtmp->prev = rlast;
51311 + }
51312 +
51313 + if (!ruserip)
51314 + rtmp->next = NULL;
51315 + }
51316 +
51317 + return 0;
51318 +}
51319 +
51320 +static int
51321 +copy_user_transitions(struct acl_role_label *rolep)
51322 +{
51323 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
51324 +
51325 + unsigned int len;
51326 + char *tmp;
51327 +
51328 + rusertp = rolep->transitions;
51329 +
51330 + while (rusertp) {
51331 + rlast = rtmp;
51332 +
51333 + if ((rtmp = (struct role_transition *)
51334 + acl_alloc(sizeof (struct role_transition))) == NULL)
51335 + return -ENOMEM;
51336 +
51337 + if (copy_from_user(rtmp, rusertp,
51338 + sizeof (struct role_transition)))
51339 + return -EFAULT;
51340 +
51341 + rusertp = rtmp->prev;
51342 +
51343 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51344 +
51345 + if (!len || len >= GR_SPROLE_LEN)
51346 + return -EINVAL;
51347 +
51348 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51349 + return -ENOMEM;
51350 +
51351 + if (copy_from_user(tmp, rtmp->rolename, len))
51352 + return -EFAULT;
51353 + tmp[len-1] = '\0';
51354 + rtmp->rolename = tmp;
51355 +
51356 + if (!rlast) {
51357 + rtmp->prev = NULL;
51358 + rolep->transitions = rtmp;
51359 + } else {
51360 + rlast->next = rtmp;
51361 + rtmp->prev = rlast;
51362 + }
51363 +
51364 + if (!rusertp)
51365 + rtmp->next = NULL;
51366 + }
51367 +
51368 + return 0;
51369 +}
51370 +
51371 +static struct acl_subject_label *
51372 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51373 +{
51374 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51375 + unsigned int len;
51376 + char *tmp;
51377 + __u32 num_objs;
51378 + struct acl_ip_label **i_tmp, *i_utmp2;
51379 + struct gr_hash_struct ghash;
51380 + struct subject_map *subjmap;
51381 + unsigned int i_num;
51382 + int err;
51383 +
51384 + s_tmp = lookup_subject_map(userp);
51385 +
51386 + /* we've already copied this subject into the kernel, just return
51387 + the reference to it, and don't copy it over again
51388 + */
51389 + if (s_tmp)
51390 + return(s_tmp);
51391 +
51392 + if ((s_tmp = (struct acl_subject_label *)
51393 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51394 + return ERR_PTR(-ENOMEM);
51395 +
51396 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51397 + if (subjmap == NULL)
51398 + return ERR_PTR(-ENOMEM);
51399 +
51400 + subjmap->user = userp;
51401 + subjmap->kernel = s_tmp;
51402 + insert_subj_map_entry(subjmap);
51403 +
51404 + if (copy_from_user(s_tmp, userp,
51405 + sizeof (struct acl_subject_label)))
51406 + return ERR_PTR(-EFAULT);
51407 +
51408 + len = strnlen_user(s_tmp->filename, PATH_MAX);
51409 +
51410 + if (!len || len >= PATH_MAX)
51411 + return ERR_PTR(-EINVAL);
51412 +
51413 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51414 + return ERR_PTR(-ENOMEM);
51415 +
51416 + if (copy_from_user(tmp, s_tmp->filename, len))
51417 + return ERR_PTR(-EFAULT);
51418 + tmp[len-1] = '\0';
51419 + s_tmp->filename = tmp;
51420 +
51421 + if (!strcmp(s_tmp->filename, "/"))
51422 + role->root_label = s_tmp;
51423 +
51424 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
51425 + return ERR_PTR(-EFAULT);
51426 +
51427 + /* copy user and group transition tables */
51428 +
51429 + if (s_tmp->user_trans_num) {
51430 + uid_t *uidlist;
51431 +
51432 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
51433 + if (uidlist == NULL)
51434 + return ERR_PTR(-ENOMEM);
51435 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
51436 + return ERR_PTR(-EFAULT);
51437 +
51438 + s_tmp->user_transitions = uidlist;
51439 + }
51440 +
51441 + if (s_tmp->group_trans_num) {
51442 + gid_t *gidlist;
51443 +
51444 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
51445 + if (gidlist == NULL)
51446 + return ERR_PTR(-ENOMEM);
51447 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
51448 + return ERR_PTR(-EFAULT);
51449 +
51450 + s_tmp->group_transitions = gidlist;
51451 + }
51452 +
51453 + /* set up object hash table */
51454 + num_objs = count_user_objs(ghash.first);
51455 +
51456 + s_tmp->obj_hash_size = num_objs;
51457 + s_tmp->obj_hash =
51458 + (struct acl_object_label **)
51459 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
51460 +
51461 + if (!s_tmp->obj_hash)
51462 + return ERR_PTR(-ENOMEM);
51463 +
51464 + memset(s_tmp->obj_hash, 0,
51465 + s_tmp->obj_hash_size *
51466 + sizeof (struct acl_object_label *));
51467 +
51468 + /* add in objects */
51469 + err = copy_user_objs(ghash.first, s_tmp, role);
51470 +
51471 + if (err)
51472 + return ERR_PTR(err);
51473 +
51474 + /* set pointer for parent subject */
51475 + if (s_tmp->parent_subject) {
51476 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
51477 +
51478 + if (IS_ERR(s_tmp2))
51479 + return s_tmp2;
51480 +
51481 + s_tmp->parent_subject = s_tmp2;
51482 + }
51483 +
51484 + /* add in ip acls */
51485 +
51486 + if (!s_tmp->ip_num) {
51487 + s_tmp->ips = NULL;
51488 + goto insert;
51489 + }
51490 +
51491 + i_tmp =
51492 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
51493 + sizeof (struct acl_ip_label *));
51494 +
51495 + if (!i_tmp)
51496 + return ERR_PTR(-ENOMEM);
51497 +
51498 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
51499 + *(i_tmp + i_num) =
51500 + (struct acl_ip_label *)
51501 + acl_alloc(sizeof (struct acl_ip_label));
51502 + if (!*(i_tmp + i_num))
51503 + return ERR_PTR(-ENOMEM);
51504 +
51505 + if (copy_from_user
51506 + (&i_utmp2, s_tmp->ips + i_num,
51507 + sizeof (struct acl_ip_label *)))
51508 + return ERR_PTR(-EFAULT);
51509 +
51510 + if (copy_from_user
51511 + (*(i_tmp + i_num), i_utmp2,
51512 + sizeof (struct acl_ip_label)))
51513 + return ERR_PTR(-EFAULT);
51514 +
51515 + if ((*(i_tmp + i_num))->iface == NULL)
51516 + continue;
51517 +
51518 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
51519 + if (!len || len >= IFNAMSIZ)
51520 + return ERR_PTR(-EINVAL);
51521 + tmp = acl_alloc(len);
51522 + if (tmp == NULL)
51523 + return ERR_PTR(-ENOMEM);
51524 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
51525 + return ERR_PTR(-EFAULT);
51526 + (*(i_tmp + i_num))->iface = tmp;
51527 + }
51528 +
51529 + s_tmp->ips = i_tmp;
51530 +
51531 +insert:
51532 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
51533 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
51534 + return ERR_PTR(-ENOMEM);
51535 +
51536 + return s_tmp;
51537 +}
51538 +
51539 +static int
51540 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
51541 +{
51542 + struct acl_subject_label s_pre;
51543 + struct acl_subject_label * ret;
51544 + int err;
51545 +
51546 + while (userp) {
51547 + if (copy_from_user(&s_pre, userp,
51548 + sizeof (struct acl_subject_label)))
51549 + return -EFAULT;
51550 +
51551 + /* do not add nested subjects here, add
51552 + while parsing objects
51553 + */
51554 +
51555 + if (s_pre.mode & GR_NESTED) {
51556 + userp = s_pre.prev;
51557 + continue;
51558 + }
51559 +
51560 + ret = do_copy_user_subj(userp, role);
51561 +
51562 + err = PTR_ERR(ret);
51563 + if (IS_ERR(ret))
51564 + return err;
51565 +
51566 + insert_acl_subj_label(ret, role);
51567 +
51568 + userp = s_pre.prev;
51569 + }
51570 +
51571 + return 0;
51572 +}
51573 +
51574 +static int
51575 +copy_user_acl(struct gr_arg *arg)
51576 +{
51577 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
51578 + struct sprole_pw *sptmp;
51579 + struct gr_hash_struct *ghash;
51580 + uid_t *domainlist;
51581 + unsigned int r_num;
51582 + unsigned int len;
51583 + char *tmp;
51584 + int err = 0;
51585 + __u16 i;
51586 + __u32 num_subjs;
51587 +
51588 + /* we need a default and kernel role */
51589 + if (arg->role_db.num_roles < 2)
51590 + return -EINVAL;
51591 +
51592 + /* copy special role authentication info from userspace */
51593 +
51594 + num_sprole_pws = arg->num_sprole_pws;
51595 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
51596 +
51597 + if (!acl_special_roles && num_sprole_pws)
51598 + return -ENOMEM;
51599 +
51600 + for (i = 0; i < num_sprole_pws; i++) {
51601 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
51602 + if (!sptmp)
51603 + return -ENOMEM;
51604 + if (copy_from_user(sptmp, arg->sprole_pws + i,
51605 + sizeof (struct sprole_pw)))
51606 + return -EFAULT;
51607 +
51608 + len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
51609 +
51610 + if (!len || len >= GR_SPROLE_LEN)
51611 + return -EINVAL;
51612 +
51613 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51614 + return -ENOMEM;
51615 +
51616 + if (copy_from_user(tmp, sptmp->rolename, len))
51617 + return -EFAULT;
51618 +
51619 + tmp[len-1] = '\0';
51620 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51621 + printk(KERN_ALERT "Copying special role %s\n", tmp);
51622 +#endif
51623 + sptmp->rolename = tmp;
51624 + acl_special_roles[i] = sptmp;
51625 + }
51626 +
51627 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
51628 +
51629 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
51630 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
51631 +
51632 + if (!r_tmp)
51633 + return -ENOMEM;
51634 +
51635 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
51636 + sizeof (struct acl_role_label *)))
51637 + return -EFAULT;
51638 +
51639 + if (copy_from_user(r_tmp, r_utmp2,
51640 + sizeof (struct acl_role_label)))
51641 + return -EFAULT;
51642 +
51643 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
51644 +
51645 + if (!len || len >= PATH_MAX)
51646 + return -EINVAL;
51647 +
51648 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51649 + return -ENOMEM;
51650 +
51651 + if (copy_from_user(tmp, r_tmp->rolename, len))
51652 + return -EFAULT;
51653 +
51654 + tmp[len-1] = '\0';
51655 + r_tmp->rolename = tmp;
51656 +
51657 + if (!strcmp(r_tmp->rolename, "default")
51658 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
51659 + default_role = r_tmp;
51660 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
51661 + kernel_role = r_tmp;
51662 + }
51663 +
51664 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
51665 + return -ENOMEM;
51666 +
51667 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
51668 + return -EFAULT;
51669 +
51670 + r_tmp->hash = ghash;
51671 +
51672 + num_subjs = count_user_subjs(r_tmp->hash->first);
51673 +
51674 + r_tmp->subj_hash_size = num_subjs;
51675 + r_tmp->subj_hash =
51676 + (struct acl_subject_label **)
51677 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
51678 +
51679 + if (!r_tmp->subj_hash)
51680 + return -ENOMEM;
51681 +
51682 + err = copy_user_allowedips(r_tmp);
51683 + if (err)
51684 + return err;
51685 +
51686 + /* copy domain info */
51687 + if (r_tmp->domain_children != NULL) {
51688 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
51689 + if (domainlist == NULL)
51690 + return -ENOMEM;
51691 +
51692 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
51693 + return -EFAULT;
51694 +
51695 + r_tmp->domain_children = domainlist;
51696 + }
51697 +
51698 + err = copy_user_transitions(r_tmp);
51699 + if (err)
51700 + return err;
51701 +
51702 + memset(r_tmp->subj_hash, 0,
51703 + r_tmp->subj_hash_size *
51704 + sizeof (struct acl_subject_label *));
51705 +
51706 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
51707 +
51708 + if (err)
51709 + return err;
51710 +
51711 + /* set nested subject list to null */
51712 + r_tmp->hash->first = NULL;
51713 +
51714 + insert_acl_role_label(r_tmp);
51715 + }
51716 +
51717 + if (default_role == NULL || kernel_role == NULL)
51718 + return -EINVAL;
51719 +
51720 + return err;
51721 +}
51722 +
51723 +static int
51724 +gracl_init(struct gr_arg *args)
51725 +{
51726 + int error = 0;
51727 +
51728 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
51729 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
51730 +
51731 + if (init_variables(args)) {
51732 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
51733 + error = -ENOMEM;
51734 + free_variables();
51735 + goto out;
51736 + }
51737 +
51738 + error = copy_user_acl(args);
51739 + free_init_variables();
51740 + if (error) {
51741 + free_variables();
51742 + goto out;
51743 + }
51744 +
51745 + if ((error = gr_set_acls(0))) {
51746 + free_variables();
51747 + goto out;
51748 + }
51749 +
51750 + pax_open_kernel();
51751 + gr_status |= GR_READY;
51752 + pax_close_kernel();
51753 +
51754 + out:
51755 + return error;
51756 +}
51757 +
51758 +/* derived from glibc fnmatch() 0: match, 1: no match*/
51759 +
51760 +static int
51761 +glob_match(const char *p, const char *n)
51762 +{
51763 + char c;
51764 +
51765 + while ((c = *p++) != '\0') {
51766 + switch (c) {
51767 + case '?':
51768 + if (*n == '\0')
51769 + return 1;
51770 + else if (*n == '/')
51771 + return 1;
51772 + break;
51773 + case '\\':
51774 + if (*n != c)
51775 + return 1;
51776 + break;
51777 + case '*':
51778 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
51779 + if (*n == '/')
51780 + return 1;
51781 + else if (c == '?') {
51782 + if (*n == '\0')
51783 + return 1;
51784 + else
51785 + ++n;
51786 + }
51787 + }
51788 + if (c == '\0') {
51789 + return 0;
51790 + } else {
51791 + const char *endp;
51792 +
51793 + if ((endp = strchr(n, '/')) == NULL)
51794 + endp = n + strlen(n);
51795 +
51796 + if (c == '[') {
51797 + for (--p; n < endp; ++n)
51798 + if (!glob_match(p, n))
51799 + return 0;
51800 + } else if (c == '/') {
51801 + while (*n != '\0' && *n != '/')
51802 + ++n;
51803 + if (*n == '/' && !glob_match(p, n + 1))
51804 + return 0;
51805 + } else {
51806 + for (--p; n < endp; ++n)
51807 + if (*n == c && !glob_match(p, n))
51808 + return 0;
51809 + }
51810 +
51811 + return 1;
51812 + }
51813 + case '[':
51814 + {
51815 + int not;
51816 + char cold;
51817 +
51818 + if (*n == '\0' || *n == '/')
51819 + return 1;
51820 +
51821 + not = (*p == '!' || *p == '^');
51822 + if (not)
51823 + ++p;
51824 +
51825 + c = *p++;
51826 + for (;;) {
51827 + unsigned char fn = (unsigned char)*n;
51828 +
51829 + if (c == '\0')
51830 + return 1;
51831 + else {
51832 + if (c == fn)
51833 + goto matched;
51834 + cold = c;
51835 + c = *p++;
51836 +
51837 + if (c == '-' && *p != ']') {
51838 + unsigned char cend = *p++;
51839 +
51840 + if (cend == '\0')
51841 + return 1;
51842 +
51843 + if (cold <= fn && fn <= cend)
51844 + goto matched;
51845 +
51846 + c = *p++;
51847 + }
51848 + }
51849 +
51850 + if (c == ']')
51851 + break;
51852 + }
51853 + if (!not)
51854 + return 1;
51855 + break;
51856 + matched:
51857 + while (c != ']') {
51858 + if (c == '\0')
51859 + return 1;
51860 +
51861 + c = *p++;
51862 + }
51863 + if (not)
51864 + return 1;
51865 + }
51866 + break;
51867 + default:
51868 + if (c != *n)
51869 + return 1;
51870 + }
51871 +
51872 + ++n;
51873 + }
51874 +
51875 + if (*n == '\0')
51876 + return 0;
51877 +
51878 + if (*n == '/')
51879 + return 0;
51880 +
51881 + return 1;
51882 +}
51883 +
51884 +static struct acl_object_label *
51885 +chk_glob_label(struct acl_object_label *globbed,
51886 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
51887 +{
51888 + struct acl_object_label *tmp;
51889 +
51890 + if (*path == NULL)
51891 + *path = gr_to_filename_nolock(dentry, mnt);
51892 +
51893 + tmp = globbed;
51894 +
51895 + while (tmp) {
51896 + if (!glob_match(tmp->filename, *path))
51897 + return tmp;
51898 + tmp = tmp->next;
51899 + }
51900 +
51901 + return NULL;
51902 +}
51903 +
51904 +static struct acl_object_label *
51905 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51906 + const ino_t curr_ino, const dev_t curr_dev,
51907 + const struct acl_subject_label *subj, char **path, const int checkglob)
51908 +{
51909 + struct acl_subject_label *tmpsubj;
51910 + struct acl_object_label *retval;
51911 + struct acl_object_label *retval2;
51912 +
51913 + tmpsubj = (struct acl_subject_label *) subj;
51914 + read_lock(&gr_inode_lock);
51915 + do {
51916 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
51917 + if (retval) {
51918 + if (checkglob && retval->globbed) {
51919 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
51920 + if (retval2)
51921 + retval = retval2;
51922 + }
51923 + break;
51924 + }
51925 + } while ((tmpsubj = tmpsubj->parent_subject));
51926 + read_unlock(&gr_inode_lock);
51927 +
51928 + return retval;
51929 +}
51930 +
51931 +static __inline__ struct acl_object_label *
51932 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51933 + struct dentry *curr_dentry,
51934 + const struct acl_subject_label *subj, char **path, const int checkglob)
51935 +{
51936 + int newglob = checkglob;
51937 + ino_t inode;
51938 + dev_t device;
51939 +
51940 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
51941 + as we don't want a / * rule to match instead of the / object
51942 + don't do this for create lookups that call this function though, since they're looking up
51943 + on the parent and thus need globbing checks on all paths
51944 + */
51945 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
51946 + newglob = GR_NO_GLOB;
51947 +
51948 + spin_lock(&curr_dentry->d_lock);
51949 + inode = curr_dentry->d_inode->i_ino;
51950 + device = __get_dev(curr_dentry);
51951 + spin_unlock(&curr_dentry->d_lock);
51952 +
51953 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
51954 +}
51955 +
51956 +static struct acl_object_label *
51957 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51958 + const struct acl_subject_label *subj, char *path, const int checkglob)
51959 +{
51960 + struct dentry *dentry = (struct dentry *) l_dentry;
51961 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
51962 + struct mount *real_mnt = real_mount(mnt);
51963 + struct acl_object_label *retval;
51964 + struct dentry *parent;
51965 +
51966 + write_seqlock(&rename_lock);
51967 + br_read_lock(vfsmount_lock);
51968 +
51969 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
51970 +#ifdef CONFIG_NET
51971 + mnt == sock_mnt ||
51972 +#endif
51973 +#ifdef CONFIG_HUGETLBFS
51974 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
51975 +#endif
51976 + /* ignore Eric Biederman */
51977 + IS_PRIVATE(l_dentry->d_inode))) {
51978 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
51979 + goto out;
51980 + }
51981 +
51982 + for (;;) {
51983 + if (dentry == real_root.dentry && mnt == real_root.mnt)
51984 + break;
51985 +
51986 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
51987 + if (!mnt_has_parent(real_mnt))
51988 + break;
51989 +
51990 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51991 + if (retval != NULL)
51992 + goto out;
51993 +
51994 + dentry = real_mnt->mnt_mountpoint;
51995 + real_mnt = real_mnt->mnt_parent;
51996 + mnt = &real_mnt->mnt;
51997 + continue;
51998 + }
51999 +
52000 + parent = dentry->d_parent;
52001 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52002 + if (retval != NULL)
52003 + goto out;
52004 +
52005 + dentry = parent;
52006 + }
52007 +
52008 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52009 +
52010 + /* real_root is pinned so we don't have to hold a reference */
52011 + if (retval == NULL)
52012 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
52013 +out:
52014 + br_read_unlock(vfsmount_lock);
52015 + write_sequnlock(&rename_lock);
52016 +
52017 + BUG_ON(retval == NULL);
52018 +
52019 + return retval;
52020 +}
52021 +
52022 +static __inline__ struct acl_object_label *
52023 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52024 + const struct acl_subject_label *subj)
52025 +{
52026 + char *path = NULL;
52027 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
52028 +}
52029 +
52030 +static __inline__ struct acl_object_label *
52031 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52032 + const struct acl_subject_label *subj)
52033 +{
52034 + char *path = NULL;
52035 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
52036 +}
52037 +
52038 +static __inline__ struct acl_object_label *
52039 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52040 + const struct acl_subject_label *subj, char *path)
52041 +{
52042 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52043 +}
52044 +
52045 +static struct acl_subject_label *
52046 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52047 + const struct acl_role_label *role)
52048 +{
52049 + struct dentry *dentry = (struct dentry *) l_dentry;
52050 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52051 + struct mount *real_mnt = real_mount(mnt);
52052 + struct acl_subject_label *retval;
52053 + struct dentry *parent;
52054 +
52055 + write_seqlock(&rename_lock);
52056 + br_read_lock(vfsmount_lock);
52057 +
52058 + for (;;) {
52059 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52060 + break;
52061 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52062 + if (!mnt_has_parent(real_mnt))
52063 + break;
52064 +
52065 + spin_lock(&dentry->d_lock);
52066 + read_lock(&gr_inode_lock);
52067 + retval =
52068 + lookup_acl_subj_label(dentry->d_inode->i_ino,
52069 + __get_dev(dentry), role);
52070 + read_unlock(&gr_inode_lock);
52071 + spin_unlock(&dentry->d_lock);
52072 + if (retval != NULL)
52073 + goto out;
52074 +
52075 + dentry = real_mnt->mnt_mountpoint;
52076 + real_mnt = real_mnt->mnt_parent;
52077 + mnt = &real_mnt->mnt;
52078 + continue;
52079 + }
52080 +
52081 + spin_lock(&dentry->d_lock);
52082 + read_lock(&gr_inode_lock);
52083 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52084 + __get_dev(dentry), role);
52085 + read_unlock(&gr_inode_lock);
52086 + parent = dentry->d_parent;
52087 + spin_unlock(&dentry->d_lock);
52088 +
52089 + if (retval != NULL)
52090 + goto out;
52091 +
52092 + dentry = parent;
52093 + }
52094 +
52095 + spin_lock(&dentry->d_lock);
52096 + read_lock(&gr_inode_lock);
52097 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52098 + __get_dev(dentry), role);
52099 + read_unlock(&gr_inode_lock);
52100 + spin_unlock(&dentry->d_lock);
52101 +
52102 + if (unlikely(retval == NULL)) {
52103 + /* real_root is pinned, we don't need to hold a reference */
52104 + read_lock(&gr_inode_lock);
52105 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
52106 + __get_dev(real_root.dentry), role);
52107 + read_unlock(&gr_inode_lock);
52108 + }
52109 +out:
52110 + br_read_unlock(vfsmount_lock);
52111 + write_sequnlock(&rename_lock);
52112 +
52113 + BUG_ON(retval == NULL);
52114 +
52115 + return retval;
52116 +}
52117 +
52118 +static void
52119 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
52120 +{
52121 + struct task_struct *task = current;
52122 + const struct cred *cred = current_cred();
52123 +
52124 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52125 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52126 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52127 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
52128 +
52129 + return;
52130 +}
52131 +
52132 +static void
52133 +gr_log_learn_id_change(const char type, const unsigned int real,
52134 + const unsigned int effective, const unsigned int fs)
52135 +{
52136 + struct task_struct *task = current;
52137 + const struct cred *cred = current_cred();
52138 +
52139 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
52140 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52141 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52142 + type, real, effective, fs, &task->signal->saved_ip);
52143 +
52144 + return;
52145 +}
52146 +
52147 +__u32
52148 +gr_search_file(const struct dentry * dentry, const __u32 mode,
52149 + const struct vfsmount * mnt)
52150 +{
52151 + __u32 retval = mode;
52152 + struct acl_subject_label *curracl;
52153 + struct acl_object_label *currobj;
52154 +
52155 + if (unlikely(!(gr_status & GR_READY)))
52156 + return (mode & ~GR_AUDITS);
52157 +
52158 + curracl = current->acl;
52159 +
52160 + currobj = chk_obj_label(dentry, mnt, curracl);
52161 + retval = currobj->mode & mode;
52162 +
52163 + /* if we're opening a specified transfer file for writing
52164 + (e.g. /dev/initctl), then transfer our role to init
52165 + */
52166 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
52167 + current->role->roletype & GR_ROLE_PERSIST)) {
52168 + struct task_struct *task = init_pid_ns.child_reaper;
52169 +
52170 + if (task->role != current->role) {
52171 + task->acl_sp_role = 0;
52172 + task->acl_role_id = current->acl_role_id;
52173 + task->role = current->role;
52174 + rcu_read_lock();
52175 + read_lock(&grsec_exec_file_lock);
52176 + gr_apply_subject_to_task(task);
52177 + read_unlock(&grsec_exec_file_lock);
52178 + rcu_read_unlock();
52179 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
52180 + }
52181 + }
52182 +
52183 + if (unlikely
52184 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52185 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52186 + __u32 new_mode = mode;
52187 +
52188 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52189 +
52190 + retval = new_mode;
52191 +
52192 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52193 + new_mode |= GR_INHERIT;
52194 +
52195 + if (!(mode & GR_NOLEARN))
52196 + gr_log_learn(dentry, mnt, new_mode);
52197 + }
52198 +
52199 + return retval;
52200 +}
52201 +
52202 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52203 + const struct dentry *parent,
52204 + const struct vfsmount *mnt)
52205 +{
52206 + struct name_entry *match;
52207 + struct acl_object_label *matchpo;
52208 + struct acl_subject_label *curracl;
52209 + char *path;
52210 +
52211 + if (unlikely(!(gr_status & GR_READY)))
52212 + return NULL;
52213 +
52214 + preempt_disable();
52215 + path = gr_to_filename_rbac(new_dentry, mnt);
52216 + match = lookup_name_entry_create(path);
52217 +
52218 + curracl = current->acl;
52219 +
52220 + if (match) {
52221 + read_lock(&gr_inode_lock);
52222 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
52223 + read_unlock(&gr_inode_lock);
52224 +
52225 + if (matchpo) {
52226 + preempt_enable();
52227 + return matchpo;
52228 + }
52229 + }
52230 +
52231 + // lookup parent
52232 +
52233 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
52234 +
52235 + preempt_enable();
52236 + return matchpo;
52237 +}
52238 +
52239 +__u32
52240 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
52241 + const struct vfsmount * mnt, const __u32 mode)
52242 +{
52243 + struct acl_object_label *matchpo;
52244 + __u32 retval;
52245 +
52246 + if (unlikely(!(gr_status & GR_READY)))
52247 + return (mode & ~GR_AUDITS);
52248 +
52249 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
52250 +
52251 + retval = matchpo->mode & mode;
52252 +
52253 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
52254 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52255 + __u32 new_mode = mode;
52256 +
52257 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52258 +
52259 + gr_log_learn(new_dentry, mnt, new_mode);
52260 + return new_mode;
52261 + }
52262 +
52263 + return retval;
52264 +}
52265 +
52266 +__u32
52267 +gr_check_link(const struct dentry * new_dentry,
52268 + const struct dentry * parent_dentry,
52269 + const struct vfsmount * parent_mnt,
52270 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52271 +{
52272 + struct acl_object_label *obj;
52273 + __u32 oldmode, newmode;
52274 + __u32 needmode;
52275 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52276 + GR_DELETE | GR_INHERIT;
52277 +
52278 + if (unlikely(!(gr_status & GR_READY)))
52279 + return (GR_CREATE | GR_LINK);
52280 +
52281 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52282 + oldmode = obj->mode;
52283 +
52284 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52285 + newmode = obj->mode;
52286 +
52287 + needmode = newmode & checkmodes;
52288 +
52289 + // old name for hardlink must have at least the permissions of the new name
52290 + if ((oldmode & needmode) != needmode)
52291 + goto bad;
52292 +
52293 + // if old name had restrictions/auditing, make sure the new name does as well
52294 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52295 +
52296 + // don't allow hardlinking of suid/sgid files without permission
52297 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52298 + needmode |= GR_SETID;
52299 +
52300 + if ((newmode & needmode) != needmode)
52301 + goto bad;
52302 +
52303 + // enforce minimum permissions
52304 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52305 + return newmode;
52306 +bad:
52307 + needmode = oldmode;
52308 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52309 + needmode |= GR_SETID;
52310 +
52311 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52312 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52313 + return (GR_CREATE | GR_LINK);
52314 + } else if (newmode & GR_SUPPRESS)
52315 + return GR_SUPPRESS;
52316 + else
52317 + return 0;
52318 +}
52319 +
52320 +int
52321 +gr_check_hidden_task(const struct task_struct *task)
52322 +{
52323 + if (unlikely(!(gr_status & GR_READY)))
52324 + return 0;
52325 +
52326 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52327 + return 1;
52328 +
52329 + return 0;
52330 +}
52331 +
52332 +int
52333 +gr_check_protected_task(const struct task_struct *task)
52334 +{
52335 + if (unlikely(!(gr_status & GR_READY) || !task))
52336 + return 0;
52337 +
52338 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52339 + task->acl != current->acl)
52340 + return 1;
52341 +
52342 + return 0;
52343 +}
52344 +
52345 +int
52346 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52347 +{
52348 + struct task_struct *p;
52349 + int ret = 0;
52350 +
52351 + if (unlikely(!(gr_status & GR_READY) || !pid))
52352 + return ret;
52353 +
52354 + read_lock(&tasklist_lock);
52355 + do_each_pid_task(pid, type, p) {
52356 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52357 + p->acl != current->acl) {
52358 + ret = 1;
52359 + goto out;
52360 + }
52361 + } while_each_pid_task(pid, type, p);
52362 +out:
52363 + read_unlock(&tasklist_lock);
52364 +
52365 + return ret;
52366 +}
52367 +
52368 +void
52369 +gr_copy_label(struct task_struct *tsk)
52370 +{
52371 + /* plain copying of fields is already done by dup_task_struct */
52372 + tsk->signal->used_accept = 0;
52373 + tsk->acl_sp_role = 0;
52374 + //tsk->acl_role_id = current->acl_role_id;
52375 + //tsk->acl = current->acl;
52376 + //tsk->role = current->role;
52377 + tsk->signal->curr_ip = current->signal->curr_ip;
52378 + tsk->signal->saved_ip = current->signal->saved_ip;
52379 + if (current->exec_file)
52380 + get_file(current->exec_file);
52381 + //tsk->exec_file = current->exec_file;
52382 + //tsk->is_writable = current->is_writable;
52383 + if (unlikely(current->signal->used_accept)) {
52384 + current->signal->curr_ip = 0;
52385 + current->signal->saved_ip = 0;
52386 + }
52387 +
52388 + return;
52389 +}
52390 +
52391 +static void
52392 +gr_set_proc_res(struct task_struct *task)
52393 +{
52394 + struct acl_subject_label *proc;
52395 + unsigned short i;
52396 +
52397 + proc = task->acl;
52398 +
52399 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
52400 + return;
52401 +
52402 + for (i = 0; i < RLIM_NLIMITS; i++) {
52403 + if (!(proc->resmask & (1 << i)))
52404 + continue;
52405 +
52406 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
52407 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
52408 + }
52409 +
52410 + return;
52411 +}
52412 +
52413 +extern int __gr_process_user_ban(struct user_struct *user);
52414 +
52415 +int
52416 +gr_check_user_change(int real, int effective, int fs)
52417 +{
52418 + unsigned int i;
52419 + __u16 num;
52420 + uid_t *uidlist;
52421 + int curuid;
52422 + int realok = 0;
52423 + int effectiveok = 0;
52424 + int fsok = 0;
52425 +
52426 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52427 + struct user_struct *user;
52428 +
52429 + if (real == -1)
52430 + goto skipit;
52431 +
52432 + user = find_user(real);
52433 + if (user == NULL)
52434 + goto skipit;
52435 +
52436 + if (__gr_process_user_ban(user)) {
52437 + /* for find_user */
52438 + free_uid(user);
52439 + return 1;
52440 + }
52441 +
52442 + /* for find_user */
52443 + free_uid(user);
52444 +
52445 +skipit:
52446 +#endif
52447 +
52448 + if (unlikely(!(gr_status & GR_READY)))
52449 + return 0;
52450 +
52451 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52452 + gr_log_learn_id_change('u', real, effective, fs);
52453 +
52454 + num = current->acl->user_trans_num;
52455 + uidlist = current->acl->user_transitions;
52456 +
52457 + if (uidlist == NULL)
52458 + return 0;
52459 +
52460 + if (real == -1)
52461 + realok = 1;
52462 + if (effective == -1)
52463 + effectiveok = 1;
52464 + if (fs == -1)
52465 + fsok = 1;
52466 +
52467 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
52468 + for (i = 0; i < num; i++) {
52469 + curuid = (int)uidlist[i];
52470 + if (real == curuid)
52471 + realok = 1;
52472 + if (effective == curuid)
52473 + effectiveok = 1;
52474 + if (fs == curuid)
52475 + fsok = 1;
52476 + }
52477 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
52478 + for (i = 0; i < num; i++) {
52479 + curuid = (int)uidlist[i];
52480 + if (real == curuid)
52481 + break;
52482 + if (effective == curuid)
52483 + break;
52484 + if (fs == curuid)
52485 + break;
52486 + }
52487 + /* not in deny list */
52488 + if (i == num) {
52489 + realok = 1;
52490 + effectiveok = 1;
52491 + fsok = 1;
52492 + }
52493 + }
52494 +
52495 + if (realok && effectiveok && fsok)
52496 + return 0;
52497 + else {
52498 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52499 + return 1;
52500 + }
52501 +}
52502 +
52503 +int
52504 +gr_check_group_change(int real, int effective, int fs)
52505 +{
52506 + unsigned int i;
52507 + __u16 num;
52508 + gid_t *gidlist;
52509 + int curgid;
52510 + int realok = 0;
52511 + int effectiveok = 0;
52512 + int fsok = 0;
52513 +
52514 + if (unlikely(!(gr_status & GR_READY)))
52515 + return 0;
52516 +
52517 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52518 + gr_log_learn_id_change('g', real, effective, fs);
52519 +
52520 + num = current->acl->group_trans_num;
52521 + gidlist = current->acl->group_transitions;
52522 +
52523 + if (gidlist == NULL)
52524 + return 0;
52525 +
52526 + if (real == -1)
52527 + realok = 1;
52528 + if (effective == -1)
52529 + effectiveok = 1;
52530 + if (fs == -1)
52531 + fsok = 1;
52532 +
52533 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
52534 + for (i = 0; i < num; i++) {
52535 + curgid = (int)gidlist[i];
52536 + if (real == curgid)
52537 + realok = 1;
52538 + if (effective == curgid)
52539 + effectiveok = 1;
52540 + if (fs == curgid)
52541 + fsok = 1;
52542 + }
52543 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
52544 + for (i = 0; i < num; i++) {
52545 + curgid = (int)gidlist[i];
52546 + if (real == curgid)
52547 + break;
52548 + if (effective == curgid)
52549 + break;
52550 + if (fs == curgid)
52551 + break;
52552 + }
52553 + /* not in deny list */
52554 + if (i == num) {
52555 + realok = 1;
52556 + effectiveok = 1;
52557 + fsok = 1;
52558 + }
52559 + }
52560 +
52561 + if (realok && effectiveok && fsok)
52562 + return 0;
52563 + else {
52564 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52565 + return 1;
52566 + }
52567 +}
52568 +
52569 +extern int gr_acl_is_capable(const int cap);
52570 +
52571 +void
52572 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
52573 +{
52574 + struct acl_role_label *role = task->role;
52575 + struct acl_subject_label *subj = NULL;
52576 + struct acl_object_label *obj;
52577 + struct file *filp;
52578 +
52579 + if (unlikely(!(gr_status & GR_READY)))
52580 + return;
52581 +
52582 + filp = task->exec_file;
52583 +
52584 + /* kernel process, we'll give them the kernel role */
52585 + if (unlikely(!filp)) {
52586 + task->role = kernel_role;
52587 + task->acl = kernel_role->root_label;
52588 + return;
52589 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
52590 + role = lookup_acl_role_label(task, uid, gid);
52591 +
52592 + /* don't change the role if we're not a privileged process */
52593 + if (role && task->role != role &&
52594 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
52595 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
52596 + return;
52597 +
52598 + /* perform subject lookup in possibly new role
52599 + we can use this result below in the case where role == task->role
52600 + */
52601 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
52602 +
52603 + /* if we changed uid/gid, but result in the same role
52604 + and are using inheritance, don't lose the inherited subject
52605 + if current subject is other than what normal lookup
52606 + would result in, we arrived via inheritance, don't
52607 + lose subject
52608 + */
52609 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
52610 + (subj == task->acl)))
52611 + task->acl = subj;
52612 +
52613 + task->role = role;
52614 +
52615 + task->is_writable = 0;
52616 +
52617 + /* ignore additional mmap checks for processes that are writable
52618 + by the default ACL */
52619 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52620 + if (unlikely(obj->mode & GR_WRITE))
52621 + task->is_writable = 1;
52622 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
52623 + if (unlikely(obj->mode & GR_WRITE))
52624 + task->is_writable = 1;
52625 +
52626 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52627 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52628 +#endif
52629 +
52630 + gr_set_proc_res(task);
52631 +
52632 + return;
52633 +}
52634 +
52635 +int
52636 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
52637 + const int unsafe_flags)
52638 +{
52639 + struct task_struct *task = current;
52640 + struct acl_subject_label *newacl;
52641 + struct acl_object_label *obj;
52642 + __u32 retmode;
52643 +
52644 + if (unlikely(!(gr_status & GR_READY)))
52645 + return 0;
52646 +
52647 + newacl = chk_subj_label(dentry, mnt, task->role);
52648 +
52649 + /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
52650 + did an exec
52651 + */
52652 + rcu_read_lock();
52653 + read_lock(&tasklist_lock);
52654 + if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
52655 + (task->parent->acl->mode & GR_POVERRIDE))) {
52656 + read_unlock(&tasklist_lock);
52657 + rcu_read_unlock();
52658 + goto skip_check;
52659 + }
52660 + read_unlock(&tasklist_lock);
52661 + rcu_read_unlock();
52662 +
52663 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
52664 + !(task->role->roletype & GR_ROLE_GOD) &&
52665 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
52666 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52667 + if (unsafe_flags & LSM_UNSAFE_SHARE)
52668 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
52669 + else
52670 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
52671 + return -EACCES;
52672 + }
52673 +
52674 +skip_check:
52675 +
52676 + obj = chk_obj_label(dentry, mnt, task->acl);
52677 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
52678 +
52679 + if (!(task->acl->mode & GR_INHERITLEARN) &&
52680 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
52681 + if (obj->nested)
52682 + task->acl = obj->nested;
52683 + else
52684 + task->acl = newacl;
52685 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
52686 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
52687 +
52688 + task->is_writable = 0;
52689 +
52690 + /* ignore additional mmap checks for processes that are writable
52691 + by the default ACL */
52692 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
52693 + if (unlikely(obj->mode & GR_WRITE))
52694 + task->is_writable = 1;
52695 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
52696 + if (unlikely(obj->mode & GR_WRITE))
52697 + task->is_writable = 1;
52698 +
52699 + gr_set_proc_res(task);
52700 +
52701 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52702 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52703 +#endif
52704 + return 0;
52705 +}
52706 +
52707 +/* always called with valid inodev ptr */
52708 +static void
52709 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
52710 +{
52711 + struct acl_object_label *matchpo;
52712 + struct acl_subject_label *matchps;
52713 + struct acl_subject_label *subj;
52714 + struct acl_role_label *role;
52715 + unsigned int x;
52716 +
52717 + FOR_EACH_ROLE_START(role)
52718 + FOR_EACH_SUBJECT_START(role, subj, x)
52719 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
52720 + matchpo->mode |= GR_DELETED;
52721 + FOR_EACH_SUBJECT_END(subj,x)
52722 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
52723 + if (subj->inode == ino && subj->device == dev)
52724 + subj->mode |= GR_DELETED;
52725 + FOR_EACH_NESTED_SUBJECT_END(subj)
52726 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
52727 + matchps->mode |= GR_DELETED;
52728 + FOR_EACH_ROLE_END(role)
52729 +
52730 + inodev->nentry->deleted = 1;
52731 +
52732 + return;
52733 +}
52734 +
52735 +void
52736 +gr_handle_delete(const ino_t ino, const dev_t dev)
52737 +{
52738 + struct inodev_entry *inodev;
52739 +
52740 + if (unlikely(!(gr_status & GR_READY)))
52741 + return;
52742 +
52743 + write_lock(&gr_inode_lock);
52744 + inodev = lookup_inodev_entry(ino, dev);
52745 + if (inodev != NULL)
52746 + do_handle_delete(inodev, ino, dev);
52747 + write_unlock(&gr_inode_lock);
52748 +
52749 + return;
52750 +}
52751 +
52752 +static void
52753 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
52754 + const ino_t newinode, const dev_t newdevice,
52755 + struct acl_subject_label *subj)
52756 +{
52757 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
52758 + struct acl_object_label *match;
52759 +
52760 + match = subj->obj_hash[index];
52761 +
52762 + while (match && (match->inode != oldinode ||
52763 + match->device != olddevice ||
52764 + !(match->mode & GR_DELETED)))
52765 + match = match->next;
52766 +
52767 + if (match && (match->inode == oldinode)
52768 + && (match->device == olddevice)
52769 + && (match->mode & GR_DELETED)) {
52770 + if (match->prev == NULL) {
52771 + subj->obj_hash[index] = match->next;
52772 + if (match->next != NULL)
52773 + match->next->prev = NULL;
52774 + } else {
52775 + match->prev->next = match->next;
52776 + if (match->next != NULL)
52777 + match->next->prev = match->prev;
52778 + }
52779 + match->prev = NULL;
52780 + match->next = NULL;
52781 + match->inode = newinode;
52782 + match->device = newdevice;
52783 + match->mode &= ~GR_DELETED;
52784 +
52785 + insert_acl_obj_label(match, subj);
52786 + }
52787 +
52788 + return;
52789 +}
52790 +
52791 +static void
52792 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
52793 + const ino_t newinode, const dev_t newdevice,
52794 + struct acl_role_label *role)
52795 +{
52796 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
52797 + struct acl_subject_label *match;
52798 +
52799 + match = role->subj_hash[index];
52800 +
52801 + while (match && (match->inode != oldinode ||
52802 + match->device != olddevice ||
52803 + !(match->mode & GR_DELETED)))
52804 + match = match->next;
52805 +
52806 + if (match && (match->inode == oldinode)
52807 + && (match->device == olddevice)
52808 + && (match->mode & GR_DELETED)) {
52809 + if (match->prev == NULL) {
52810 + role->subj_hash[index] = match->next;
52811 + if (match->next != NULL)
52812 + match->next->prev = NULL;
52813 + } else {
52814 + match->prev->next = match->next;
52815 + if (match->next != NULL)
52816 + match->next->prev = match->prev;
52817 + }
52818 + match->prev = NULL;
52819 + match->next = NULL;
52820 + match->inode = newinode;
52821 + match->device = newdevice;
52822 + match->mode &= ~GR_DELETED;
52823 +
52824 + insert_acl_subj_label(match, role);
52825 + }
52826 +
52827 + return;
52828 +}
52829 +
52830 +static void
52831 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
52832 + const ino_t newinode, const dev_t newdevice)
52833 +{
52834 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
52835 + struct inodev_entry *match;
52836 +
52837 + match = inodev_set.i_hash[index];
52838 +
52839 + while (match && (match->nentry->inode != oldinode ||
52840 + match->nentry->device != olddevice || !match->nentry->deleted))
52841 + match = match->next;
52842 +
52843 + if (match && (match->nentry->inode == oldinode)
52844 + && (match->nentry->device == olddevice) &&
52845 + match->nentry->deleted) {
52846 + if (match->prev == NULL) {
52847 + inodev_set.i_hash[index] = match->next;
52848 + if (match->next != NULL)
52849 + match->next->prev = NULL;
52850 + } else {
52851 + match->prev->next = match->next;
52852 + if (match->next != NULL)
52853 + match->next->prev = match->prev;
52854 + }
52855 + match->prev = NULL;
52856 + match->next = NULL;
52857 + match->nentry->inode = newinode;
52858 + match->nentry->device = newdevice;
52859 + match->nentry->deleted = 0;
52860 +
52861 + insert_inodev_entry(match);
52862 + }
52863 +
52864 + return;
52865 +}
52866 +
52867 +static void
52868 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
52869 +{
52870 + struct acl_subject_label *subj;
52871 + struct acl_role_label *role;
52872 + unsigned int x;
52873 +
52874 + FOR_EACH_ROLE_START(role)
52875 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
52876 +
52877 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
52878 + if ((subj->inode == ino) && (subj->device == dev)) {
52879 + subj->inode = ino;
52880 + subj->device = dev;
52881 + }
52882 + FOR_EACH_NESTED_SUBJECT_END(subj)
52883 + FOR_EACH_SUBJECT_START(role, subj, x)
52884 + update_acl_obj_label(matchn->inode, matchn->device,
52885 + ino, dev, subj);
52886 + FOR_EACH_SUBJECT_END(subj,x)
52887 + FOR_EACH_ROLE_END(role)
52888 +
52889 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
52890 +
52891 + return;
52892 +}
52893 +
52894 +static void
52895 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
52896 + const struct vfsmount *mnt)
52897 +{
52898 + ino_t ino = dentry->d_inode->i_ino;
52899 + dev_t dev = __get_dev(dentry);
52900 +
52901 + __do_handle_create(matchn, ino, dev);
52902 +
52903 + return;
52904 +}
52905 +
52906 +void
52907 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
52908 +{
52909 + struct name_entry *matchn;
52910 +
52911 + if (unlikely(!(gr_status & GR_READY)))
52912 + return;
52913 +
52914 + preempt_disable();
52915 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
52916 +
52917 + if (unlikely((unsigned long)matchn)) {
52918 + write_lock(&gr_inode_lock);
52919 + do_handle_create(matchn, dentry, mnt);
52920 + write_unlock(&gr_inode_lock);
52921 + }
52922 + preempt_enable();
52923 +
52924 + return;
52925 +}
52926 +
52927 +void
52928 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
52929 +{
52930 + struct name_entry *matchn;
52931 +
52932 + if (unlikely(!(gr_status & GR_READY)))
52933 + return;
52934 +
52935 + preempt_disable();
52936 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
52937 +
52938 + if (unlikely((unsigned long)matchn)) {
52939 + write_lock(&gr_inode_lock);
52940 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
52941 + write_unlock(&gr_inode_lock);
52942 + }
52943 + preempt_enable();
52944 +
52945 + return;
52946 +}
52947 +
52948 +void
52949 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
52950 + struct dentry *old_dentry,
52951 + struct dentry *new_dentry,
52952 + struct vfsmount *mnt, const __u8 replace)
52953 +{
52954 + struct name_entry *matchn;
52955 + struct inodev_entry *inodev;
52956 + struct inode *inode = new_dentry->d_inode;
52957 + ino_t old_ino = old_dentry->d_inode->i_ino;
52958 + dev_t old_dev = __get_dev(old_dentry);
52959 +
52960 + /* vfs_rename swaps the name and parent link for old_dentry and
52961 + new_dentry
52962 + at this point, old_dentry has the new name, parent link, and inode
52963 + for the renamed file
52964 + if a file is being replaced by a rename, new_dentry has the inode
52965 + and name for the replaced file
52966 + */
52967 +
52968 + if (unlikely(!(gr_status & GR_READY)))
52969 + return;
52970 +
52971 + preempt_disable();
52972 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
52973 +
52974 + /* we wouldn't have to check d_inode if it weren't for
52975 + NFS silly-renaming
52976 + */
52977 +
52978 + write_lock(&gr_inode_lock);
52979 + if (unlikely(replace && inode)) {
52980 + ino_t new_ino = inode->i_ino;
52981 + dev_t new_dev = __get_dev(new_dentry);
52982 +
52983 + inodev = lookup_inodev_entry(new_ino, new_dev);
52984 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
52985 + do_handle_delete(inodev, new_ino, new_dev);
52986 + }
52987 +
52988 + inodev = lookup_inodev_entry(old_ino, old_dev);
52989 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
52990 + do_handle_delete(inodev, old_ino, old_dev);
52991 +
52992 + if (unlikely((unsigned long)matchn))
52993 + do_handle_create(matchn, old_dentry, mnt);
52994 +
52995 + write_unlock(&gr_inode_lock);
52996 + preempt_enable();
52997 +
52998 + return;
52999 +}
53000 +
53001 +static int
53002 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
53003 + unsigned char **sum)
53004 +{
53005 + struct acl_role_label *r;
53006 + struct role_allowed_ip *ipp;
53007 + struct role_transition *trans;
53008 + unsigned int i;
53009 + int found = 0;
53010 + u32 curr_ip = current->signal->curr_ip;
53011 +
53012 + current->signal->saved_ip = curr_ip;
53013 +
53014 + /* check transition table */
53015 +
53016 + for (trans = current->role->transitions; trans; trans = trans->next) {
53017 + if (!strcmp(rolename, trans->rolename)) {
53018 + found = 1;
53019 + break;
53020 + }
53021 + }
53022 +
53023 + if (!found)
53024 + return 0;
53025 +
53026 + /* handle special roles that do not require authentication
53027 + and check ip */
53028 +
53029 + FOR_EACH_ROLE_START(r)
53030 + if (!strcmp(rolename, r->rolename) &&
53031 + (r->roletype & GR_ROLE_SPECIAL)) {
53032 + found = 0;
53033 + if (r->allowed_ips != NULL) {
53034 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
53035 + if ((ntohl(curr_ip) & ipp->netmask) ==
53036 + (ntohl(ipp->addr) & ipp->netmask))
53037 + found = 1;
53038 + }
53039 + } else
53040 + found = 2;
53041 + if (!found)
53042 + return 0;
53043 +
53044 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
53045 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
53046 + *salt = NULL;
53047 + *sum = NULL;
53048 + return 1;
53049 + }
53050 + }
53051 + FOR_EACH_ROLE_END(r)
53052 +
53053 + for (i = 0; i < num_sprole_pws; i++) {
53054 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
53055 + *salt = acl_special_roles[i]->salt;
53056 + *sum = acl_special_roles[i]->sum;
53057 + return 1;
53058 + }
53059 + }
53060 +
53061 + return 0;
53062 +}
53063 +
53064 +static void
53065 +assign_special_role(char *rolename)
53066 +{
53067 + struct acl_object_label *obj;
53068 + struct acl_role_label *r;
53069 + struct acl_role_label *assigned = NULL;
53070 + struct task_struct *tsk;
53071 + struct file *filp;
53072 +
53073 + FOR_EACH_ROLE_START(r)
53074 + if (!strcmp(rolename, r->rolename) &&
53075 + (r->roletype & GR_ROLE_SPECIAL)) {
53076 + assigned = r;
53077 + break;
53078 + }
53079 + FOR_EACH_ROLE_END(r)
53080 +
53081 + if (!assigned)
53082 + return;
53083 +
53084 + read_lock(&tasklist_lock);
53085 + read_lock(&grsec_exec_file_lock);
53086 +
53087 + tsk = current->real_parent;
53088 + if (tsk == NULL)
53089 + goto out_unlock;
53090 +
53091 + filp = tsk->exec_file;
53092 + if (filp == NULL)
53093 + goto out_unlock;
53094 +
53095 + tsk->is_writable = 0;
53096 +
53097 + tsk->acl_sp_role = 1;
53098 + tsk->acl_role_id = ++acl_sp_role_value;
53099 + tsk->role = assigned;
53100 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
53101 +
53102 + /* ignore additional mmap checks for processes that are writable
53103 + by the default ACL */
53104 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53105 + if (unlikely(obj->mode & GR_WRITE))
53106 + tsk->is_writable = 1;
53107 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
53108 + if (unlikely(obj->mode & GR_WRITE))
53109 + tsk->is_writable = 1;
53110 +
53111 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53112 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
53113 +#endif
53114 +
53115 +out_unlock:
53116 + read_unlock(&grsec_exec_file_lock);
53117 + read_unlock(&tasklist_lock);
53118 + return;
53119 +}
53120 +
53121 +int gr_check_secure_terminal(struct task_struct *task)
53122 +{
53123 + struct task_struct *p, *p2, *p3;
53124 + struct files_struct *files;
53125 + struct fdtable *fdt;
53126 + struct file *our_file = NULL, *file;
53127 + int i;
53128 +
53129 + if (task->signal->tty == NULL)
53130 + return 1;
53131 +
53132 + files = get_files_struct(task);
53133 + if (files != NULL) {
53134 + rcu_read_lock();
53135 + fdt = files_fdtable(files);
53136 + for (i=0; i < fdt->max_fds; i++) {
53137 + file = fcheck_files(files, i);
53138 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
53139 + get_file(file);
53140 + our_file = file;
53141 + }
53142 + }
53143 + rcu_read_unlock();
53144 + put_files_struct(files);
53145 + }
53146 +
53147 + if (our_file == NULL)
53148 + return 1;
53149 +
53150 + read_lock(&tasklist_lock);
53151 + do_each_thread(p2, p) {
53152 + files = get_files_struct(p);
53153 + if (files == NULL ||
53154 + (p->signal && p->signal->tty == task->signal->tty)) {
53155 + if (files != NULL)
53156 + put_files_struct(files);
53157 + continue;
53158 + }
53159 + rcu_read_lock();
53160 + fdt = files_fdtable(files);
53161 + for (i=0; i < fdt->max_fds; i++) {
53162 + file = fcheck_files(files, i);
53163 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
53164 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
53165 + p3 = task;
53166 + while (p3->pid > 0) {
53167 + if (p3 == p)
53168 + break;
53169 + p3 = p3->real_parent;
53170 + }
53171 + if (p3 == p)
53172 + break;
53173 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
53174 + gr_handle_alertkill(p);
53175 + rcu_read_unlock();
53176 + put_files_struct(files);
53177 + read_unlock(&tasklist_lock);
53178 + fput(our_file);
53179 + return 0;
53180 + }
53181 + }
53182 + rcu_read_unlock();
53183 + put_files_struct(files);
53184 + } while_each_thread(p2, p);
53185 + read_unlock(&tasklist_lock);
53186 +
53187 + fput(our_file);
53188 + return 1;
53189 +}
53190 +
53191 +ssize_t
53192 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
53193 +{
53194 + struct gr_arg_wrapper uwrap;
53195 + unsigned char *sprole_salt = NULL;
53196 + unsigned char *sprole_sum = NULL;
53197 + int error = sizeof (struct gr_arg_wrapper);
53198 + int error2 = 0;
53199 +
53200 + mutex_lock(&gr_dev_mutex);
53201 +
53202 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53203 + error = -EPERM;
53204 + goto out;
53205 + }
53206 +
53207 + if (count != sizeof (struct gr_arg_wrapper)) {
53208 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53209 + error = -EINVAL;
53210 + goto out;
53211 + }
53212 +
53213 +
53214 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53215 + gr_auth_expires = 0;
53216 + gr_auth_attempts = 0;
53217 + }
53218 +
53219 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53220 + error = -EFAULT;
53221 + goto out;
53222 + }
53223 +
53224 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53225 + error = -EINVAL;
53226 + goto out;
53227 + }
53228 +
53229 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
53230 + error = -EFAULT;
53231 + goto out;
53232 + }
53233 +
53234 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53235 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53236 + time_after(gr_auth_expires, get_seconds())) {
53237 + error = -EBUSY;
53238 + goto out;
53239 + }
53240 +
53241 + /* if non-root trying to do anything other than use a special role,
53242 + do not attempt authentication, do not count towards authentication
53243 + locking
53244 + */
53245 +
53246 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
53247 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53248 + current_uid()) {
53249 + error = -EPERM;
53250 + goto out;
53251 + }
53252 +
53253 + /* ensure pw and special role name are null terminated */
53254 +
53255 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
53256 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
53257 +
53258 + /* Okay.
53259 + * We have our enough of the argument structure..(we have yet
53260 + * to copy_from_user the tables themselves) . Copy the tables
53261 + * only if we need them, i.e. for loading operations. */
53262 +
53263 + switch (gr_usermode->mode) {
53264 + case GR_STATUS:
53265 + if (gr_status & GR_READY) {
53266 + error = 1;
53267 + if (!gr_check_secure_terminal(current))
53268 + error = 3;
53269 + } else
53270 + error = 2;
53271 + goto out;
53272 + case GR_SHUTDOWN:
53273 + if ((gr_status & GR_READY)
53274 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53275 + pax_open_kernel();
53276 + gr_status &= ~GR_READY;
53277 + pax_close_kernel();
53278 +
53279 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
53280 + free_variables();
53281 + memset(gr_usermode, 0, sizeof (struct gr_arg));
53282 + memset(gr_system_salt, 0, GR_SALT_LEN);
53283 + memset(gr_system_sum, 0, GR_SHA_LEN);
53284 + } else if (gr_status & GR_READY) {
53285 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53286 + error = -EPERM;
53287 + } else {
53288 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53289 + error = -EAGAIN;
53290 + }
53291 + break;
53292 + case GR_ENABLE:
53293 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53294 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53295 + else {
53296 + if (gr_status & GR_READY)
53297 + error = -EAGAIN;
53298 + else
53299 + error = error2;
53300 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53301 + }
53302 + break;
53303 + case GR_RELOAD:
53304 + if (!(gr_status & GR_READY)) {
53305 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53306 + error = -EAGAIN;
53307 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53308 + preempt_disable();
53309 +
53310 + pax_open_kernel();
53311 + gr_status &= ~GR_READY;
53312 + pax_close_kernel();
53313 +
53314 + free_variables();
53315 + if (!(error2 = gracl_init(gr_usermode))) {
53316 + preempt_enable();
53317 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
53318 + } else {
53319 + preempt_enable();
53320 + error = error2;
53321 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53322 + }
53323 + } else {
53324 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53325 + error = -EPERM;
53326 + }
53327 + break;
53328 + case GR_SEGVMOD:
53329 + if (unlikely(!(gr_status & GR_READY))) {
53330 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53331 + error = -EAGAIN;
53332 + break;
53333 + }
53334 +
53335 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53336 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53337 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53338 + struct acl_subject_label *segvacl;
53339 + segvacl =
53340 + lookup_acl_subj_label(gr_usermode->segv_inode,
53341 + gr_usermode->segv_device,
53342 + current->role);
53343 + if (segvacl) {
53344 + segvacl->crashes = 0;
53345 + segvacl->expires = 0;
53346 + }
53347 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53348 + gr_remove_uid(gr_usermode->segv_uid);
53349 + }
53350 + } else {
53351 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53352 + error = -EPERM;
53353 + }
53354 + break;
53355 + case GR_SPROLE:
53356 + case GR_SPROLEPAM:
53357 + if (unlikely(!(gr_status & GR_READY))) {
53358 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53359 + error = -EAGAIN;
53360 + break;
53361 + }
53362 +
53363 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53364 + current->role->expires = 0;
53365 + current->role->auth_attempts = 0;
53366 + }
53367 +
53368 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53369 + time_after(current->role->expires, get_seconds())) {
53370 + error = -EBUSY;
53371 + goto out;
53372 + }
53373 +
53374 + if (lookup_special_role_auth
53375 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53376 + && ((!sprole_salt && !sprole_sum)
53377 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53378 + char *p = "";
53379 + assign_special_role(gr_usermode->sp_role);
53380 + read_lock(&tasklist_lock);
53381 + if (current->real_parent)
53382 + p = current->real_parent->role->rolename;
53383 + read_unlock(&tasklist_lock);
53384 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53385 + p, acl_sp_role_value);
53386 + } else {
53387 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53388 + error = -EPERM;
53389 + if(!(current->role->auth_attempts++))
53390 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53391 +
53392 + goto out;
53393 + }
53394 + break;
53395 + case GR_UNSPROLE:
53396 + if (unlikely(!(gr_status & GR_READY))) {
53397 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53398 + error = -EAGAIN;
53399 + break;
53400 + }
53401 +
53402 + if (current->role->roletype & GR_ROLE_SPECIAL) {
53403 + char *p = "";
53404 + int i = 0;
53405 +
53406 + read_lock(&tasklist_lock);
53407 + if (current->real_parent) {
53408 + p = current->real_parent->role->rolename;
53409 + i = current->real_parent->acl_role_id;
53410 + }
53411 + read_unlock(&tasklist_lock);
53412 +
53413 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
53414 + gr_set_acls(1);
53415 + } else {
53416 + error = -EPERM;
53417 + goto out;
53418 + }
53419 + break;
53420 + default:
53421 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
53422 + error = -EINVAL;
53423 + break;
53424 + }
53425 +
53426 + if (error != -EPERM)
53427 + goto out;
53428 +
53429 + if(!(gr_auth_attempts++))
53430 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53431 +
53432 + out:
53433 + mutex_unlock(&gr_dev_mutex);
53434 + return error;
53435 +}
53436 +
53437 +/* must be called with
53438 + rcu_read_lock();
53439 + read_lock(&tasklist_lock);
53440 + read_lock(&grsec_exec_file_lock);
53441 +*/
53442 +int gr_apply_subject_to_task(struct task_struct *task)
53443 +{
53444 + struct acl_object_label *obj;
53445 + char *tmpname;
53446 + struct acl_subject_label *tmpsubj;
53447 + struct file *filp;
53448 + struct name_entry *nmatch;
53449 +
53450 + filp = task->exec_file;
53451 + if (filp == NULL)
53452 + return 0;
53453 +
53454 + /* the following is to apply the correct subject
53455 + on binaries running when the RBAC system
53456 + is enabled, when the binaries have been
53457 + replaced or deleted since their execution
53458 + -----
53459 + when the RBAC system starts, the inode/dev
53460 + from exec_file will be one the RBAC system
53461 + is unaware of. It only knows the inode/dev
53462 + of the present file on disk, or the absence
53463 + of it.
53464 + */
53465 + preempt_disable();
53466 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
53467 +
53468 + nmatch = lookup_name_entry(tmpname);
53469 + preempt_enable();
53470 + tmpsubj = NULL;
53471 + if (nmatch) {
53472 + if (nmatch->deleted)
53473 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
53474 + else
53475 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
53476 + if (tmpsubj != NULL)
53477 + task->acl = tmpsubj;
53478 + }
53479 + if (tmpsubj == NULL)
53480 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
53481 + task->role);
53482 + if (task->acl) {
53483 + task->is_writable = 0;
53484 + /* ignore additional mmap checks for processes that are writable
53485 + by the default ACL */
53486 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53487 + if (unlikely(obj->mode & GR_WRITE))
53488 + task->is_writable = 1;
53489 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53490 + if (unlikely(obj->mode & GR_WRITE))
53491 + task->is_writable = 1;
53492 +
53493 + gr_set_proc_res(task);
53494 +
53495 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53496 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53497 +#endif
53498 + } else {
53499 + return 1;
53500 + }
53501 +
53502 + return 0;
53503 +}
53504 +
53505 +int
53506 +gr_set_acls(const int type)
53507 +{
53508 + struct task_struct *task, *task2;
53509 + struct acl_role_label *role = current->role;
53510 + __u16 acl_role_id = current->acl_role_id;
53511 + const struct cred *cred;
53512 + int ret;
53513 +
53514 + rcu_read_lock();
53515 + read_lock(&tasklist_lock);
53516 + read_lock(&grsec_exec_file_lock);
53517 + do_each_thread(task2, task) {
53518 + /* check to see if we're called from the exit handler,
53519 + if so, only replace ACLs that have inherited the admin
53520 + ACL */
53521 +
53522 + if (type && (task->role != role ||
53523 + task->acl_role_id != acl_role_id))
53524 + continue;
53525 +
53526 + task->acl_role_id = 0;
53527 + task->acl_sp_role = 0;
53528 +
53529 + if (task->exec_file) {
53530 + cred = __task_cred(task);
53531 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
53532 + ret = gr_apply_subject_to_task(task);
53533 + if (ret) {
53534 + read_unlock(&grsec_exec_file_lock);
53535 + read_unlock(&tasklist_lock);
53536 + rcu_read_unlock();
53537 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
53538 + return ret;
53539 + }
53540 + } else {
53541 + // it's a kernel process
53542 + task->role = kernel_role;
53543 + task->acl = kernel_role->root_label;
53544 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
53545 + task->acl->mode &= ~GR_PROCFIND;
53546 +#endif
53547 + }
53548 + } while_each_thread(task2, task);
53549 + read_unlock(&grsec_exec_file_lock);
53550 + read_unlock(&tasklist_lock);
53551 + rcu_read_unlock();
53552 +
53553 + return 0;
53554 +}
53555 +
53556 +void
53557 +gr_learn_resource(const struct task_struct *task,
53558 + const int res, const unsigned long wanted, const int gt)
53559 +{
53560 + struct acl_subject_label *acl;
53561 + const struct cred *cred;
53562 +
53563 + if (unlikely((gr_status & GR_READY) &&
53564 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
53565 + goto skip_reslog;
53566 +
53567 +#ifdef CONFIG_GRKERNSEC_RESLOG
53568 + gr_log_resource(task, res, wanted, gt);
53569 +#endif
53570 + skip_reslog:
53571 +
53572 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
53573 + return;
53574 +
53575 + acl = task->acl;
53576 +
53577 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
53578 + !(acl->resmask & (1 << (unsigned short) res))))
53579 + return;
53580 +
53581 + if (wanted >= acl->res[res].rlim_cur) {
53582 + unsigned long res_add;
53583 +
53584 + res_add = wanted;
53585 + switch (res) {
53586 + case RLIMIT_CPU:
53587 + res_add += GR_RLIM_CPU_BUMP;
53588 + break;
53589 + case RLIMIT_FSIZE:
53590 + res_add += GR_RLIM_FSIZE_BUMP;
53591 + break;
53592 + case RLIMIT_DATA:
53593 + res_add += GR_RLIM_DATA_BUMP;
53594 + break;
53595 + case RLIMIT_STACK:
53596 + res_add += GR_RLIM_STACK_BUMP;
53597 + break;
53598 + case RLIMIT_CORE:
53599 + res_add += GR_RLIM_CORE_BUMP;
53600 + break;
53601 + case RLIMIT_RSS:
53602 + res_add += GR_RLIM_RSS_BUMP;
53603 + break;
53604 + case RLIMIT_NPROC:
53605 + res_add += GR_RLIM_NPROC_BUMP;
53606 + break;
53607 + case RLIMIT_NOFILE:
53608 + res_add += GR_RLIM_NOFILE_BUMP;
53609 + break;
53610 + case RLIMIT_MEMLOCK:
53611 + res_add += GR_RLIM_MEMLOCK_BUMP;
53612 + break;
53613 + case RLIMIT_AS:
53614 + res_add += GR_RLIM_AS_BUMP;
53615 + break;
53616 + case RLIMIT_LOCKS:
53617 + res_add += GR_RLIM_LOCKS_BUMP;
53618 + break;
53619 + case RLIMIT_SIGPENDING:
53620 + res_add += GR_RLIM_SIGPENDING_BUMP;
53621 + break;
53622 + case RLIMIT_MSGQUEUE:
53623 + res_add += GR_RLIM_MSGQUEUE_BUMP;
53624 + break;
53625 + case RLIMIT_NICE:
53626 + res_add += GR_RLIM_NICE_BUMP;
53627 + break;
53628 + case RLIMIT_RTPRIO:
53629 + res_add += GR_RLIM_RTPRIO_BUMP;
53630 + break;
53631 + case RLIMIT_RTTIME:
53632 + res_add += GR_RLIM_RTTIME_BUMP;
53633 + break;
53634 + }
53635 +
53636 + acl->res[res].rlim_cur = res_add;
53637 +
53638 + if (wanted > acl->res[res].rlim_max)
53639 + acl->res[res].rlim_max = res_add;
53640 +
53641 + /* only log the subject filename, since resource logging is supported for
53642 + single-subject learning only */
53643 + rcu_read_lock();
53644 + cred = __task_cred(task);
53645 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
53646 + task->role->roletype, cred->uid, cred->gid, acl->filename,
53647 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
53648 + "", (unsigned long) res, &task->signal->saved_ip);
53649 + rcu_read_unlock();
53650 + }
53651 +
53652 + return;
53653 +}
53654 +
53655 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
53656 +void
53657 +pax_set_initial_flags(struct linux_binprm *bprm)
53658 +{
53659 + struct task_struct *task = current;
53660 + struct acl_subject_label *proc;
53661 + unsigned long flags;
53662 +
53663 + if (unlikely(!(gr_status & GR_READY)))
53664 + return;
53665 +
53666 + flags = pax_get_flags(task);
53667 +
53668 + proc = task->acl;
53669 +
53670 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
53671 + flags &= ~MF_PAX_PAGEEXEC;
53672 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
53673 + flags &= ~MF_PAX_SEGMEXEC;
53674 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
53675 + flags &= ~MF_PAX_RANDMMAP;
53676 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
53677 + flags &= ~MF_PAX_EMUTRAMP;
53678 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
53679 + flags &= ~MF_PAX_MPROTECT;
53680 +
53681 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
53682 + flags |= MF_PAX_PAGEEXEC;
53683 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
53684 + flags |= MF_PAX_SEGMEXEC;
53685 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
53686 + flags |= MF_PAX_RANDMMAP;
53687 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
53688 + flags |= MF_PAX_EMUTRAMP;
53689 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
53690 + flags |= MF_PAX_MPROTECT;
53691 +
53692 + pax_set_flags(task, flags);
53693 +
53694 + return;
53695 +}
53696 +#endif
53697 +
53698 +int
53699 +gr_handle_proc_ptrace(struct task_struct *task)
53700 +{
53701 + struct file *filp;
53702 + struct task_struct *tmp = task;
53703 + struct task_struct *curtemp = current;
53704 + __u32 retmode;
53705 +
53706 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53707 + if (unlikely(!(gr_status & GR_READY)))
53708 + return 0;
53709 +#endif
53710 +
53711 + read_lock(&tasklist_lock);
53712 + read_lock(&grsec_exec_file_lock);
53713 + filp = task->exec_file;
53714 +
53715 + while (tmp->pid > 0) {
53716 + if (tmp == curtemp)
53717 + break;
53718 + tmp = tmp->real_parent;
53719 + }
53720 +
53721 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53722 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
53723 + read_unlock(&grsec_exec_file_lock);
53724 + read_unlock(&tasklist_lock);
53725 + return 1;
53726 + }
53727 +
53728 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53729 + if (!(gr_status & GR_READY)) {
53730 + read_unlock(&grsec_exec_file_lock);
53731 + read_unlock(&tasklist_lock);
53732 + return 0;
53733 + }
53734 +#endif
53735 +
53736 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
53737 + read_unlock(&grsec_exec_file_lock);
53738 + read_unlock(&tasklist_lock);
53739 +
53740 + if (retmode & GR_NOPTRACE)
53741 + return 1;
53742 +
53743 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
53744 + && (current->acl != task->acl || (current->acl != current->role->root_label
53745 + && current->pid != task->pid)))
53746 + return 1;
53747 +
53748 + return 0;
53749 +}
53750 +
53751 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
53752 +{
53753 + if (unlikely(!(gr_status & GR_READY)))
53754 + return;
53755 +
53756 + if (!(current->role->roletype & GR_ROLE_GOD))
53757 + return;
53758 +
53759 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
53760 + p->role->rolename, gr_task_roletype_to_char(p),
53761 + p->acl->filename);
53762 +}
53763 +
53764 +int
53765 +gr_handle_ptrace(struct task_struct *task, const long request)
53766 +{
53767 + struct task_struct *tmp = task;
53768 + struct task_struct *curtemp = current;
53769 + __u32 retmode;
53770 +
53771 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53772 + if (unlikely(!(gr_status & GR_READY)))
53773 + return 0;
53774 +#endif
53775 + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
53776 + read_lock(&tasklist_lock);
53777 + while (tmp->pid > 0) {
53778 + if (tmp == curtemp)
53779 + break;
53780 + tmp = tmp->real_parent;
53781 + }
53782 +
53783 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53784 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
53785 + read_unlock(&tasklist_lock);
53786 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53787 + return 1;
53788 + }
53789 + read_unlock(&tasklist_lock);
53790 + }
53791 +
53792 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53793 + if (!(gr_status & GR_READY))
53794 + return 0;
53795 +#endif
53796 +
53797 + read_lock(&grsec_exec_file_lock);
53798 + if (unlikely(!task->exec_file)) {
53799 + read_unlock(&grsec_exec_file_lock);
53800 + return 0;
53801 + }
53802 +
53803 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
53804 + read_unlock(&grsec_exec_file_lock);
53805 +
53806 + if (retmode & GR_NOPTRACE) {
53807 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53808 + return 1;
53809 + }
53810 +
53811 + if (retmode & GR_PTRACERD) {
53812 + switch (request) {
53813 + case PTRACE_SEIZE:
53814 + case PTRACE_POKETEXT:
53815 + case PTRACE_POKEDATA:
53816 + case PTRACE_POKEUSR:
53817 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
53818 + case PTRACE_SETREGS:
53819 + case PTRACE_SETFPREGS:
53820 +#endif
53821 +#ifdef CONFIG_X86
53822 + case PTRACE_SETFPXREGS:
53823 +#endif
53824 +#ifdef CONFIG_ALTIVEC
53825 + case PTRACE_SETVRREGS:
53826 +#endif
53827 + return 1;
53828 + default:
53829 + return 0;
53830 + }
53831 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
53832 + !(current->role->roletype & GR_ROLE_GOD) &&
53833 + (current->acl != task->acl)) {
53834 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53835 + return 1;
53836 + }
53837 +
53838 + return 0;
53839 +}
53840 +
53841 +static int is_writable_mmap(const struct file *filp)
53842 +{
53843 + struct task_struct *task = current;
53844 + struct acl_object_label *obj, *obj2;
53845 +
53846 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
53847 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
53848 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53849 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
53850 + task->role->root_label);
53851 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
53852 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
53853 + return 1;
53854 + }
53855 + }
53856 + return 0;
53857 +}
53858 +
53859 +int
53860 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
53861 +{
53862 + __u32 mode;
53863 +
53864 + if (unlikely(!file || !(prot & PROT_EXEC)))
53865 + return 1;
53866 +
53867 + if (is_writable_mmap(file))
53868 + return 0;
53869 +
53870 + mode =
53871 + gr_search_file(file->f_path.dentry,
53872 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53873 + file->f_path.mnt);
53874 +
53875 + if (!gr_tpe_allow(file))
53876 + return 0;
53877 +
53878 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53879 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53880 + return 0;
53881 + } else if (unlikely(!(mode & GR_EXEC))) {
53882 + return 0;
53883 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53884 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53885 + return 1;
53886 + }
53887 +
53888 + return 1;
53889 +}
53890 +
53891 +int
53892 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53893 +{
53894 + __u32 mode;
53895 +
53896 + if (unlikely(!file || !(prot & PROT_EXEC)))
53897 + return 1;
53898 +
53899 + if (is_writable_mmap(file))
53900 + return 0;
53901 +
53902 + mode =
53903 + gr_search_file(file->f_path.dentry,
53904 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53905 + file->f_path.mnt);
53906 +
53907 + if (!gr_tpe_allow(file))
53908 + return 0;
53909 +
53910 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53911 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53912 + return 0;
53913 + } else if (unlikely(!(mode & GR_EXEC))) {
53914 + return 0;
53915 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53916 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53917 + return 1;
53918 + }
53919 +
53920 + return 1;
53921 +}
53922 +
53923 +void
53924 +gr_acl_handle_psacct(struct task_struct *task, const long code)
53925 +{
53926 + unsigned long runtime;
53927 + unsigned long cputime;
53928 + unsigned int wday, cday;
53929 + __u8 whr, chr;
53930 + __u8 wmin, cmin;
53931 + __u8 wsec, csec;
53932 + struct timespec timeval;
53933 +
53934 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
53935 + !(task->acl->mode & GR_PROCACCT)))
53936 + return;
53937 +
53938 + do_posix_clock_monotonic_gettime(&timeval);
53939 + runtime = timeval.tv_sec - task->start_time.tv_sec;
53940 + wday = runtime / (3600 * 24);
53941 + runtime -= wday * (3600 * 24);
53942 + whr = runtime / 3600;
53943 + runtime -= whr * 3600;
53944 + wmin = runtime / 60;
53945 + runtime -= wmin * 60;
53946 + wsec = runtime;
53947 +
53948 + cputime = (task->utime + task->stime) / HZ;
53949 + cday = cputime / (3600 * 24);
53950 + cputime -= cday * (3600 * 24);
53951 + chr = cputime / 3600;
53952 + cputime -= chr * 3600;
53953 + cmin = cputime / 60;
53954 + cputime -= cmin * 60;
53955 + csec = cputime;
53956 +
53957 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
53958 +
53959 + return;
53960 +}
53961 +
53962 +void gr_set_kernel_label(struct task_struct *task)
53963 +{
53964 + if (gr_status & GR_READY) {
53965 + task->role = kernel_role;
53966 + task->acl = kernel_role->root_label;
53967 + }
53968 + return;
53969 +}
53970 +
53971 +#ifdef CONFIG_TASKSTATS
53972 +int gr_is_taskstats_denied(int pid)
53973 +{
53974 + struct task_struct *task;
53975 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53976 + const struct cred *cred;
53977 +#endif
53978 + int ret = 0;
53979 +
53980 + /* restrict taskstats viewing to un-chrooted root users
53981 + who have the 'view' subject flag if the RBAC system is enabled
53982 + */
53983 +
53984 + rcu_read_lock();
53985 + read_lock(&tasklist_lock);
53986 + task = find_task_by_vpid(pid);
53987 + if (task) {
53988 +#ifdef CONFIG_GRKERNSEC_CHROOT
53989 + if (proc_is_chrooted(task))
53990 + ret = -EACCES;
53991 +#endif
53992 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53993 + cred = __task_cred(task);
53994 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53995 + if (cred->uid != 0)
53996 + ret = -EACCES;
53997 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53998 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
53999 + ret = -EACCES;
54000 +#endif
54001 +#endif
54002 + if (gr_status & GR_READY) {
54003 + if (!(task->acl->mode & GR_VIEW))
54004 + ret = -EACCES;
54005 + }
54006 + } else
54007 + ret = -ENOENT;
54008 +
54009 + read_unlock(&tasklist_lock);
54010 + rcu_read_unlock();
54011 +
54012 + return ret;
54013 +}
54014 +#endif
54015 +
54016 +/* AUXV entries are filled via a descendant of search_binary_handler
54017 + after we've already applied the subject for the target
54018 +*/
54019 +int gr_acl_enable_at_secure(void)
54020 +{
54021 + if (unlikely(!(gr_status & GR_READY)))
54022 + return 0;
54023 +
54024 + if (current->acl->mode & GR_ATSECURE)
54025 + return 1;
54026 +
54027 + return 0;
54028 +}
54029 +
54030 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
54031 +{
54032 + struct task_struct *task = current;
54033 + struct dentry *dentry = file->f_path.dentry;
54034 + struct vfsmount *mnt = file->f_path.mnt;
54035 + struct acl_object_label *obj, *tmp;
54036 + struct acl_subject_label *subj;
54037 + unsigned int bufsize;
54038 + int is_not_root;
54039 + char *path;
54040 + dev_t dev = __get_dev(dentry);
54041 +
54042 + if (unlikely(!(gr_status & GR_READY)))
54043 + return 1;
54044 +
54045 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54046 + return 1;
54047 +
54048 + /* ignore Eric Biederman */
54049 + if (IS_PRIVATE(dentry->d_inode))
54050 + return 1;
54051 +
54052 + subj = task->acl;
54053 + do {
54054 + obj = lookup_acl_obj_label(ino, dev, subj);
54055 + if (obj != NULL)
54056 + return (obj->mode & GR_FIND) ? 1 : 0;
54057 + } while ((subj = subj->parent_subject));
54058 +
54059 + /* this is purely an optimization since we're looking for an object
54060 + for the directory we're doing a readdir on
54061 + if it's possible for any globbed object to match the entry we're
54062 + filling into the directory, then the object we find here will be
54063 + an anchor point with attached globbed objects
54064 + */
54065 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
54066 + if (obj->globbed == NULL)
54067 + return (obj->mode & GR_FIND) ? 1 : 0;
54068 +
54069 + is_not_root = ((obj->filename[0] == '/') &&
54070 + (obj->filename[1] == '\0')) ? 0 : 1;
54071 + bufsize = PAGE_SIZE - namelen - is_not_root;
54072 +
54073 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
54074 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
54075 + return 1;
54076 +
54077 + preempt_disable();
54078 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54079 + bufsize);
54080 +
54081 + bufsize = strlen(path);
54082 +
54083 + /* if base is "/", don't append an additional slash */
54084 + if (is_not_root)
54085 + *(path + bufsize) = '/';
54086 + memcpy(path + bufsize + is_not_root, name, namelen);
54087 + *(path + bufsize + namelen + is_not_root) = '\0';
54088 +
54089 + tmp = obj->globbed;
54090 + while (tmp) {
54091 + if (!glob_match(tmp->filename, path)) {
54092 + preempt_enable();
54093 + return (tmp->mode & GR_FIND) ? 1 : 0;
54094 + }
54095 + tmp = tmp->next;
54096 + }
54097 + preempt_enable();
54098 + return (obj->mode & GR_FIND) ? 1 : 0;
54099 +}
54100 +
54101 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
54102 +EXPORT_SYMBOL(gr_acl_is_enabled);
54103 +#endif
54104 +EXPORT_SYMBOL(gr_learn_resource);
54105 +EXPORT_SYMBOL(gr_set_kernel_label);
54106 +#ifdef CONFIG_SECURITY
54107 +EXPORT_SYMBOL(gr_check_user_change);
54108 +EXPORT_SYMBOL(gr_check_group_change);
54109 +#endif
54110 +
54111 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
54112 new file mode 100644
54113 index 0000000..34fefda
54114 --- /dev/null
54115 +++ b/grsecurity/gracl_alloc.c
54116 @@ -0,0 +1,105 @@
54117 +#include <linux/kernel.h>
54118 +#include <linux/mm.h>
54119 +#include <linux/slab.h>
54120 +#include <linux/vmalloc.h>
54121 +#include <linux/gracl.h>
54122 +#include <linux/grsecurity.h>
54123 +
54124 +static unsigned long alloc_stack_next = 1;
54125 +static unsigned long alloc_stack_size = 1;
54126 +static void **alloc_stack;
54127 +
54128 +static __inline__ int
54129 +alloc_pop(void)
54130 +{
54131 + if (alloc_stack_next == 1)
54132 + return 0;
54133 +
54134 + kfree(alloc_stack[alloc_stack_next - 2]);
54135 +
54136 + alloc_stack_next--;
54137 +
54138 + return 1;
54139 +}
54140 +
54141 +static __inline__ int
54142 +alloc_push(void *buf)
54143 +{
54144 + if (alloc_stack_next >= alloc_stack_size)
54145 + return 1;
54146 +
54147 + alloc_stack[alloc_stack_next - 1] = buf;
54148 +
54149 + alloc_stack_next++;
54150 +
54151 + return 0;
54152 +}
54153 +
54154 +void *
54155 +acl_alloc(unsigned long len)
54156 +{
54157 + void *ret = NULL;
54158 +
54159 + if (!len || len > PAGE_SIZE)
54160 + goto out;
54161 +
54162 + ret = kmalloc(len, GFP_KERNEL);
54163 +
54164 + if (ret) {
54165 + if (alloc_push(ret)) {
54166 + kfree(ret);
54167 + ret = NULL;
54168 + }
54169 + }
54170 +
54171 +out:
54172 + return ret;
54173 +}
54174 +
54175 +void *
54176 +acl_alloc_num(unsigned long num, unsigned long len)
54177 +{
54178 + if (!len || (num > (PAGE_SIZE / len)))
54179 + return NULL;
54180 +
54181 + return acl_alloc(num * len);
54182 +}
54183 +
54184 +void
54185 +acl_free_all(void)
54186 +{
54187 + if (gr_acl_is_enabled() || !alloc_stack)
54188 + return;
54189 +
54190 + while (alloc_pop()) ;
54191 +
54192 + if (alloc_stack) {
54193 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
54194 + kfree(alloc_stack);
54195 + else
54196 + vfree(alloc_stack);
54197 + }
54198 +
54199 + alloc_stack = NULL;
54200 + alloc_stack_size = 1;
54201 + alloc_stack_next = 1;
54202 +
54203 + return;
54204 +}
54205 +
54206 +int
54207 +acl_alloc_stack_init(unsigned long size)
54208 +{
54209 + if ((size * sizeof (void *)) <= PAGE_SIZE)
54210 + alloc_stack =
54211 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
54212 + else
54213 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
54214 +
54215 + alloc_stack_size = size;
54216 +
54217 + if (!alloc_stack)
54218 + return 0;
54219 + else
54220 + return 1;
54221 +}
54222 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
54223 new file mode 100644
54224 index 0000000..6d21049
54225 --- /dev/null
54226 +++ b/grsecurity/gracl_cap.c
54227 @@ -0,0 +1,110 @@
54228 +#include <linux/kernel.h>
54229 +#include <linux/module.h>
54230 +#include <linux/sched.h>
54231 +#include <linux/gracl.h>
54232 +#include <linux/grsecurity.h>
54233 +#include <linux/grinternal.h>
54234 +
54235 +extern const char *captab_log[];
54236 +extern int captab_log_entries;
54237 +
54238 +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
54239 +{
54240 + struct acl_subject_label *curracl;
54241 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54242 + kernel_cap_t cap_audit = __cap_empty_set;
54243 +
54244 + if (!gr_acl_is_enabled())
54245 + return 1;
54246 +
54247 + curracl = task->acl;
54248 +
54249 + cap_drop = curracl->cap_lower;
54250 + cap_mask = curracl->cap_mask;
54251 + cap_audit = curracl->cap_invert_audit;
54252 +
54253 + while ((curracl = curracl->parent_subject)) {
54254 + /* if the cap isn't specified in the current computed mask but is specified in the
54255 + current level subject, and is lowered in the current level subject, then add
54256 + it to the set of dropped capabilities
54257 + otherwise, add the current level subject's mask to the current computed mask
54258 + */
54259 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54260 + cap_raise(cap_mask, cap);
54261 + if (cap_raised(curracl->cap_lower, cap))
54262 + cap_raise(cap_drop, cap);
54263 + if (cap_raised(curracl->cap_invert_audit, cap))
54264 + cap_raise(cap_audit, cap);
54265 + }
54266 + }
54267 +
54268 + if (!cap_raised(cap_drop, cap)) {
54269 + if (cap_raised(cap_audit, cap))
54270 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
54271 + return 1;
54272 + }
54273 +
54274 + curracl = task->acl;
54275 +
54276 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
54277 + && cap_raised(cred->cap_effective, cap)) {
54278 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54279 + task->role->roletype, cred->uid,
54280 + cred->gid, task->exec_file ?
54281 + gr_to_filename(task->exec_file->f_path.dentry,
54282 + task->exec_file->f_path.mnt) : curracl->filename,
54283 + curracl->filename, 0UL,
54284 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
54285 + return 1;
54286 + }
54287 +
54288 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
54289 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
54290 +
54291 + return 0;
54292 +}
54293 +
54294 +int
54295 +gr_acl_is_capable(const int cap)
54296 +{
54297 + return gr_task_acl_is_capable(current, current_cred(), cap);
54298 +}
54299 +
54300 +int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
54301 +{
54302 + struct acl_subject_label *curracl;
54303 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54304 +
54305 + if (!gr_acl_is_enabled())
54306 + return 1;
54307 +
54308 + curracl = task->acl;
54309 +
54310 + cap_drop = curracl->cap_lower;
54311 + cap_mask = curracl->cap_mask;
54312 +
54313 + while ((curracl = curracl->parent_subject)) {
54314 + /* if the cap isn't specified in the current computed mask but is specified in the
54315 + current level subject, and is lowered in the current level subject, then add
54316 + it to the set of dropped capabilities
54317 + otherwise, add the current level subject's mask to the current computed mask
54318 + */
54319 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54320 + cap_raise(cap_mask, cap);
54321 + if (cap_raised(curracl->cap_lower, cap))
54322 + cap_raise(cap_drop, cap);
54323 + }
54324 + }
54325 +
54326 + if (!cap_raised(cap_drop, cap))
54327 + return 1;
54328 +
54329 + return 0;
54330 +}
54331 +
54332 +int
54333 +gr_acl_is_capable_nolog(const int cap)
54334 +{
54335 + return gr_task_acl_is_capable_nolog(current, cap);
54336 +}
54337 +
54338 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
54339 new file mode 100644
54340 index 0000000..88d0e87
54341 --- /dev/null
54342 +++ b/grsecurity/gracl_fs.c
54343 @@ -0,0 +1,435 @@
54344 +#include <linux/kernel.h>
54345 +#include <linux/sched.h>
54346 +#include <linux/types.h>
54347 +#include <linux/fs.h>
54348 +#include <linux/file.h>
54349 +#include <linux/stat.h>
54350 +#include <linux/grsecurity.h>
54351 +#include <linux/grinternal.h>
54352 +#include <linux/gracl.h>
54353 +
54354 +umode_t
54355 +gr_acl_umask(void)
54356 +{
54357 + if (unlikely(!gr_acl_is_enabled()))
54358 + return 0;
54359 +
54360 + return current->role->umask;
54361 +}
54362 +
54363 +__u32
54364 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54365 + const struct vfsmount * mnt)
54366 +{
54367 + __u32 mode;
54368 +
54369 + if (unlikely(!dentry->d_inode))
54370 + return GR_FIND;
54371 +
54372 + mode =
54373 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
54374 +
54375 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
54376 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54377 + return mode;
54378 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
54379 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54380 + return 0;
54381 + } else if (unlikely(!(mode & GR_FIND)))
54382 + return 0;
54383 +
54384 + return GR_FIND;
54385 +}
54386 +
54387 +__u32
54388 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54389 + int acc_mode)
54390 +{
54391 + __u32 reqmode = GR_FIND;
54392 + __u32 mode;
54393 +
54394 + if (unlikely(!dentry->d_inode))
54395 + return reqmode;
54396 +
54397 + if (acc_mode & MAY_APPEND)
54398 + reqmode |= GR_APPEND;
54399 + else if (acc_mode & MAY_WRITE)
54400 + reqmode |= GR_WRITE;
54401 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
54402 + reqmode |= GR_READ;
54403 +
54404 + mode =
54405 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54406 + mnt);
54407 +
54408 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54409 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54410 + reqmode & GR_READ ? " reading" : "",
54411 + reqmode & GR_WRITE ? " writing" : reqmode &
54412 + GR_APPEND ? " appending" : "");
54413 + return reqmode;
54414 + } else
54415 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54416 + {
54417 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54418 + reqmode & GR_READ ? " reading" : "",
54419 + reqmode & GR_WRITE ? " writing" : reqmode &
54420 + GR_APPEND ? " appending" : "");
54421 + return 0;
54422 + } else if (unlikely((mode & reqmode) != reqmode))
54423 + return 0;
54424 +
54425 + return reqmode;
54426 +}
54427 +
54428 +__u32
54429 +gr_acl_handle_creat(const struct dentry * dentry,
54430 + const struct dentry * p_dentry,
54431 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54432 + const int imode)
54433 +{
54434 + __u32 reqmode = GR_WRITE | GR_CREATE;
54435 + __u32 mode;
54436 +
54437 + if (acc_mode & MAY_APPEND)
54438 + reqmode |= GR_APPEND;
54439 + // if a directory was required or the directory already exists, then
54440 + // don't count this open as a read
54441 + if ((acc_mode & MAY_READ) &&
54442 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
54443 + reqmode |= GR_READ;
54444 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
54445 + reqmode |= GR_SETID;
54446 +
54447 + mode =
54448 + gr_check_create(dentry, p_dentry, p_mnt,
54449 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54450 +
54451 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54452 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54453 + reqmode & GR_READ ? " reading" : "",
54454 + reqmode & GR_WRITE ? " writing" : reqmode &
54455 + GR_APPEND ? " appending" : "");
54456 + return reqmode;
54457 + } else
54458 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54459 + {
54460 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54461 + reqmode & GR_READ ? " reading" : "",
54462 + reqmode & GR_WRITE ? " writing" : reqmode &
54463 + GR_APPEND ? " appending" : "");
54464 + return 0;
54465 + } else if (unlikely((mode & reqmode) != reqmode))
54466 + return 0;
54467 +
54468 + return reqmode;
54469 +}
54470 +
54471 +__u32
54472 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
54473 + const int fmode)
54474 +{
54475 + __u32 mode, reqmode = GR_FIND;
54476 +
54477 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
54478 + reqmode |= GR_EXEC;
54479 + if (fmode & S_IWOTH)
54480 + reqmode |= GR_WRITE;
54481 + if (fmode & S_IROTH)
54482 + reqmode |= GR_READ;
54483 +
54484 + mode =
54485 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54486 + mnt);
54487 +
54488 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54489 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54490 + reqmode & GR_READ ? " reading" : "",
54491 + reqmode & GR_WRITE ? " writing" : "",
54492 + reqmode & GR_EXEC ? " executing" : "");
54493 + return reqmode;
54494 + } else
54495 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54496 + {
54497 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54498 + reqmode & GR_READ ? " reading" : "",
54499 + reqmode & GR_WRITE ? " writing" : "",
54500 + reqmode & GR_EXEC ? " executing" : "");
54501 + return 0;
54502 + } else if (unlikely((mode & reqmode) != reqmode))
54503 + return 0;
54504 +
54505 + return reqmode;
54506 +}
54507 +
54508 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
54509 +{
54510 + __u32 mode;
54511 +
54512 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
54513 +
54514 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54515 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
54516 + return mode;
54517 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54518 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
54519 + return 0;
54520 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
54521 + return 0;
54522 +
54523 + return (reqmode);
54524 +}
54525 +
54526 +__u32
54527 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54528 +{
54529 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
54530 +}
54531 +
54532 +__u32
54533 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
54534 +{
54535 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
54536 +}
54537 +
54538 +__u32
54539 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
54540 +{
54541 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
54542 +}
54543 +
54544 +__u32
54545 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
54546 +{
54547 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
54548 +}
54549 +
54550 +__u32
54551 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
54552 + umode_t *modeptr)
54553 +{
54554 + umode_t mode;
54555 +
54556 + *modeptr &= ~gr_acl_umask();
54557 + mode = *modeptr;
54558 +
54559 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
54560 + return 1;
54561 +
54562 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
54563 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
54564 + GR_CHMOD_ACL_MSG);
54565 + } else {
54566 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
54567 + }
54568 +}
54569 +
54570 +__u32
54571 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
54572 +{
54573 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
54574 +}
54575 +
54576 +__u32
54577 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
54578 +{
54579 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
54580 +}
54581 +
54582 +__u32
54583 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
54584 +{
54585 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
54586 +}
54587 +
54588 +__u32
54589 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
54590 +{
54591 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
54592 + GR_UNIXCONNECT_ACL_MSG);
54593 +}
54594 +
54595 +/* hardlinks require at minimum create and link permission,
54596 + any additional privilege required is based on the
54597 + privilege of the file being linked to
54598 +*/
54599 +__u32
54600 +gr_acl_handle_link(const struct dentry * new_dentry,
54601 + const struct dentry * parent_dentry,
54602 + const struct vfsmount * parent_mnt,
54603 + const struct dentry * old_dentry,
54604 + const struct vfsmount * old_mnt, const char *to)
54605 +{
54606 + __u32 mode;
54607 + __u32 needmode = GR_CREATE | GR_LINK;
54608 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
54609 +
54610 + mode =
54611 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
54612 + old_mnt);
54613 +
54614 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
54615 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54616 + return mode;
54617 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54618 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54619 + return 0;
54620 + } else if (unlikely((mode & needmode) != needmode))
54621 + return 0;
54622 +
54623 + return 1;
54624 +}
54625 +
54626 +__u32
54627 +gr_acl_handle_symlink(const struct dentry * new_dentry,
54628 + const struct dentry * parent_dentry,
54629 + const struct vfsmount * parent_mnt, const char *from)
54630 +{
54631 + __u32 needmode = GR_WRITE | GR_CREATE;
54632 + __u32 mode;
54633 +
54634 + mode =
54635 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
54636 + GR_CREATE | GR_AUDIT_CREATE |
54637 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
54638 +
54639 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
54640 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54641 + return mode;
54642 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54643 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54644 + return 0;
54645 + } else if (unlikely((mode & needmode) != needmode))
54646 + return 0;
54647 +
54648 + return (GR_WRITE | GR_CREATE);
54649 +}
54650 +
54651 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
54652 +{
54653 + __u32 mode;
54654 +
54655 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54656 +
54657 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54658 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
54659 + return mode;
54660 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54661 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
54662 + return 0;
54663 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
54664 + return 0;
54665 +
54666 + return (reqmode);
54667 +}
54668 +
54669 +__u32
54670 +gr_acl_handle_mknod(const struct dentry * new_dentry,
54671 + const struct dentry * parent_dentry,
54672 + const struct vfsmount * parent_mnt,
54673 + const int mode)
54674 +{
54675 + __u32 reqmode = GR_WRITE | GR_CREATE;
54676 + if (unlikely(mode & (S_ISUID | S_ISGID)))
54677 + reqmode |= GR_SETID;
54678 +
54679 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54680 + reqmode, GR_MKNOD_ACL_MSG);
54681 +}
54682 +
54683 +__u32
54684 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
54685 + const struct dentry *parent_dentry,
54686 + const struct vfsmount *parent_mnt)
54687 +{
54688 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54689 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
54690 +}
54691 +
54692 +#define RENAME_CHECK_SUCCESS(old, new) \
54693 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
54694 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
54695 +
54696 +int
54697 +gr_acl_handle_rename(struct dentry *new_dentry,
54698 + struct dentry *parent_dentry,
54699 + const struct vfsmount *parent_mnt,
54700 + struct dentry *old_dentry,
54701 + struct inode *old_parent_inode,
54702 + struct vfsmount *old_mnt, const char *newname)
54703 +{
54704 + __u32 comp1, comp2;
54705 + int error = 0;
54706 +
54707 + if (unlikely(!gr_acl_is_enabled()))
54708 + return 0;
54709 +
54710 + if (!new_dentry->d_inode) {
54711 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
54712 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
54713 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
54714 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
54715 + GR_DELETE | GR_AUDIT_DELETE |
54716 + GR_AUDIT_READ | GR_AUDIT_WRITE |
54717 + GR_SUPPRESS, old_mnt);
54718 + } else {
54719 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
54720 + GR_CREATE | GR_DELETE |
54721 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
54722 + GR_AUDIT_READ | GR_AUDIT_WRITE |
54723 + GR_SUPPRESS, parent_mnt);
54724 + comp2 =
54725 + gr_search_file(old_dentry,
54726 + GR_READ | GR_WRITE | GR_AUDIT_READ |
54727 + GR_DELETE | GR_AUDIT_DELETE |
54728 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
54729 + }
54730 +
54731 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
54732 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
54733 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54734 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
54735 + && !(comp2 & GR_SUPPRESS)) {
54736 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54737 + error = -EACCES;
54738 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
54739 + error = -EACCES;
54740 +
54741 + return error;
54742 +}
54743 +
54744 +void
54745 +gr_acl_handle_exit(void)
54746 +{
54747 + u16 id;
54748 + char *rolename;
54749 + struct file *exec_file;
54750 +
54751 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
54752 + !(current->role->roletype & GR_ROLE_PERSIST))) {
54753 + id = current->acl_role_id;
54754 + rolename = current->role->rolename;
54755 + gr_set_acls(1);
54756 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
54757 + }
54758 +
54759 + write_lock(&grsec_exec_file_lock);
54760 + exec_file = current->exec_file;
54761 + current->exec_file = NULL;
54762 + write_unlock(&grsec_exec_file_lock);
54763 +
54764 + if (exec_file)
54765 + fput(exec_file);
54766 +}
54767 +
54768 +int
54769 +gr_acl_handle_procpidmem(const struct task_struct *task)
54770 +{
54771 + if (unlikely(!gr_acl_is_enabled()))
54772 + return 0;
54773 +
54774 + if (task != current && task->acl->mode & GR_PROTPROCFD)
54775 + return -EACCES;
54776 +
54777 + return 0;
54778 +}
54779 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
54780 new file mode 100644
54781 index 0000000..58800a7
54782 --- /dev/null
54783 +++ b/grsecurity/gracl_ip.c
54784 @@ -0,0 +1,384 @@
54785 +#include <linux/kernel.h>
54786 +#include <asm/uaccess.h>
54787 +#include <asm/errno.h>
54788 +#include <net/sock.h>
54789 +#include <linux/file.h>
54790 +#include <linux/fs.h>
54791 +#include <linux/net.h>
54792 +#include <linux/in.h>
54793 +#include <linux/skbuff.h>
54794 +#include <linux/ip.h>
54795 +#include <linux/udp.h>
54796 +#include <linux/types.h>
54797 +#include <linux/sched.h>
54798 +#include <linux/netdevice.h>
54799 +#include <linux/inetdevice.h>
54800 +#include <linux/gracl.h>
54801 +#include <linux/grsecurity.h>
54802 +#include <linux/grinternal.h>
54803 +
54804 +#define GR_BIND 0x01
54805 +#define GR_CONNECT 0x02
54806 +#define GR_INVERT 0x04
54807 +#define GR_BINDOVERRIDE 0x08
54808 +#define GR_CONNECTOVERRIDE 0x10
54809 +#define GR_SOCK_FAMILY 0x20
54810 +
54811 +static const char * gr_protocols[IPPROTO_MAX] = {
54812 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
54813 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
54814 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
54815 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
54816 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
54817 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
54818 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
54819 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
54820 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
54821 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
54822 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
54823 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
54824 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
54825 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
54826 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
54827 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
54828 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
54829 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
54830 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
54831 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
54832 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
54833 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
54834 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
54835 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
54836 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
54837 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
54838 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
54839 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
54840 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
54841 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
54842 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
54843 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
54844 + };
54845 +
54846 +static const char * gr_socktypes[SOCK_MAX] = {
54847 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
54848 + "unknown:7", "unknown:8", "unknown:9", "packet"
54849 + };
54850 +
54851 +static const char * gr_sockfamilies[AF_MAX+1] = {
54852 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
54853 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
54854 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
54855 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
54856 + };
54857 +
54858 +const char *
54859 +gr_proto_to_name(unsigned char proto)
54860 +{
54861 + return gr_protocols[proto];
54862 +}
54863 +
54864 +const char *
54865 +gr_socktype_to_name(unsigned char type)
54866 +{
54867 + return gr_socktypes[type];
54868 +}
54869 +
54870 +const char *
54871 +gr_sockfamily_to_name(unsigned char family)
54872 +{
54873 + return gr_sockfamilies[family];
54874 +}
54875 +
54876 +int
54877 +gr_search_socket(const int domain, const int type, const int protocol)
54878 +{
54879 + struct acl_subject_label *curr;
54880 + const struct cred *cred = current_cred();
54881 +
54882 + if (unlikely(!gr_acl_is_enabled()))
54883 + goto exit;
54884 +
54885 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
54886 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
54887 + goto exit; // let the kernel handle it
54888 +
54889 + curr = current->acl;
54890 +
54891 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
54892 + /* the family is allowed, if this is PF_INET allow it only if
54893 + the extra sock type/protocol checks pass */
54894 + if (domain == PF_INET)
54895 + goto inet_check;
54896 + goto exit;
54897 + } else {
54898 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54899 + __u32 fakeip = 0;
54900 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54901 + current->role->roletype, cred->uid,
54902 + cred->gid, current->exec_file ?
54903 + gr_to_filename(current->exec_file->f_path.dentry,
54904 + current->exec_file->f_path.mnt) :
54905 + curr->filename, curr->filename,
54906 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
54907 + &current->signal->saved_ip);
54908 + goto exit;
54909 + }
54910 + goto exit_fail;
54911 + }
54912 +
54913 +inet_check:
54914 + /* the rest of this checking is for IPv4 only */
54915 + if (!curr->ips)
54916 + goto exit;
54917 +
54918 + if ((curr->ip_type & (1 << type)) &&
54919 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
54920 + goto exit;
54921 +
54922 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54923 + /* we don't place acls on raw sockets , and sometimes
54924 + dgram/ip sockets are opened for ioctl and not
54925 + bind/connect, so we'll fake a bind learn log */
54926 + if (type == SOCK_RAW || type == SOCK_PACKET) {
54927 + __u32 fakeip = 0;
54928 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54929 + current->role->roletype, cred->uid,
54930 + cred->gid, current->exec_file ?
54931 + gr_to_filename(current->exec_file->f_path.dentry,
54932 + current->exec_file->f_path.mnt) :
54933 + curr->filename, curr->filename,
54934 + &fakeip, 0, type,
54935 + protocol, GR_CONNECT, &current->signal->saved_ip);
54936 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
54937 + __u32 fakeip = 0;
54938 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54939 + current->role->roletype, cred->uid,
54940 + cred->gid, current->exec_file ?
54941 + gr_to_filename(current->exec_file->f_path.dentry,
54942 + current->exec_file->f_path.mnt) :
54943 + curr->filename, curr->filename,
54944 + &fakeip, 0, type,
54945 + protocol, GR_BIND, &current->signal->saved_ip);
54946 + }
54947 + /* we'll log when they use connect or bind */
54948 + goto exit;
54949 + }
54950 +
54951 +exit_fail:
54952 + if (domain == PF_INET)
54953 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
54954 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
54955 + else
54956 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
54957 + gr_socktype_to_name(type), protocol);
54958 +
54959 + return 0;
54960 +exit:
54961 + return 1;
54962 +}
54963 +
54964 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
54965 +{
54966 + if ((ip->mode & mode) &&
54967 + (ip_port >= ip->low) &&
54968 + (ip_port <= ip->high) &&
54969 + ((ntohl(ip_addr) & our_netmask) ==
54970 + (ntohl(our_addr) & our_netmask))
54971 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
54972 + && (ip->type & (1 << type))) {
54973 + if (ip->mode & GR_INVERT)
54974 + return 2; // specifically denied
54975 + else
54976 + return 1; // allowed
54977 + }
54978 +
54979 + return 0; // not specifically allowed, may continue parsing
54980 +}
54981 +
54982 +static int
54983 +gr_search_connectbind(const int full_mode, struct sock *sk,
54984 + struct sockaddr_in *addr, const int type)
54985 +{
54986 + char iface[IFNAMSIZ] = {0};
54987 + struct acl_subject_label *curr;
54988 + struct acl_ip_label *ip;
54989 + struct inet_sock *isk;
54990 + struct net_device *dev;
54991 + struct in_device *idev;
54992 + unsigned long i;
54993 + int ret;
54994 + int mode = full_mode & (GR_BIND | GR_CONNECT);
54995 + __u32 ip_addr = 0;
54996 + __u32 our_addr;
54997 + __u32 our_netmask;
54998 + char *p;
54999 + __u16 ip_port = 0;
55000 + const struct cred *cred = current_cred();
55001 +
55002 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
55003 + return 0;
55004 +
55005 + curr = current->acl;
55006 + isk = inet_sk(sk);
55007 +
55008 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
55009 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
55010 + addr->sin_addr.s_addr = curr->inaddr_any_override;
55011 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
55012 + struct sockaddr_in saddr;
55013 + int err;
55014 +
55015 + saddr.sin_family = AF_INET;
55016 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
55017 + saddr.sin_port = isk->inet_sport;
55018 +
55019 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55020 + if (err)
55021 + return err;
55022 +
55023 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55024 + if (err)
55025 + return err;
55026 + }
55027 +
55028 + if (!curr->ips)
55029 + return 0;
55030 +
55031 + ip_addr = addr->sin_addr.s_addr;
55032 + ip_port = ntohs(addr->sin_port);
55033 +
55034 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55035 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55036 + current->role->roletype, cred->uid,
55037 + cred->gid, current->exec_file ?
55038 + gr_to_filename(current->exec_file->f_path.dentry,
55039 + current->exec_file->f_path.mnt) :
55040 + curr->filename, curr->filename,
55041 + &ip_addr, ip_port, type,
55042 + sk->sk_protocol, mode, &current->signal->saved_ip);
55043 + return 0;
55044 + }
55045 +
55046 + for (i = 0; i < curr->ip_num; i++) {
55047 + ip = *(curr->ips + i);
55048 + if (ip->iface != NULL) {
55049 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
55050 + p = strchr(iface, ':');
55051 + if (p != NULL)
55052 + *p = '\0';
55053 + dev = dev_get_by_name(sock_net(sk), iface);
55054 + if (dev == NULL)
55055 + continue;
55056 + idev = in_dev_get(dev);
55057 + if (idev == NULL) {
55058 + dev_put(dev);
55059 + continue;
55060 + }
55061 + rcu_read_lock();
55062 + for_ifa(idev) {
55063 + if (!strcmp(ip->iface, ifa->ifa_label)) {
55064 + our_addr = ifa->ifa_address;
55065 + our_netmask = 0xffffffff;
55066 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55067 + if (ret == 1) {
55068 + rcu_read_unlock();
55069 + in_dev_put(idev);
55070 + dev_put(dev);
55071 + return 0;
55072 + } else if (ret == 2) {
55073 + rcu_read_unlock();
55074 + in_dev_put(idev);
55075 + dev_put(dev);
55076 + goto denied;
55077 + }
55078 + }
55079 + } endfor_ifa(idev);
55080 + rcu_read_unlock();
55081 + in_dev_put(idev);
55082 + dev_put(dev);
55083 + } else {
55084 + our_addr = ip->addr;
55085 + our_netmask = ip->netmask;
55086 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55087 + if (ret == 1)
55088 + return 0;
55089 + else if (ret == 2)
55090 + goto denied;
55091 + }
55092 + }
55093 +
55094 +denied:
55095 + if (mode == GR_BIND)
55096 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55097 + else if (mode == GR_CONNECT)
55098 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55099 +
55100 + return -EACCES;
55101 +}
55102 +
55103 +int
55104 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
55105 +{
55106 + /* always allow disconnection of dgram sockets with connect */
55107 + if (addr->sin_family == AF_UNSPEC)
55108 + return 0;
55109 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
55110 +}
55111 +
55112 +int
55113 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
55114 +{
55115 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
55116 +}
55117 +
55118 +int gr_search_listen(struct socket *sock)
55119 +{
55120 + struct sock *sk = sock->sk;
55121 + struct sockaddr_in addr;
55122 +
55123 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55124 + addr.sin_port = inet_sk(sk)->inet_sport;
55125 +
55126 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55127 +}
55128 +
55129 +int gr_search_accept(struct socket *sock)
55130 +{
55131 + struct sock *sk = sock->sk;
55132 + struct sockaddr_in addr;
55133 +
55134 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55135 + addr.sin_port = inet_sk(sk)->inet_sport;
55136 +
55137 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55138 +}
55139 +
55140 +int
55141 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55142 +{
55143 + if (addr)
55144 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55145 + else {
55146 + struct sockaddr_in sin;
55147 + const struct inet_sock *inet = inet_sk(sk);
55148 +
55149 + sin.sin_addr.s_addr = inet->inet_daddr;
55150 + sin.sin_port = inet->inet_dport;
55151 +
55152 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55153 + }
55154 +}
55155 +
55156 +int
55157 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55158 +{
55159 + struct sockaddr_in sin;
55160 +
55161 + if (unlikely(skb->len < sizeof (struct udphdr)))
55162 + return 0; // skip this packet
55163 +
55164 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55165 + sin.sin_port = udp_hdr(skb)->source;
55166 +
55167 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55168 +}
55169 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
55170 new file mode 100644
55171 index 0000000..25f54ef
55172 --- /dev/null
55173 +++ b/grsecurity/gracl_learn.c
55174 @@ -0,0 +1,207 @@
55175 +#include <linux/kernel.h>
55176 +#include <linux/mm.h>
55177 +#include <linux/sched.h>
55178 +#include <linux/poll.h>
55179 +#include <linux/string.h>
55180 +#include <linux/file.h>
55181 +#include <linux/types.h>
55182 +#include <linux/vmalloc.h>
55183 +#include <linux/grinternal.h>
55184 +
55185 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55186 + size_t count, loff_t *ppos);
55187 +extern int gr_acl_is_enabled(void);
55188 +
55189 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55190 +static int gr_learn_attached;
55191 +
55192 +/* use a 512k buffer */
55193 +#define LEARN_BUFFER_SIZE (512 * 1024)
55194 +
55195 +static DEFINE_SPINLOCK(gr_learn_lock);
55196 +static DEFINE_MUTEX(gr_learn_user_mutex);
55197 +
55198 +/* we need to maintain two buffers, so that the kernel context of grlearn
55199 + uses a semaphore around the userspace copying, and the other kernel contexts
55200 + use a spinlock when copying into the buffer, since they cannot sleep
55201 +*/
55202 +static char *learn_buffer;
55203 +static char *learn_buffer_user;
55204 +static int learn_buffer_len;
55205 +static int learn_buffer_user_len;
55206 +
55207 +static ssize_t
55208 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55209 +{
55210 + DECLARE_WAITQUEUE(wait, current);
55211 + ssize_t retval = 0;
55212 +
55213 + add_wait_queue(&learn_wait, &wait);
55214 + set_current_state(TASK_INTERRUPTIBLE);
55215 + do {
55216 + mutex_lock(&gr_learn_user_mutex);
55217 + spin_lock(&gr_learn_lock);
55218 + if (learn_buffer_len)
55219 + break;
55220 + spin_unlock(&gr_learn_lock);
55221 + mutex_unlock(&gr_learn_user_mutex);
55222 + if (file->f_flags & O_NONBLOCK) {
55223 + retval = -EAGAIN;
55224 + goto out;
55225 + }
55226 + if (signal_pending(current)) {
55227 + retval = -ERESTARTSYS;
55228 + goto out;
55229 + }
55230 +
55231 + schedule();
55232 + } while (1);
55233 +
55234 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55235 + learn_buffer_user_len = learn_buffer_len;
55236 + retval = learn_buffer_len;
55237 + learn_buffer_len = 0;
55238 +
55239 + spin_unlock(&gr_learn_lock);
55240 +
55241 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55242 + retval = -EFAULT;
55243 +
55244 + mutex_unlock(&gr_learn_user_mutex);
55245 +out:
55246 + set_current_state(TASK_RUNNING);
55247 + remove_wait_queue(&learn_wait, &wait);
55248 + return retval;
55249 +}
55250 +
55251 +static unsigned int
55252 +poll_learn(struct file * file, poll_table * wait)
55253 +{
55254 + poll_wait(file, &learn_wait, wait);
55255 +
55256 + if (learn_buffer_len)
55257 + return (POLLIN | POLLRDNORM);
55258 +
55259 + return 0;
55260 +}
55261 +
55262 +void
55263 +gr_clear_learn_entries(void)
55264 +{
55265 + char *tmp;
55266 +
55267 + mutex_lock(&gr_learn_user_mutex);
55268 + spin_lock(&gr_learn_lock);
55269 + tmp = learn_buffer;
55270 + learn_buffer = NULL;
55271 + spin_unlock(&gr_learn_lock);
55272 + if (tmp)
55273 + vfree(tmp);
55274 + if (learn_buffer_user != NULL) {
55275 + vfree(learn_buffer_user);
55276 + learn_buffer_user = NULL;
55277 + }
55278 + learn_buffer_len = 0;
55279 + mutex_unlock(&gr_learn_user_mutex);
55280 +
55281 + return;
55282 +}
55283 +
55284 +void
55285 +gr_add_learn_entry(const char *fmt, ...)
55286 +{
55287 + va_list args;
55288 + unsigned int len;
55289 +
55290 + if (!gr_learn_attached)
55291 + return;
55292 +
55293 + spin_lock(&gr_learn_lock);
55294 +
55295 + /* leave a gap at the end so we know when it's "full" but don't have to
55296 + compute the exact length of the string we're trying to append
55297 + */
55298 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
55299 + spin_unlock(&gr_learn_lock);
55300 + wake_up_interruptible(&learn_wait);
55301 + return;
55302 + }
55303 + if (learn_buffer == NULL) {
55304 + spin_unlock(&gr_learn_lock);
55305 + return;
55306 + }
55307 +
55308 + va_start(args, fmt);
55309 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
55310 + va_end(args);
55311 +
55312 + learn_buffer_len += len + 1;
55313 +
55314 + spin_unlock(&gr_learn_lock);
55315 + wake_up_interruptible(&learn_wait);
55316 +
55317 + return;
55318 +}
55319 +
55320 +static int
55321 +open_learn(struct inode *inode, struct file *file)
55322 +{
55323 + if (file->f_mode & FMODE_READ && gr_learn_attached)
55324 + return -EBUSY;
55325 + if (file->f_mode & FMODE_READ) {
55326 + int retval = 0;
55327 + mutex_lock(&gr_learn_user_mutex);
55328 + if (learn_buffer == NULL)
55329 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
55330 + if (learn_buffer_user == NULL)
55331 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
55332 + if (learn_buffer == NULL) {
55333 + retval = -ENOMEM;
55334 + goto out_error;
55335 + }
55336 + if (learn_buffer_user == NULL) {
55337 + retval = -ENOMEM;
55338 + goto out_error;
55339 + }
55340 + learn_buffer_len = 0;
55341 + learn_buffer_user_len = 0;
55342 + gr_learn_attached = 1;
55343 +out_error:
55344 + mutex_unlock(&gr_learn_user_mutex);
55345 + return retval;
55346 + }
55347 + return 0;
55348 +}
55349 +
55350 +static int
55351 +close_learn(struct inode *inode, struct file *file)
55352 +{
55353 + if (file->f_mode & FMODE_READ) {
55354 + char *tmp = NULL;
55355 + mutex_lock(&gr_learn_user_mutex);
55356 + spin_lock(&gr_learn_lock);
55357 + tmp = learn_buffer;
55358 + learn_buffer = NULL;
55359 + spin_unlock(&gr_learn_lock);
55360 + if (tmp)
55361 + vfree(tmp);
55362 + if (learn_buffer_user != NULL) {
55363 + vfree(learn_buffer_user);
55364 + learn_buffer_user = NULL;
55365 + }
55366 + learn_buffer_len = 0;
55367 + learn_buffer_user_len = 0;
55368 + gr_learn_attached = 0;
55369 + mutex_unlock(&gr_learn_user_mutex);
55370 + }
55371 +
55372 + return 0;
55373 +}
55374 +
55375 +const struct file_operations grsec_fops = {
55376 + .read = read_learn,
55377 + .write = write_grsec_handler,
55378 + .open = open_learn,
55379 + .release = close_learn,
55380 + .poll = poll_learn,
55381 +};
55382 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
55383 new file mode 100644
55384 index 0000000..39645c9
55385 --- /dev/null
55386 +++ b/grsecurity/gracl_res.c
55387 @@ -0,0 +1,68 @@
55388 +#include <linux/kernel.h>
55389 +#include <linux/sched.h>
55390 +#include <linux/gracl.h>
55391 +#include <linux/grinternal.h>
55392 +
55393 +static const char *restab_log[] = {
55394 + [RLIMIT_CPU] = "RLIMIT_CPU",
55395 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
55396 + [RLIMIT_DATA] = "RLIMIT_DATA",
55397 + [RLIMIT_STACK] = "RLIMIT_STACK",
55398 + [RLIMIT_CORE] = "RLIMIT_CORE",
55399 + [RLIMIT_RSS] = "RLIMIT_RSS",
55400 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
55401 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
55402 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
55403 + [RLIMIT_AS] = "RLIMIT_AS",
55404 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
55405 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
55406 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
55407 + [RLIMIT_NICE] = "RLIMIT_NICE",
55408 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
55409 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
55410 + [GR_CRASH_RES] = "RLIMIT_CRASH"
55411 +};
55412 +
55413 +void
55414 +gr_log_resource(const struct task_struct *task,
55415 + const int res, const unsigned long wanted, const int gt)
55416 +{
55417 + const struct cred *cred;
55418 + unsigned long rlim;
55419 +
55420 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
55421 + return;
55422 +
55423 + // not yet supported resource
55424 + if (unlikely(!restab_log[res]))
55425 + return;
55426 +
55427 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
55428 + rlim = task_rlimit_max(task, res);
55429 + else
55430 + rlim = task_rlimit(task, res);
55431 +
55432 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
55433 + return;
55434 +
55435 + rcu_read_lock();
55436 + cred = __task_cred(task);
55437 +
55438 + if (res == RLIMIT_NPROC &&
55439 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
55440 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
55441 + goto out_rcu_unlock;
55442 + else if (res == RLIMIT_MEMLOCK &&
55443 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
55444 + goto out_rcu_unlock;
55445 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
55446 + goto out_rcu_unlock;
55447 + rcu_read_unlock();
55448 +
55449 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
55450 +
55451 + return;
55452 +out_rcu_unlock:
55453 + rcu_read_unlock();
55454 + return;
55455 +}
55456 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
55457 new file mode 100644
55458 index 0000000..5556be3
55459 --- /dev/null
55460 +++ b/grsecurity/gracl_segv.c
55461 @@ -0,0 +1,299 @@
55462 +#include <linux/kernel.h>
55463 +#include <linux/mm.h>
55464 +#include <asm/uaccess.h>
55465 +#include <asm/errno.h>
55466 +#include <asm/mman.h>
55467 +#include <net/sock.h>
55468 +#include <linux/file.h>
55469 +#include <linux/fs.h>
55470 +#include <linux/net.h>
55471 +#include <linux/in.h>
55472 +#include <linux/slab.h>
55473 +#include <linux/types.h>
55474 +#include <linux/sched.h>
55475 +#include <linux/timer.h>
55476 +#include <linux/gracl.h>
55477 +#include <linux/grsecurity.h>
55478 +#include <linux/grinternal.h>
55479 +
55480 +static struct crash_uid *uid_set;
55481 +static unsigned short uid_used;
55482 +static DEFINE_SPINLOCK(gr_uid_lock);
55483 +extern rwlock_t gr_inode_lock;
55484 +extern struct acl_subject_label *
55485 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
55486 + struct acl_role_label *role);
55487 +
55488 +#ifdef CONFIG_BTRFS_FS
55489 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
55490 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
55491 +#endif
55492 +
55493 +static inline dev_t __get_dev(const struct dentry *dentry)
55494 +{
55495 +#ifdef CONFIG_BTRFS_FS
55496 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
55497 + return get_btrfs_dev_from_inode(dentry->d_inode);
55498 + else
55499 +#endif
55500 + return dentry->d_inode->i_sb->s_dev;
55501 +}
55502 +
55503 +int
55504 +gr_init_uidset(void)
55505 +{
55506 + uid_set =
55507 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
55508 + uid_used = 0;
55509 +
55510 + return uid_set ? 1 : 0;
55511 +}
55512 +
55513 +void
55514 +gr_free_uidset(void)
55515 +{
55516 + if (uid_set)
55517 + kfree(uid_set);
55518 +
55519 + return;
55520 +}
55521 +
55522 +int
55523 +gr_find_uid(const uid_t uid)
55524 +{
55525 + struct crash_uid *tmp = uid_set;
55526 + uid_t buid;
55527 + int low = 0, high = uid_used - 1, mid;
55528 +
55529 + while (high >= low) {
55530 + mid = (low + high) >> 1;
55531 + buid = tmp[mid].uid;
55532 + if (buid == uid)
55533 + return mid;
55534 + if (buid > uid)
55535 + high = mid - 1;
55536 + if (buid < uid)
55537 + low = mid + 1;
55538 + }
55539 +
55540 + return -1;
55541 +}
55542 +
55543 +static __inline__ void
55544 +gr_insertsort(void)
55545 +{
55546 + unsigned short i, j;
55547 + struct crash_uid index;
55548 +
55549 + for (i = 1; i < uid_used; i++) {
55550 + index = uid_set[i];
55551 + j = i;
55552 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
55553 + uid_set[j] = uid_set[j - 1];
55554 + j--;
55555 + }
55556 + uid_set[j] = index;
55557 + }
55558 +
55559 + return;
55560 +}
55561 +
55562 +static __inline__ void
55563 +gr_insert_uid(const uid_t uid, const unsigned long expires)
55564 +{
55565 + int loc;
55566 +
55567 + if (uid_used == GR_UIDTABLE_MAX)
55568 + return;
55569 +
55570 + loc = gr_find_uid(uid);
55571 +
55572 + if (loc >= 0) {
55573 + uid_set[loc].expires = expires;
55574 + return;
55575 + }
55576 +
55577 + uid_set[uid_used].uid = uid;
55578 + uid_set[uid_used].expires = expires;
55579 + uid_used++;
55580 +
55581 + gr_insertsort();
55582 +
55583 + return;
55584 +}
55585 +
55586 +void
55587 +gr_remove_uid(const unsigned short loc)
55588 +{
55589 + unsigned short i;
55590 +
55591 + for (i = loc + 1; i < uid_used; i++)
55592 + uid_set[i - 1] = uid_set[i];
55593 +
55594 + uid_used--;
55595 +
55596 + return;
55597 +}
55598 +
55599 +int
55600 +gr_check_crash_uid(const uid_t uid)
55601 +{
55602 + int loc;
55603 + int ret = 0;
55604 +
55605 + if (unlikely(!gr_acl_is_enabled()))
55606 + return 0;
55607 +
55608 + spin_lock(&gr_uid_lock);
55609 + loc = gr_find_uid(uid);
55610 +
55611 + if (loc < 0)
55612 + goto out_unlock;
55613 +
55614 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
55615 + gr_remove_uid(loc);
55616 + else
55617 + ret = 1;
55618 +
55619 +out_unlock:
55620 + spin_unlock(&gr_uid_lock);
55621 + return ret;
55622 +}
55623 +
55624 +static __inline__ int
55625 +proc_is_setxid(const struct cred *cred)
55626 +{
55627 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
55628 + cred->uid != cred->fsuid)
55629 + return 1;
55630 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
55631 + cred->gid != cred->fsgid)
55632 + return 1;
55633 +
55634 + return 0;
55635 +}
55636 +
55637 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
55638 +
55639 +void
55640 +gr_handle_crash(struct task_struct *task, const int sig)
55641 +{
55642 + struct acl_subject_label *curr;
55643 + struct task_struct *tsk, *tsk2;
55644 + const struct cred *cred;
55645 + const struct cred *cred2;
55646 +
55647 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
55648 + return;
55649 +
55650 + if (unlikely(!gr_acl_is_enabled()))
55651 + return;
55652 +
55653 + curr = task->acl;
55654 +
55655 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
55656 + return;
55657 +
55658 + if (time_before_eq(curr->expires, get_seconds())) {
55659 + curr->expires = 0;
55660 + curr->crashes = 0;
55661 + }
55662 +
55663 + curr->crashes++;
55664 +
55665 + if (!curr->expires)
55666 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
55667 +
55668 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55669 + time_after(curr->expires, get_seconds())) {
55670 + rcu_read_lock();
55671 + cred = __task_cred(task);
55672 + if (cred->uid && proc_is_setxid(cred)) {
55673 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55674 + spin_lock(&gr_uid_lock);
55675 + gr_insert_uid(cred->uid, curr->expires);
55676 + spin_unlock(&gr_uid_lock);
55677 + curr->expires = 0;
55678 + curr->crashes = 0;
55679 + read_lock(&tasklist_lock);
55680 + do_each_thread(tsk2, tsk) {
55681 + cred2 = __task_cred(tsk);
55682 + if (tsk != task && cred2->uid == cred->uid)
55683 + gr_fake_force_sig(SIGKILL, tsk);
55684 + } while_each_thread(tsk2, tsk);
55685 + read_unlock(&tasklist_lock);
55686 + } else {
55687 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55688 + read_lock(&tasklist_lock);
55689 + read_lock(&grsec_exec_file_lock);
55690 + do_each_thread(tsk2, tsk) {
55691 + if (likely(tsk != task)) {
55692 + // if this thread has the same subject as the one that triggered
55693 + // RES_CRASH and it's the same binary, kill it
55694 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
55695 + gr_fake_force_sig(SIGKILL, tsk);
55696 + }
55697 + } while_each_thread(tsk2, tsk);
55698 + read_unlock(&grsec_exec_file_lock);
55699 + read_unlock(&tasklist_lock);
55700 + }
55701 + rcu_read_unlock();
55702 + }
55703 +
55704 + return;
55705 +}
55706 +
55707 +int
55708 +gr_check_crash_exec(const struct file *filp)
55709 +{
55710 + struct acl_subject_label *curr;
55711 +
55712 + if (unlikely(!gr_acl_is_enabled()))
55713 + return 0;
55714 +
55715 + read_lock(&gr_inode_lock);
55716 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
55717 + __get_dev(filp->f_path.dentry),
55718 + current->role);
55719 + read_unlock(&gr_inode_lock);
55720 +
55721 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
55722 + (!curr->crashes && !curr->expires))
55723 + return 0;
55724 +
55725 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55726 + time_after(curr->expires, get_seconds()))
55727 + return 1;
55728 + else if (time_before_eq(curr->expires, get_seconds())) {
55729 + curr->crashes = 0;
55730 + curr->expires = 0;
55731 + }
55732 +
55733 + return 0;
55734 +}
55735 +
55736 +void
55737 +gr_handle_alertkill(struct task_struct *task)
55738 +{
55739 + struct acl_subject_label *curracl;
55740 + __u32 curr_ip;
55741 + struct task_struct *p, *p2;
55742 +
55743 + if (unlikely(!gr_acl_is_enabled()))
55744 + return;
55745 +
55746 + curracl = task->acl;
55747 + curr_ip = task->signal->curr_ip;
55748 +
55749 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
55750 + read_lock(&tasklist_lock);
55751 + do_each_thread(p2, p) {
55752 + if (p->signal->curr_ip == curr_ip)
55753 + gr_fake_force_sig(SIGKILL, p);
55754 + } while_each_thread(p2, p);
55755 + read_unlock(&tasklist_lock);
55756 + } else if (curracl->mode & GR_KILLPROC)
55757 + gr_fake_force_sig(SIGKILL, task);
55758 +
55759 + return;
55760 +}
55761 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
55762 new file mode 100644
55763 index 0000000..9d83a69
55764 --- /dev/null
55765 +++ b/grsecurity/gracl_shm.c
55766 @@ -0,0 +1,40 @@
55767 +#include <linux/kernel.h>
55768 +#include <linux/mm.h>
55769 +#include <linux/sched.h>
55770 +#include <linux/file.h>
55771 +#include <linux/ipc.h>
55772 +#include <linux/gracl.h>
55773 +#include <linux/grsecurity.h>
55774 +#include <linux/grinternal.h>
55775 +
55776 +int
55777 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55778 + const time_t shm_createtime, const uid_t cuid, const int shmid)
55779 +{
55780 + struct task_struct *task;
55781 +
55782 + if (!gr_acl_is_enabled())
55783 + return 1;
55784 +
55785 + rcu_read_lock();
55786 + read_lock(&tasklist_lock);
55787 +
55788 + task = find_task_by_vpid(shm_cprid);
55789 +
55790 + if (unlikely(!task))
55791 + task = find_task_by_vpid(shm_lapid);
55792 +
55793 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
55794 + (task->pid == shm_lapid)) &&
55795 + (task->acl->mode & GR_PROTSHM) &&
55796 + (task->acl != current->acl))) {
55797 + read_unlock(&tasklist_lock);
55798 + rcu_read_unlock();
55799 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
55800 + return 0;
55801 + }
55802 + read_unlock(&tasklist_lock);
55803 + rcu_read_unlock();
55804 +
55805 + return 1;
55806 +}
55807 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
55808 new file mode 100644
55809 index 0000000..bc0be01
55810 --- /dev/null
55811 +++ b/grsecurity/grsec_chdir.c
55812 @@ -0,0 +1,19 @@
55813 +#include <linux/kernel.h>
55814 +#include <linux/sched.h>
55815 +#include <linux/fs.h>
55816 +#include <linux/file.h>
55817 +#include <linux/grsecurity.h>
55818 +#include <linux/grinternal.h>
55819 +
55820 +void
55821 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
55822 +{
55823 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55824 + if ((grsec_enable_chdir && grsec_enable_group &&
55825 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
55826 + !grsec_enable_group)) {
55827 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
55828 + }
55829 +#endif
55830 + return;
55831 +}
55832 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
55833 new file mode 100644
55834 index 0000000..9807ee2
55835 --- /dev/null
55836 +++ b/grsecurity/grsec_chroot.c
55837 @@ -0,0 +1,368 @@
55838 +#include <linux/kernel.h>
55839 +#include <linux/module.h>
55840 +#include <linux/sched.h>
55841 +#include <linux/file.h>
55842 +#include <linux/fs.h>
55843 +#include <linux/mount.h>
55844 +#include <linux/types.h>
55845 +#include "../fs/mount.h"
55846 +#include <linux/grsecurity.h>
55847 +#include <linux/grinternal.h>
55848 +
55849 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
55850 +{
55851 +#ifdef CONFIG_GRKERNSEC
55852 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
55853 + path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
55854 + task->gr_is_chrooted = 1;
55855 + else
55856 + task->gr_is_chrooted = 0;
55857 +
55858 + task->gr_chroot_dentry = path->dentry;
55859 +#endif
55860 + return;
55861 +}
55862 +
55863 +void gr_clear_chroot_entries(struct task_struct *task)
55864 +{
55865 +#ifdef CONFIG_GRKERNSEC
55866 + task->gr_is_chrooted = 0;
55867 + task->gr_chroot_dentry = NULL;
55868 +#endif
55869 + return;
55870 +}
55871 +
55872 +int
55873 +gr_handle_chroot_unix(const pid_t pid)
55874 +{
55875 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55876 + struct task_struct *p;
55877 +
55878 + if (unlikely(!grsec_enable_chroot_unix))
55879 + return 1;
55880 +
55881 + if (likely(!proc_is_chrooted(current)))
55882 + return 1;
55883 +
55884 + rcu_read_lock();
55885 + read_lock(&tasklist_lock);
55886 + p = find_task_by_vpid_unrestricted(pid);
55887 + if (unlikely(p && !have_same_root(current, p))) {
55888 + read_unlock(&tasklist_lock);
55889 + rcu_read_unlock();
55890 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
55891 + return 0;
55892 + }
55893 + read_unlock(&tasklist_lock);
55894 + rcu_read_unlock();
55895 +#endif
55896 + return 1;
55897 +}
55898 +
55899 +int
55900 +gr_handle_chroot_nice(void)
55901 +{
55902 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55903 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
55904 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
55905 + return -EPERM;
55906 + }
55907 +#endif
55908 + return 0;
55909 +}
55910 +
55911 +int
55912 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
55913 +{
55914 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55915 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
55916 + && proc_is_chrooted(current)) {
55917 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
55918 + return -EACCES;
55919 + }
55920 +#endif
55921 + return 0;
55922 +}
55923 +
55924 +int
55925 +gr_handle_chroot_rawio(const struct inode *inode)
55926 +{
55927 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55928 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
55929 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
55930 + return 1;
55931 +#endif
55932 + return 0;
55933 +}
55934 +
55935 +int
55936 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
55937 +{
55938 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55939 + struct task_struct *p;
55940 + int ret = 0;
55941 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
55942 + return ret;
55943 +
55944 + read_lock(&tasklist_lock);
55945 + do_each_pid_task(pid, type, p) {
55946 + if (!have_same_root(current, p)) {
55947 + ret = 1;
55948 + goto out;
55949 + }
55950 + } while_each_pid_task(pid, type, p);
55951 +out:
55952 + read_unlock(&tasklist_lock);
55953 + return ret;
55954 +#endif
55955 + return 0;
55956 +}
55957 +
55958 +int
55959 +gr_pid_is_chrooted(struct task_struct *p)
55960 +{
55961 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55962 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
55963 + return 0;
55964 +
55965 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
55966 + !have_same_root(current, p)) {
55967 + return 1;
55968 + }
55969 +#endif
55970 + return 0;
55971 +}
55972 +
55973 +EXPORT_SYMBOL(gr_pid_is_chrooted);
55974 +
55975 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
55976 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
55977 +{
55978 + struct path path, currentroot;
55979 + int ret = 0;
55980 +
55981 + path.dentry = (struct dentry *)u_dentry;
55982 + path.mnt = (struct vfsmount *)u_mnt;
55983 + get_fs_root(current->fs, &currentroot);
55984 + if (path_is_under(&path, &currentroot))
55985 + ret = 1;
55986 + path_put(&currentroot);
55987 +
55988 + return ret;
55989 +}
55990 +#endif
55991 +
55992 +int
55993 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
55994 +{
55995 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55996 + if (!grsec_enable_chroot_fchdir)
55997 + return 1;
55998 +
55999 + if (!proc_is_chrooted(current))
56000 + return 1;
56001 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
56002 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
56003 + return 0;
56004 + }
56005 +#endif
56006 + return 1;
56007 +}
56008 +
56009 +int
56010 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56011 + const time_t shm_createtime)
56012 +{
56013 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56014 + struct task_struct *p;
56015 + time_t starttime;
56016 +
56017 + if (unlikely(!grsec_enable_chroot_shmat))
56018 + return 1;
56019 +
56020 + if (likely(!proc_is_chrooted(current)))
56021 + return 1;
56022 +
56023 + rcu_read_lock();
56024 + read_lock(&tasklist_lock);
56025 +
56026 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
56027 + starttime = p->start_time.tv_sec;
56028 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
56029 + if (have_same_root(current, p)) {
56030 + goto allow;
56031 + } else {
56032 + read_unlock(&tasklist_lock);
56033 + rcu_read_unlock();
56034 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56035 + return 0;
56036 + }
56037 + }
56038 + /* creator exited, pid reuse, fall through to next check */
56039 + }
56040 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56041 + if (unlikely(!have_same_root(current, p))) {
56042 + read_unlock(&tasklist_lock);
56043 + rcu_read_unlock();
56044 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56045 + return 0;
56046 + }
56047 + }
56048 +
56049 +allow:
56050 + read_unlock(&tasklist_lock);
56051 + rcu_read_unlock();
56052 +#endif
56053 + return 1;
56054 +}
56055 +
56056 +void
56057 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56058 +{
56059 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56060 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56061 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56062 +#endif
56063 + return;
56064 +}
56065 +
56066 +int
56067 +gr_handle_chroot_mknod(const struct dentry *dentry,
56068 + const struct vfsmount *mnt, const int mode)
56069 +{
56070 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56071 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56072 + proc_is_chrooted(current)) {
56073 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
56074 + return -EPERM;
56075 + }
56076 +#endif
56077 + return 0;
56078 +}
56079 +
56080 +int
56081 +gr_handle_chroot_mount(const struct dentry *dentry,
56082 + const struct vfsmount *mnt, const char *dev_name)
56083 +{
56084 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56085 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
56086 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
56087 + return -EPERM;
56088 + }
56089 +#endif
56090 + return 0;
56091 +}
56092 +
56093 +int
56094 +gr_handle_chroot_pivot(void)
56095 +{
56096 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56097 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
56098 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
56099 + return -EPERM;
56100 + }
56101 +#endif
56102 + return 0;
56103 +}
56104 +
56105 +int
56106 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
56107 +{
56108 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56109 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
56110 + !gr_is_outside_chroot(dentry, mnt)) {
56111 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
56112 + return -EPERM;
56113 + }
56114 +#endif
56115 + return 0;
56116 +}
56117 +
56118 +extern const char *captab_log[];
56119 +extern int captab_log_entries;
56120 +
56121 +int
56122 +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56123 +{
56124 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56125 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56126 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56127 + if (cap_raised(chroot_caps, cap)) {
56128 + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
56129 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
56130 + }
56131 + return 0;
56132 + }
56133 + }
56134 +#endif
56135 + return 1;
56136 +}
56137 +
56138 +int
56139 +gr_chroot_is_capable(const int cap)
56140 +{
56141 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56142 + return gr_task_chroot_is_capable(current, current_cred(), cap);
56143 +#endif
56144 + return 1;
56145 +}
56146 +
56147 +int
56148 +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
56149 +{
56150 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56151 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56152 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56153 + if (cap_raised(chroot_caps, cap)) {
56154 + return 0;
56155 + }
56156 + }
56157 +#endif
56158 + return 1;
56159 +}
56160 +
56161 +int
56162 +gr_chroot_is_capable_nolog(const int cap)
56163 +{
56164 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56165 + return gr_task_chroot_is_capable_nolog(current, cap);
56166 +#endif
56167 + return 1;
56168 +}
56169 +
56170 +int
56171 +gr_handle_chroot_sysctl(const int op)
56172 +{
56173 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56174 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
56175 + proc_is_chrooted(current))
56176 + return -EACCES;
56177 +#endif
56178 + return 0;
56179 +}
56180 +
56181 +void
56182 +gr_handle_chroot_chdir(struct path *path)
56183 +{
56184 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56185 + if (grsec_enable_chroot_chdir)
56186 + set_fs_pwd(current->fs, path);
56187 +#endif
56188 + return;
56189 +}
56190 +
56191 +int
56192 +gr_handle_chroot_chmod(const struct dentry *dentry,
56193 + const struct vfsmount *mnt, const int mode)
56194 +{
56195 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56196 + /* allow chmod +s on directories, but not files */
56197 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
56198 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56199 + proc_is_chrooted(current)) {
56200 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56201 + return -EPERM;
56202 + }
56203 +#endif
56204 + return 0;
56205 +}
56206 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
56207 new file mode 100644
56208 index 0000000..213ad8b
56209 --- /dev/null
56210 +++ b/grsecurity/grsec_disabled.c
56211 @@ -0,0 +1,437 @@
56212 +#include <linux/kernel.h>
56213 +#include <linux/module.h>
56214 +#include <linux/sched.h>
56215 +#include <linux/file.h>
56216 +#include <linux/fs.h>
56217 +#include <linux/kdev_t.h>
56218 +#include <linux/net.h>
56219 +#include <linux/in.h>
56220 +#include <linux/ip.h>
56221 +#include <linux/skbuff.h>
56222 +#include <linux/sysctl.h>
56223 +
56224 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56225 +void
56226 +pax_set_initial_flags(struct linux_binprm *bprm)
56227 +{
56228 + return;
56229 +}
56230 +#endif
56231 +
56232 +#ifdef CONFIG_SYSCTL
56233 +__u32
56234 +gr_handle_sysctl(const struct ctl_table * table, const int op)
56235 +{
56236 + return 0;
56237 +}
56238 +#endif
56239 +
56240 +#ifdef CONFIG_TASKSTATS
56241 +int gr_is_taskstats_denied(int pid)
56242 +{
56243 + return 0;
56244 +}
56245 +#endif
56246 +
56247 +int
56248 +gr_acl_is_enabled(void)
56249 +{
56250 + return 0;
56251 +}
56252 +
56253 +void
56254 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56255 +{
56256 + return;
56257 +}
56258 +
56259 +int
56260 +gr_handle_rawio(const struct inode *inode)
56261 +{
56262 + return 0;
56263 +}
56264 +
56265 +void
56266 +gr_acl_handle_psacct(struct task_struct *task, const long code)
56267 +{
56268 + return;
56269 +}
56270 +
56271 +int
56272 +gr_handle_ptrace(struct task_struct *task, const long request)
56273 +{
56274 + return 0;
56275 +}
56276 +
56277 +int
56278 +gr_handle_proc_ptrace(struct task_struct *task)
56279 +{
56280 + return 0;
56281 +}
56282 +
56283 +void
56284 +gr_learn_resource(const struct task_struct *task,
56285 + const int res, const unsigned long wanted, const int gt)
56286 +{
56287 + return;
56288 +}
56289 +
56290 +int
56291 +gr_set_acls(const int type)
56292 +{
56293 + return 0;
56294 +}
56295 +
56296 +int
56297 +gr_check_hidden_task(const struct task_struct *tsk)
56298 +{
56299 + return 0;
56300 +}
56301 +
56302 +int
56303 +gr_check_protected_task(const struct task_struct *task)
56304 +{
56305 + return 0;
56306 +}
56307 +
56308 +int
56309 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
56310 +{
56311 + return 0;
56312 +}
56313 +
56314 +void
56315 +gr_copy_label(struct task_struct *tsk)
56316 +{
56317 + return;
56318 +}
56319 +
56320 +void
56321 +gr_set_pax_flags(struct task_struct *task)
56322 +{
56323 + return;
56324 +}
56325 +
56326 +int
56327 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
56328 + const int unsafe_share)
56329 +{
56330 + return 0;
56331 +}
56332 +
56333 +void
56334 +gr_handle_delete(const ino_t ino, const dev_t dev)
56335 +{
56336 + return;
56337 +}
56338 +
56339 +void
56340 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56341 +{
56342 + return;
56343 +}
56344 +
56345 +void
56346 +gr_handle_crash(struct task_struct *task, const int sig)
56347 +{
56348 + return;
56349 +}
56350 +
56351 +int
56352 +gr_check_crash_exec(const struct file *filp)
56353 +{
56354 + return 0;
56355 +}
56356 +
56357 +int
56358 +gr_check_crash_uid(const uid_t uid)
56359 +{
56360 + return 0;
56361 +}
56362 +
56363 +void
56364 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56365 + struct dentry *old_dentry,
56366 + struct dentry *new_dentry,
56367 + struct vfsmount *mnt, const __u8 replace)
56368 +{
56369 + return;
56370 +}
56371 +
56372 +int
56373 +gr_search_socket(const int family, const int type, const int protocol)
56374 +{
56375 + return 1;
56376 +}
56377 +
56378 +int
56379 +gr_search_connectbind(const int mode, const struct socket *sock,
56380 + const struct sockaddr_in *addr)
56381 +{
56382 + return 0;
56383 +}
56384 +
56385 +void
56386 +gr_handle_alertkill(struct task_struct *task)
56387 +{
56388 + return;
56389 +}
56390 +
56391 +__u32
56392 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
56393 +{
56394 + return 1;
56395 +}
56396 +
56397 +__u32
56398 +gr_acl_handle_hidden_file(const struct dentry * dentry,
56399 + const struct vfsmount * mnt)
56400 +{
56401 + return 1;
56402 +}
56403 +
56404 +__u32
56405 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
56406 + int acc_mode)
56407 +{
56408 + return 1;
56409 +}
56410 +
56411 +__u32
56412 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56413 +{
56414 + return 1;
56415 +}
56416 +
56417 +__u32
56418 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
56419 +{
56420 + return 1;
56421 +}
56422 +
56423 +int
56424 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
56425 + unsigned int *vm_flags)
56426 +{
56427 + return 1;
56428 +}
56429 +
56430 +__u32
56431 +gr_acl_handle_truncate(const struct dentry * dentry,
56432 + const struct vfsmount * mnt)
56433 +{
56434 + return 1;
56435 +}
56436 +
56437 +__u32
56438 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
56439 +{
56440 + return 1;
56441 +}
56442 +
56443 +__u32
56444 +gr_acl_handle_access(const struct dentry * dentry,
56445 + const struct vfsmount * mnt, const int fmode)
56446 +{
56447 + return 1;
56448 +}
56449 +
56450 +__u32
56451 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
56452 + umode_t *mode)
56453 +{
56454 + return 1;
56455 +}
56456 +
56457 +__u32
56458 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
56459 +{
56460 + return 1;
56461 +}
56462 +
56463 +__u32
56464 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
56465 +{
56466 + return 1;
56467 +}
56468 +
56469 +void
56470 +grsecurity_init(void)
56471 +{
56472 + return;
56473 +}
56474 +
56475 +umode_t gr_acl_umask(void)
56476 +{
56477 + return 0;
56478 +}
56479 +
56480 +__u32
56481 +gr_acl_handle_mknod(const struct dentry * new_dentry,
56482 + const struct dentry * parent_dentry,
56483 + const struct vfsmount * parent_mnt,
56484 + const int mode)
56485 +{
56486 + return 1;
56487 +}
56488 +
56489 +__u32
56490 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
56491 + const struct dentry * parent_dentry,
56492 + const struct vfsmount * parent_mnt)
56493 +{
56494 + return 1;
56495 +}
56496 +
56497 +__u32
56498 +gr_acl_handle_symlink(const struct dentry * new_dentry,
56499 + const struct dentry * parent_dentry,
56500 + const struct vfsmount * parent_mnt, const char *from)
56501 +{
56502 + return 1;
56503 +}
56504 +
56505 +__u32
56506 +gr_acl_handle_link(const struct dentry * new_dentry,
56507 + const struct dentry * parent_dentry,
56508 + const struct vfsmount * parent_mnt,
56509 + const struct dentry * old_dentry,
56510 + const struct vfsmount * old_mnt, const char *to)
56511 +{
56512 + return 1;
56513 +}
56514 +
56515 +int
56516 +gr_acl_handle_rename(const struct dentry *new_dentry,
56517 + const struct dentry *parent_dentry,
56518 + const struct vfsmount *parent_mnt,
56519 + const struct dentry *old_dentry,
56520 + const struct inode *old_parent_inode,
56521 + const struct vfsmount *old_mnt, const char *newname)
56522 +{
56523 + return 0;
56524 +}
56525 +
56526 +int
56527 +gr_acl_handle_filldir(const struct file *file, const char *name,
56528 + const int namelen, const ino_t ino)
56529 +{
56530 + return 1;
56531 +}
56532 +
56533 +int
56534 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56535 + const time_t shm_createtime, const uid_t cuid, const int shmid)
56536 +{
56537 + return 1;
56538 +}
56539 +
56540 +int
56541 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
56542 +{
56543 + return 0;
56544 +}
56545 +
56546 +int
56547 +gr_search_accept(const struct socket *sock)
56548 +{
56549 + return 0;
56550 +}
56551 +
56552 +int
56553 +gr_search_listen(const struct socket *sock)
56554 +{
56555 + return 0;
56556 +}
56557 +
56558 +int
56559 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
56560 +{
56561 + return 0;
56562 +}
56563 +
56564 +__u32
56565 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
56566 +{
56567 + return 1;
56568 +}
56569 +
56570 +__u32
56571 +gr_acl_handle_creat(const struct dentry * dentry,
56572 + const struct dentry * p_dentry,
56573 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
56574 + const int imode)
56575 +{
56576 + return 1;
56577 +}
56578 +
56579 +void
56580 +gr_acl_handle_exit(void)
56581 +{
56582 + return;
56583 +}
56584 +
56585 +int
56586 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
56587 +{
56588 + return 1;
56589 +}
56590 +
56591 +void
56592 +gr_set_role_label(const uid_t uid, const gid_t gid)
56593 +{
56594 + return;
56595 +}
56596 +
56597 +int
56598 +gr_acl_handle_procpidmem(const struct task_struct *task)
56599 +{
56600 + return 0;
56601 +}
56602 +
56603 +int
56604 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
56605 +{
56606 + return 0;
56607 +}
56608 +
56609 +int
56610 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
56611 +{
56612 + return 0;
56613 +}
56614 +
56615 +void
56616 +gr_set_kernel_label(struct task_struct *task)
56617 +{
56618 + return;
56619 +}
56620 +
56621 +int
56622 +gr_check_user_change(int real, int effective, int fs)
56623 +{
56624 + return 0;
56625 +}
56626 +
56627 +int
56628 +gr_check_group_change(int real, int effective, int fs)
56629 +{
56630 + return 0;
56631 +}
56632 +
56633 +int gr_acl_enable_at_secure(void)
56634 +{
56635 + return 0;
56636 +}
56637 +
56638 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56639 +{
56640 + return dentry->d_inode->i_sb->s_dev;
56641 +}
56642 +
56643 +EXPORT_SYMBOL(gr_learn_resource);
56644 +EXPORT_SYMBOL(gr_set_kernel_label);
56645 +#ifdef CONFIG_SECURITY
56646 +EXPORT_SYMBOL(gr_check_user_change);
56647 +EXPORT_SYMBOL(gr_check_group_change);
56648 +#endif
56649 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
56650 new file mode 100644
56651 index 0000000..abfa971
56652 --- /dev/null
56653 +++ b/grsecurity/grsec_exec.c
56654 @@ -0,0 +1,174 @@
56655 +#include <linux/kernel.h>
56656 +#include <linux/sched.h>
56657 +#include <linux/file.h>
56658 +#include <linux/binfmts.h>
56659 +#include <linux/fs.h>
56660 +#include <linux/types.h>
56661 +#include <linux/grdefs.h>
56662 +#include <linux/grsecurity.h>
56663 +#include <linux/grinternal.h>
56664 +#include <linux/capability.h>
56665 +#include <linux/module.h>
56666 +
56667 +#include <asm/uaccess.h>
56668 +
56669 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56670 +static char gr_exec_arg_buf[132];
56671 +static DEFINE_MUTEX(gr_exec_arg_mutex);
56672 +#endif
56673 +
56674 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
56675 +
56676 +void
56677 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
56678 +{
56679 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56680 + char *grarg = gr_exec_arg_buf;
56681 + unsigned int i, x, execlen = 0;
56682 + char c;
56683 +
56684 + if (!((grsec_enable_execlog && grsec_enable_group &&
56685 + in_group_p(grsec_audit_gid))
56686 + || (grsec_enable_execlog && !grsec_enable_group)))
56687 + return;
56688 +
56689 + mutex_lock(&gr_exec_arg_mutex);
56690 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
56691 +
56692 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
56693 + const char __user *p;
56694 + unsigned int len;
56695 +
56696 + p = get_user_arg_ptr(argv, i);
56697 + if (IS_ERR(p))
56698 + goto log;
56699 +
56700 + len = strnlen_user(p, 128 - execlen);
56701 + if (len > 128 - execlen)
56702 + len = 128 - execlen;
56703 + else if (len > 0)
56704 + len--;
56705 + if (copy_from_user(grarg + execlen, p, len))
56706 + goto log;
56707 +
56708 + /* rewrite unprintable characters */
56709 + for (x = 0; x < len; x++) {
56710 + c = *(grarg + execlen + x);
56711 + if (c < 32 || c > 126)
56712 + *(grarg + execlen + x) = ' ';
56713 + }
56714 +
56715 + execlen += len;
56716 + *(grarg + execlen) = ' ';
56717 + *(grarg + execlen + 1) = '\0';
56718 + execlen++;
56719 + }
56720 +
56721 + log:
56722 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
56723 + bprm->file->f_path.mnt, grarg);
56724 + mutex_unlock(&gr_exec_arg_mutex);
56725 +#endif
56726 + return;
56727 +}
56728 +
56729 +#ifdef CONFIG_GRKERNSEC
56730 +extern int gr_acl_is_capable(const int cap);
56731 +extern int gr_acl_is_capable_nolog(const int cap);
56732 +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
56733 +extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
56734 +extern int gr_chroot_is_capable(const int cap);
56735 +extern int gr_chroot_is_capable_nolog(const int cap);
56736 +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
56737 +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
56738 +#endif
56739 +
56740 +const char *captab_log[] = {
56741 + "CAP_CHOWN",
56742 + "CAP_DAC_OVERRIDE",
56743 + "CAP_DAC_READ_SEARCH",
56744 + "CAP_FOWNER",
56745 + "CAP_FSETID",
56746 + "CAP_KILL",
56747 + "CAP_SETGID",
56748 + "CAP_SETUID",
56749 + "CAP_SETPCAP",
56750 + "CAP_LINUX_IMMUTABLE",
56751 + "CAP_NET_BIND_SERVICE",
56752 + "CAP_NET_BROADCAST",
56753 + "CAP_NET_ADMIN",
56754 + "CAP_NET_RAW",
56755 + "CAP_IPC_LOCK",
56756 + "CAP_IPC_OWNER",
56757 + "CAP_SYS_MODULE",
56758 + "CAP_SYS_RAWIO",
56759 + "CAP_SYS_CHROOT",
56760 + "CAP_SYS_PTRACE",
56761 + "CAP_SYS_PACCT",
56762 + "CAP_SYS_ADMIN",
56763 + "CAP_SYS_BOOT",
56764 + "CAP_SYS_NICE",
56765 + "CAP_SYS_RESOURCE",
56766 + "CAP_SYS_TIME",
56767 + "CAP_SYS_TTY_CONFIG",
56768 + "CAP_MKNOD",
56769 + "CAP_LEASE",
56770 + "CAP_AUDIT_WRITE",
56771 + "CAP_AUDIT_CONTROL",
56772 + "CAP_SETFCAP",
56773 + "CAP_MAC_OVERRIDE",
56774 + "CAP_MAC_ADMIN",
56775 + "CAP_SYSLOG",
56776 + "CAP_WAKE_ALARM"
56777 +};
56778 +
56779 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
56780 +
56781 +int gr_is_capable(const int cap)
56782 +{
56783 +#ifdef CONFIG_GRKERNSEC
56784 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
56785 + return 1;
56786 + return 0;
56787 +#else
56788 + return 1;
56789 +#endif
56790 +}
56791 +
56792 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56793 +{
56794 +#ifdef CONFIG_GRKERNSEC
56795 + if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
56796 + return 1;
56797 + return 0;
56798 +#else
56799 + return 1;
56800 +#endif
56801 +}
56802 +
56803 +int gr_is_capable_nolog(const int cap)
56804 +{
56805 +#ifdef CONFIG_GRKERNSEC
56806 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
56807 + return 1;
56808 + return 0;
56809 +#else
56810 + return 1;
56811 +#endif
56812 +}
56813 +
56814 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
56815 +{
56816 +#ifdef CONFIG_GRKERNSEC
56817 + if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
56818 + return 1;
56819 + return 0;
56820 +#else
56821 + return 1;
56822 +#endif
56823 +}
56824 +
56825 +EXPORT_SYMBOL(gr_is_capable);
56826 +EXPORT_SYMBOL(gr_is_capable_nolog);
56827 +EXPORT_SYMBOL(gr_task_is_capable);
56828 +EXPORT_SYMBOL(gr_task_is_capable_nolog);
56829 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
56830 new file mode 100644
56831 index 0000000..d3ee748
56832 --- /dev/null
56833 +++ b/grsecurity/grsec_fifo.c
56834 @@ -0,0 +1,24 @@
56835 +#include <linux/kernel.h>
56836 +#include <linux/sched.h>
56837 +#include <linux/fs.h>
56838 +#include <linux/file.h>
56839 +#include <linux/grinternal.h>
56840 +
56841 +int
56842 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
56843 + const struct dentry *dir, const int flag, const int acc_mode)
56844 +{
56845 +#ifdef CONFIG_GRKERNSEC_FIFO
56846 + const struct cred *cred = current_cred();
56847 +
56848 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
56849 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
56850 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
56851 + (cred->fsuid != dentry->d_inode->i_uid)) {
56852 + if (!inode_permission(dentry->d_inode, acc_mode))
56853 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
56854 + return -EACCES;
56855 + }
56856 +#endif
56857 + return 0;
56858 +}
56859 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
56860 new file mode 100644
56861 index 0000000..8ca18bf
56862 --- /dev/null
56863 +++ b/grsecurity/grsec_fork.c
56864 @@ -0,0 +1,23 @@
56865 +#include <linux/kernel.h>
56866 +#include <linux/sched.h>
56867 +#include <linux/grsecurity.h>
56868 +#include <linux/grinternal.h>
56869 +#include <linux/errno.h>
56870 +
56871 +void
56872 +gr_log_forkfail(const int retval)
56873 +{
56874 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
56875 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
56876 + switch (retval) {
56877 + case -EAGAIN:
56878 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
56879 + break;
56880 + case -ENOMEM:
56881 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
56882 + break;
56883 + }
56884 + }
56885 +#endif
56886 + return;
56887 +}
56888 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
56889 new file mode 100644
56890 index 0000000..01ddde4
56891 --- /dev/null
56892 +++ b/grsecurity/grsec_init.c
56893 @@ -0,0 +1,277 @@
56894 +#include <linux/kernel.h>
56895 +#include <linux/sched.h>
56896 +#include <linux/mm.h>
56897 +#include <linux/gracl.h>
56898 +#include <linux/slab.h>
56899 +#include <linux/vmalloc.h>
56900 +#include <linux/percpu.h>
56901 +#include <linux/module.h>
56902 +
56903 +int grsec_enable_ptrace_readexec;
56904 +int grsec_enable_setxid;
56905 +int grsec_enable_brute;
56906 +int grsec_enable_link;
56907 +int grsec_enable_dmesg;
56908 +int grsec_enable_harden_ptrace;
56909 +int grsec_enable_fifo;
56910 +int grsec_enable_execlog;
56911 +int grsec_enable_signal;
56912 +int grsec_enable_forkfail;
56913 +int grsec_enable_audit_ptrace;
56914 +int grsec_enable_time;
56915 +int grsec_enable_audit_textrel;
56916 +int grsec_enable_group;
56917 +int grsec_audit_gid;
56918 +int grsec_enable_chdir;
56919 +int grsec_enable_mount;
56920 +int grsec_enable_rofs;
56921 +int grsec_enable_chroot_findtask;
56922 +int grsec_enable_chroot_mount;
56923 +int grsec_enable_chroot_shmat;
56924 +int grsec_enable_chroot_fchdir;
56925 +int grsec_enable_chroot_double;
56926 +int grsec_enable_chroot_pivot;
56927 +int grsec_enable_chroot_chdir;
56928 +int grsec_enable_chroot_chmod;
56929 +int grsec_enable_chroot_mknod;
56930 +int grsec_enable_chroot_nice;
56931 +int grsec_enable_chroot_execlog;
56932 +int grsec_enable_chroot_caps;
56933 +int grsec_enable_chroot_sysctl;
56934 +int grsec_enable_chroot_unix;
56935 +int grsec_enable_tpe;
56936 +int grsec_tpe_gid;
56937 +int grsec_enable_blackhole;
56938 +#ifdef CONFIG_IPV6_MODULE
56939 +EXPORT_SYMBOL(grsec_enable_blackhole);
56940 +#endif
56941 +int grsec_lastack_retries;
56942 +int grsec_enable_tpe_all;
56943 +int grsec_enable_tpe_invert;
56944 +int grsec_enable_socket_all;
56945 +int grsec_socket_all_gid;
56946 +int grsec_enable_socket_client;
56947 +int grsec_socket_client_gid;
56948 +int grsec_enable_socket_server;
56949 +int grsec_socket_server_gid;
56950 +int grsec_resource_logging;
56951 +int grsec_disable_privio;
56952 +int grsec_enable_log_rwxmaps;
56953 +int grsec_lock;
56954 +
56955 +DEFINE_SPINLOCK(grsec_alert_lock);
56956 +unsigned long grsec_alert_wtime = 0;
56957 +unsigned long grsec_alert_fyet = 0;
56958 +
56959 +DEFINE_SPINLOCK(grsec_audit_lock);
56960 +
56961 +DEFINE_RWLOCK(grsec_exec_file_lock);
56962 +
56963 +char *gr_shared_page[4];
56964 +
56965 +char *gr_alert_log_fmt;
56966 +char *gr_audit_log_fmt;
56967 +char *gr_alert_log_buf;
56968 +char *gr_audit_log_buf;
56969 +
56970 +extern struct gr_arg *gr_usermode;
56971 +extern unsigned char *gr_system_salt;
56972 +extern unsigned char *gr_system_sum;
56973 +
56974 +void __init
56975 +grsecurity_init(void)
56976 +{
56977 + int j;
56978 + /* create the per-cpu shared pages */
56979 +
56980 +#ifdef CONFIG_X86
56981 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
56982 +#endif
56983 +
56984 + for (j = 0; j < 4; j++) {
56985 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
56986 + if (gr_shared_page[j] == NULL) {
56987 + panic("Unable to allocate grsecurity shared page");
56988 + return;
56989 + }
56990 + }
56991 +
56992 + /* allocate log buffers */
56993 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
56994 + if (!gr_alert_log_fmt) {
56995 + panic("Unable to allocate grsecurity alert log format buffer");
56996 + return;
56997 + }
56998 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
56999 + if (!gr_audit_log_fmt) {
57000 + panic("Unable to allocate grsecurity audit log format buffer");
57001 + return;
57002 + }
57003 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57004 + if (!gr_alert_log_buf) {
57005 + panic("Unable to allocate grsecurity alert log buffer");
57006 + return;
57007 + }
57008 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57009 + if (!gr_audit_log_buf) {
57010 + panic("Unable to allocate grsecurity audit log buffer");
57011 + return;
57012 + }
57013 +
57014 + /* allocate memory for authentication structure */
57015 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
57016 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
57017 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
57018 +
57019 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
57020 + panic("Unable to allocate grsecurity authentication structure");
57021 + return;
57022 + }
57023 +
57024 +
57025 +#ifdef CONFIG_GRKERNSEC_IO
57026 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
57027 + grsec_disable_privio = 1;
57028 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57029 + grsec_disable_privio = 1;
57030 +#else
57031 + grsec_disable_privio = 0;
57032 +#endif
57033 +#endif
57034 +
57035 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57036 + /* for backward compatibility, tpe_invert always defaults to on if
57037 + enabled in the kernel
57038 + */
57039 + grsec_enable_tpe_invert = 1;
57040 +#endif
57041 +
57042 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57043 +#ifndef CONFIG_GRKERNSEC_SYSCTL
57044 + grsec_lock = 1;
57045 +#endif
57046 +
57047 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57048 + grsec_enable_audit_textrel = 1;
57049 +#endif
57050 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57051 + grsec_enable_log_rwxmaps = 1;
57052 +#endif
57053 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57054 + grsec_enable_group = 1;
57055 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
57056 +#endif
57057 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57058 + grsec_enable_ptrace_readexec = 1;
57059 +#endif
57060 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57061 + grsec_enable_chdir = 1;
57062 +#endif
57063 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57064 + grsec_enable_harden_ptrace = 1;
57065 +#endif
57066 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57067 + grsec_enable_mount = 1;
57068 +#endif
57069 +#ifdef CONFIG_GRKERNSEC_LINK
57070 + grsec_enable_link = 1;
57071 +#endif
57072 +#ifdef CONFIG_GRKERNSEC_BRUTE
57073 + grsec_enable_brute = 1;
57074 +#endif
57075 +#ifdef CONFIG_GRKERNSEC_DMESG
57076 + grsec_enable_dmesg = 1;
57077 +#endif
57078 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57079 + grsec_enable_blackhole = 1;
57080 + grsec_lastack_retries = 4;
57081 +#endif
57082 +#ifdef CONFIG_GRKERNSEC_FIFO
57083 + grsec_enable_fifo = 1;
57084 +#endif
57085 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57086 + grsec_enable_execlog = 1;
57087 +#endif
57088 +#ifdef CONFIG_GRKERNSEC_SETXID
57089 + grsec_enable_setxid = 1;
57090 +#endif
57091 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57092 + grsec_enable_signal = 1;
57093 +#endif
57094 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57095 + grsec_enable_forkfail = 1;
57096 +#endif
57097 +#ifdef CONFIG_GRKERNSEC_TIME
57098 + grsec_enable_time = 1;
57099 +#endif
57100 +#ifdef CONFIG_GRKERNSEC_RESLOG
57101 + grsec_resource_logging = 1;
57102 +#endif
57103 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57104 + grsec_enable_chroot_findtask = 1;
57105 +#endif
57106 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57107 + grsec_enable_chroot_unix = 1;
57108 +#endif
57109 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57110 + grsec_enable_chroot_mount = 1;
57111 +#endif
57112 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57113 + grsec_enable_chroot_fchdir = 1;
57114 +#endif
57115 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57116 + grsec_enable_chroot_shmat = 1;
57117 +#endif
57118 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57119 + grsec_enable_audit_ptrace = 1;
57120 +#endif
57121 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57122 + grsec_enable_chroot_double = 1;
57123 +#endif
57124 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57125 + grsec_enable_chroot_pivot = 1;
57126 +#endif
57127 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57128 + grsec_enable_chroot_chdir = 1;
57129 +#endif
57130 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57131 + grsec_enable_chroot_chmod = 1;
57132 +#endif
57133 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57134 + grsec_enable_chroot_mknod = 1;
57135 +#endif
57136 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57137 + grsec_enable_chroot_nice = 1;
57138 +#endif
57139 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57140 + grsec_enable_chroot_execlog = 1;
57141 +#endif
57142 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57143 + grsec_enable_chroot_caps = 1;
57144 +#endif
57145 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57146 + grsec_enable_chroot_sysctl = 1;
57147 +#endif
57148 +#ifdef CONFIG_GRKERNSEC_TPE
57149 + grsec_enable_tpe = 1;
57150 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
57151 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
57152 + grsec_enable_tpe_all = 1;
57153 +#endif
57154 +#endif
57155 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57156 + grsec_enable_socket_all = 1;
57157 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
57158 +#endif
57159 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57160 + grsec_enable_socket_client = 1;
57161 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
57162 +#endif
57163 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57164 + grsec_enable_socket_server = 1;
57165 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
57166 +#endif
57167 +#endif
57168 +
57169 + return;
57170 +}
57171 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
57172 new file mode 100644
57173 index 0000000..3efe141
57174 --- /dev/null
57175 +++ b/grsecurity/grsec_link.c
57176 @@ -0,0 +1,43 @@
57177 +#include <linux/kernel.h>
57178 +#include <linux/sched.h>
57179 +#include <linux/fs.h>
57180 +#include <linux/file.h>
57181 +#include <linux/grinternal.h>
57182 +
57183 +int
57184 +gr_handle_follow_link(const struct inode *parent,
57185 + const struct inode *inode,
57186 + const struct dentry *dentry, const struct vfsmount *mnt)
57187 +{
57188 +#ifdef CONFIG_GRKERNSEC_LINK
57189 + const struct cred *cred = current_cred();
57190 +
57191 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57192 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57193 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57194 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57195 + return -EACCES;
57196 + }
57197 +#endif
57198 + return 0;
57199 +}
57200 +
57201 +int
57202 +gr_handle_hardlink(const struct dentry *dentry,
57203 + const struct vfsmount *mnt,
57204 + struct inode *inode, const int mode, const char *to)
57205 +{
57206 +#ifdef CONFIG_GRKERNSEC_LINK
57207 + const struct cred *cred = current_cred();
57208 +
57209 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57210 + (!S_ISREG(mode) || (mode & S_ISUID) ||
57211 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
57212 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
57213 + !capable(CAP_FOWNER) && cred->uid) {
57214 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57215 + return -EPERM;
57216 + }
57217 +#endif
57218 + return 0;
57219 +}
57220 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
57221 new file mode 100644
57222 index 0000000..a45d2e9
57223 --- /dev/null
57224 +++ b/grsecurity/grsec_log.c
57225 @@ -0,0 +1,322 @@
57226 +#include <linux/kernel.h>
57227 +#include <linux/sched.h>
57228 +#include <linux/file.h>
57229 +#include <linux/tty.h>
57230 +#include <linux/fs.h>
57231 +#include <linux/grinternal.h>
57232 +
57233 +#ifdef CONFIG_TREE_PREEMPT_RCU
57234 +#define DISABLE_PREEMPT() preempt_disable()
57235 +#define ENABLE_PREEMPT() preempt_enable()
57236 +#else
57237 +#define DISABLE_PREEMPT()
57238 +#define ENABLE_PREEMPT()
57239 +#endif
57240 +
57241 +#define BEGIN_LOCKS(x) \
57242 + DISABLE_PREEMPT(); \
57243 + rcu_read_lock(); \
57244 + read_lock(&tasklist_lock); \
57245 + read_lock(&grsec_exec_file_lock); \
57246 + if (x != GR_DO_AUDIT) \
57247 + spin_lock(&grsec_alert_lock); \
57248 + else \
57249 + spin_lock(&grsec_audit_lock)
57250 +
57251 +#define END_LOCKS(x) \
57252 + if (x != GR_DO_AUDIT) \
57253 + spin_unlock(&grsec_alert_lock); \
57254 + else \
57255 + spin_unlock(&grsec_audit_lock); \
57256 + read_unlock(&grsec_exec_file_lock); \
57257 + read_unlock(&tasklist_lock); \
57258 + rcu_read_unlock(); \
57259 + ENABLE_PREEMPT(); \
57260 + if (x == GR_DONT_AUDIT) \
57261 + gr_handle_alertkill(current)
57262 +
57263 +enum {
57264 + FLOODING,
57265 + NO_FLOODING
57266 +};
57267 +
57268 +extern char *gr_alert_log_fmt;
57269 +extern char *gr_audit_log_fmt;
57270 +extern char *gr_alert_log_buf;
57271 +extern char *gr_audit_log_buf;
57272 +
57273 +static int gr_log_start(int audit)
57274 +{
57275 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57276 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57277 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57278 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57279 + unsigned long curr_secs = get_seconds();
57280 +
57281 + if (audit == GR_DO_AUDIT)
57282 + goto set_fmt;
57283 +
57284 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57285 + grsec_alert_wtime = curr_secs;
57286 + grsec_alert_fyet = 0;
57287 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57288 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
57289 + grsec_alert_fyet++;
57290 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
57291 + grsec_alert_wtime = curr_secs;
57292 + grsec_alert_fyet++;
57293 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57294 + return FLOODING;
57295 + }
57296 + else return FLOODING;
57297 +
57298 +set_fmt:
57299 +#endif
57300 + memset(buf, 0, PAGE_SIZE);
57301 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
57302 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
57303 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57304 + } else if (current->signal->curr_ip) {
57305 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
57306 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
57307 + } else if (gr_acl_is_enabled()) {
57308 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
57309 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57310 + } else {
57311 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
57312 + strcpy(buf, fmt);
57313 + }
57314 +
57315 + return NO_FLOODING;
57316 +}
57317 +
57318 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57319 + __attribute__ ((format (printf, 2, 0)));
57320 +
57321 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57322 +{
57323 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57324 + unsigned int len = strlen(buf);
57325 +
57326 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57327 +
57328 + return;
57329 +}
57330 +
57331 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57332 + __attribute__ ((format (printf, 2, 3)));
57333 +
57334 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57335 +{
57336 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57337 + unsigned int len = strlen(buf);
57338 + va_list ap;
57339 +
57340 + va_start(ap, msg);
57341 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57342 + va_end(ap);
57343 +
57344 + return;
57345 +}
57346 +
57347 +static void gr_log_end(int audit, int append_default)
57348 +{
57349 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57350 +
57351 + if (append_default) {
57352 + unsigned int len = strlen(buf);
57353 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
57354 + }
57355 +
57356 + printk("%s\n", buf);
57357 +
57358 + return;
57359 +}
57360 +
57361 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
57362 +{
57363 + int logtype;
57364 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
57365 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
57366 + void *voidptr = NULL;
57367 + int num1 = 0, num2 = 0;
57368 + unsigned long ulong1 = 0, ulong2 = 0;
57369 + struct dentry *dentry = NULL;
57370 + struct vfsmount *mnt = NULL;
57371 + struct file *file = NULL;
57372 + struct task_struct *task = NULL;
57373 + const struct cred *cred, *pcred;
57374 + va_list ap;
57375 +
57376 + BEGIN_LOCKS(audit);
57377 + logtype = gr_log_start(audit);
57378 + if (logtype == FLOODING) {
57379 + END_LOCKS(audit);
57380 + return;
57381 + }
57382 + va_start(ap, argtypes);
57383 + switch (argtypes) {
57384 + case GR_TTYSNIFF:
57385 + task = va_arg(ap, struct task_struct *);
57386 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
57387 + break;
57388 + case GR_SYSCTL_HIDDEN:
57389 + str1 = va_arg(ap, char *);
57390 + gr_log_middle_varargs(audit, msg, result, str1);
57391 + break;
57392 + case GR_RBAC:
57393 + dentry = va_arg(ap, struct dentry *);
57394 + mnt = va_arg(ap, struct vfsmount *);
57395 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
57396 + break;
57397 + case GR_RBAC_STR:
57398 + dentry = va_arg(ap, struct dentry *);
57399 + mnt = va_arg(ap, struct vfsmount *);
57400 + str1 = va_arg(ap, char *);
57401 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
57402 + break;
57403 + case GR_STR_RBAC:
57404 + str1 = va_arg(ap, char *);
57405 + dentry = va_arg(ap, struct dentry *);
57406 + mnt = va_arg(ap, struct vfsmount *);
57407 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
57408 + break;
57409 + case GR_RBAC_MODE2:
57410 + dentry = va_arg(ap, struct dentry *);
57411 + mnt = va_arg(ap, struct vfsmount *);
57412 + str1 = va_arg(ap, char *);
57413 + str2 = va_arg(ap, char *);
57414 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
57415 + break;
57416 + case GR_RBAC_MODE3:
57417 + dentry = va_arg(ap, struct dentry *);
57418 + mnt = va_arg(ap, struct vfsmount *);
57419 + str1 = va_arg(ap, char *);
57420 + str2 = va_arg(ap, char *);
57421 + str3 = va_arg(ap, char *);
57422 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
57423 + break;
57424 + case GR_FILENAME:
57425 + dentry = va_arg(ap, struct dentry *);
57426 + mnt = va_arg(ap, struct vfsmount *);
57427 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
57428 + break;
57429 + case GR_STR_FILENAME:
57430 + str1 = va_arg(ap, char *);
57431 + dentry = va_arg(ap, struct dentry *);
57432 + mnt = va_arg(ap, struct vfsmount *);
57433 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
57434 + break;
57435 + case GR_FILENAME_STR:
57436 + dentry = va_arg(ap, struct dentry *);
57437 + mnt = va_arg(ap, struct vfsmount *);
57438 + str1 = va_arg(ap, char *);
57439 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
57440 + break;
57441 + case GR_FILENAME_TWO_INT:
57442 + dentry = va_arg(ap, struct dentry *);
57443 + mnt = va_arg(ap, struct vfsmount *);
57444 + num1 = va_arg(ap, int);
57445 + num2 = va_arg(ap, int);
57446 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
57447 + break;
57448 + case GR_FILENAME_TWO_INT_STR:
57449 + dentry = va_arg(ap, struct dentry *);
57450 + mnt = va_arg(ap, struct vfsmount *);
57451 + num1 = va_arg(ap, int);
57452 + num2 = va_arg(ap, int);
57453 + str1 = va_arg(ap, char *);
57454 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
57455 + break;
57456 + case GR_TEXTREL:
57457 + file = va_arg(ap, struct file *);
57458 + ulong1 = va_arg(ap, unsigned long);
57459 + ulong2 = va_arg(ap, unsigned long);
57460 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
57461 + break;
57462 + case GR_PTRACE:
57463 + task = va_arg(ap, struct task_struct *);
57464 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
57465 + break;
57466 + case GR_RESOURCE:
57467 + task = va_arg(ap, struct task_struct *);
57468 + cred = __task_cred(task);
57469 + pcred = __task_cred(task->real_parent);
57470 + ulong1 = va_arg(ap, unsigned long);
57471 + str1 = va_arg(ap, char *);
57472 + ulong2 = va_arg(ap, unsigned long);
57473 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57474 + break;
57475 + case GR_CAP:
57476 + task = va_arg(ap, struct task_struct *);
57477 + cred = __task_cred(task);
57478 + pcred = __task_cred(task->real_parent);
57479 + str1 = va_arg(ap, char *);
57480 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57481 + break;
57482 + case GR_SIG:
57483 + str1 = va_arg(ap, char *);
57484 + voidptr = va_arg(ap, void *);
57485 + gr_log_middle_varargs(audit, msg, str1, voidptr);
57486 + break;
57487 + case GR_SIG2:
57488 + task = va_arg(ap, struct task_struct *);
57489 + cred = __task_cred(task);
57490 + pcred = __task_cred(task->real_parent);
57491 + num1 = va_arg(ap, int);
57492 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57493 + break;
57494 + case GR_CRASH1:
57495 + task = va_arg(ap, struct task_struct *);
57496 + cred = __task_cred(task);
57497 + pcred = __task_cred(task->real_parent);
57498 + ulong1 = va_arg(ap, unsigned long);
57499 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
57500 + break;
57501 + case GR_CRASH2:
57502 + task = va_arg(ap, struct task_struct *);
57503 + cred = __task_cred(task);
57504 + pcred = __task_cred(task->real_parent);
57505 + ulong1 = va_arg(ap, unsigned long);
57506 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
57507 + break;
57508 + case GR_RWXMAP:
57509 + file = va_arg(ap, struct file *);
57510 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
57511 + break;
57512 + case GR_PSACCT:
57513 + {
57514 + unsigned int wday, cday;
57515 + __u8 whr, chr;
57516 + __u8 wmin, cmin;
57517 + __u8 wsec, csec;
57518 + char cur_tty[64] = { 0 };
57519 + char parent_tty[64] = { 0 };
57520 +
57521 + task = va_arg(ap, struct task_struct *);
57522 + wday = va_arg(ap, unsigned int);
57523 + cday = va_arg(ap, unsigned int);
57524 + whr = va_arg(ap, int);
57525 + chr = va_arg(ap, int);
57526 + wmin = va_arg(ap, int);
57527 + cmin = va_arg(ap, int);
57528 + wsec = va_arg(ap, int);
57529 + csec = va_arg(ap, int);
57530 + ulong1 = va_arg(ap, unsigned long);
57531 + cred = __task_cred(task);
57532 + pcred = __task_cred(task->real_parent);
57533 +
57534 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57535 + }
57536 + break;
57537 + default:
57538 + gr_log_middle(audit, msg, ap);
57539 + }
57540 + va_end(ap);
57541 + // these don't need DEFAULTSECARGS printed on the end
57542 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
57543 + gr_log_end(audit, 0);
57544 + else
57545 + gr_log_end(audit, 1);
57546 + END_LOCKS(audit);
57547 +}
57548 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
57549 new file mode 100644
57550 index 0000000..f536303
57551 --- /dev/null
57552 +++ b/grsecurity/grsec_mem.c
57553 @@ -0,0 +1,40 @@
57554 +#include <linux/kernel.h>
57555 +#include <linux/sched.h>
57556 +#include <linux/mm.h>
57557 +#include <linux/mman.h>
57558 +#include <linux/grinternal.h>
57559 +
57560 +void
57561 +gr_handle_ioperm(void)
57562 +{
57563 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
57564 + return;
57565 +}
57566 +
57567 +void
57568 +gr_handle_iopl(void)
57569 +{
57570 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
57571 + return;
57572 +}
57573 +
57574 +void
57575 +gr_handle_mem_readwrite(u64 from, u64 to)
57576 +{
57577 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
57578 + return;
57579 +}
57580 +
57581 +void
57582 +gr_handle_vm86(void)
57583 +{
57584 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
57585 + return;
57586 +}
57587 +
57588 +void
57589 +gr_log_badprocpid(const char *entry)
57590 +{
57591 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
57592 + return;
57593 +}
57594 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
57595 new file mode 100644
57596 index 0000000..2131422
57597 --- /dev/null
57598 +++ b/grsecurity/grsec_mount.c
57599 @@ -0,0 +1,62 @@
57600 +#include <linux/kernel.h>
57601 +#include <linux/sched.h>
57602 +#include <linux/mount.h>
57603 +#include <linux/grsecurity.h>
57604 +#include <linux/grinternal.h>
57605 +
57606 +void
57607 +gr_log_remount(const char *devname, const int retval)
57608 +{
57609 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57610 + if (grsec_enable_mount && (retval >= 0))
57611 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
57612 +#endif
57613 + return;
57614 +}
57615 +
57616 +void
57617 +gr_log_unmount(const char *devname, const int retval)
57618 +{
57619 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57620 + if (grsec_enable_mount && (retval >= 0))
57621 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
57622 +#endif
57623 + return;
57624 +}
57625 +
57626 +void
57627 +gr_log_mount(const char *from, const char *to, const int retval)
57628 +{
57629 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57630 + if (grsec_enable_mount && (retval >= 0))
57631 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
57632 +#endif
57633 + return;
57634 +}
57635 +
57636 +int
57637 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
57638 +{
57639 +#ifdef CONFIG_GRKERNSEC_ROFS
57640 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
57641 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
57642 + return -EPERM;
57643 + } else
57644 + return 0;
57645 +#endif
57646 + return 0;
57647 +}
57648 +
57649 +int
57650 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
57651 +{
57652 +#ifdef CONFIG_GRKERNSEC_ROFS
57653 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
57654 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
57655 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
57656 + return -EPERM;
57657 + } else
57658 + return 0;
57659 +#endif
57660 + return 0;
57661 +}
57662 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
57663 new file mode 100644
57664 index 0000000..a3b12a0
57665 --- /dev/null
57666 +++ b/grsecurity/grsec_pax.c
57667 @@ -0,0 +1,36 @@
57668 +#include <linux/kernel.h>
57669 +#include <linux/sched.h>
57670 +#include <linux/mm.h>
57671 +#include <linux/file.h>
57672 +#include <linux/grinternal.h>
57673 +#include <linux/grsecurity.h>
57674 +
57675 +void
57676 +gr_log_textrel(struct vm_area_struct * vma)
57677 +{
57678 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57679 + if (grsec_enable_audit_textrel)
57680 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
57681 +#endif
57682 + return;
57683 +}
57684 +
57685 +void
57686 +gr_log_rwxmmap(struct file *file)
57687 +{
57688 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57689 + if (grsec_enable_log_rwxmaps)
57690 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
57691 +#endif
57692 + return;
57693 +}
57694 +
57695 +void
57696 +gr_log_rwxmprotect(struct file *file)
57697 +{
57698 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57699 + if (grsec_enable_log_rwxmaps)
57700 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
57701 +#endif
57702 + return;
57703 +}
57704 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
57705 new file mode 100644
57706 index 0000000..f7f29aa
57707 --- /dev/null
57708 +++ b/grsecurity/grsec_ptrace.c
57709 @@ -0,0 +1,30 @@
57710 +#include <linux/kernel.h>
57711 +#include <linux/sched.h>
57712 +#include <linux/grinternal.h>
57713 +#include <linux/security.h>
57714 +
57715 +void
57716 +gr_audit_ptrace(struct task_struct *task)
57717 +{
57718 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57719 + if (grsec_enable_audit_ptrace)
57720 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
57721 +#endif
57722 + return;
57723 +}
57724 +
57725 +int
57726 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
57727 +{
57728 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57729 + const struct dentry *dentry = file->f_path.dentry;
57730 + const struct vfsmount *mnt = file->f_path.mnt;
57731 +
57732 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
57733 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
57734 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
57735 + return -EACCES;
57736 + }
57737 +#endif
57738 + return 0;
57739 +}
57740 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
57741 new file mode 100644
57742 index 0000000..7a5b2de
57743 --- /dev/null
57744 +++ b/grsecurity/grsec_sig.c
57745 @@ -0,0 +1,207 @@
57746 +#include <linux/kernel.h>
57747 +#include <linux/sched.h>
57748 +#include <linux/delay.h>
57749 +#include <linux/grsecurity.h>
57750 +#include <linux/grinternal.h>
57751 +#include <linux/hardirq.h>
57752 +
57753 +char *signames[] = {
57754 + [SIGSEGV] = "Segmentation fault",
57755 + [SIGILL] = "Illegal instruction",
57756 + [SIGABRT] = "Abort",
57757 + [SIGBUS] = "Invalid alignment/Bus error"
57758 +};
57759 +
57760 +void
57761 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
57762 +{
57763 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57764 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
57765 + (sig == SIGABRT) || (sig == SIGBUS))) {
57766 + if (t->pid == current->pid) {
57767 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
57768 + } else {
57769 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
57770 + }
57771 + }
57772 +#endif
57773 + return;
57774 +}
57775 +
57776 +int
57777 +gr_handle_signal(const struct task_struct *p, const int sig)
57778 +{
57779 +#ifdef CONFIG_GRKERNSEC
57780 + /* ignore the 0 signal for protected task checks */
57781 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
57782 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
57783 + return -EPERM;
57784 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
57785 + return -EPERM;
57786 + }
57787 +#endif
57788 + return 0;
57789 +}
57790 +
57791 +#ifdef CONFIG_GRKERNSEC
57792 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
57793 +
57794 +int gr_fake_force_sig(int sig, struct task_struct *t)
57795 +{
57796 + unsigned long int flags;
57797 + int ret, blocked, ignored;
57798 + struct k_sigaction *action;
57799 +
57800 + spin_lock_irqsave(&t->sighand->siglock, flags);
57801 + action = &t->sighand->action[sig-1];
57802 + ignored = action->sa.sa_handler == SIG_IGN;
57803 + blocked = sigismember(&t->blocked, sig);
57804 + if (blocked || ignored) {
57805 + action->sa.sa_handler = SIG_DFL;
57806 + if (blocked) {
57807 + sigdelset(&t->blocked, sig);
57808 + recalc_sigpending_and_wake(t);
57809 + }
57810 + }
57811 + if (action->sa.sa_handler == SIG_DFL)
57812 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
57813 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
57814 +
57815 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
57816 +
57817 + return ret;
57818 +}
57819 +#endif
57820 +
57821 +#ifdef CONFIG_GRKERNSEC_BRUTE
57822 +#define GR_USER_BAN_TIME (15 * 60)
57823 +
57824 +static int __get_dumpable(unsigned long mm_flags)
57825 +{
57826 + int ret;
57827 +
57828 + ret = mm_flags & MMF_DUMPABLE_MASK;
57829 + return (ret >= 2) ? 2 : ret;
57830 +}
57831 +#endif
57832 +
57833 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
57834 +{
57835 +#ifdef CONFIG_GRKERNSEC_BRUTE
57836 + uid_t uid = 0;
57837 +
57838 + if (!grsec_enable_brute)
57839 + return;
57840 +
57841 + rcu_read_lock();
57842 + read_lock(&tasklist_lock);
57843 + read_lock(&grsec_exec_file_lock);
57844 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
57845 + p->real_parent->brute = 1;
57846 + else {
57847 + const struct cred *cred = __task_cred(p), *cred2;
57848 + struct task_struct *tsk, *tsk2;
57849 +
57850 + if (!__get_dumpable(mm_flags) && cred->uid) {
57851 + struct user_struct *user;
57852 +
57853 + uid = cred->uid;
57854 +
57855 + /* this is put upon execution past expiration */
57856 + user = find_user(uid);
57857 + if (user == NULL)
57858 + goto unlock;
57859 + user->banned = 1;
57860 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
57861 + if (user->ban_expires == ~0UL)
57862 + user->ban_expires--;
57863 +
57864 + do_each_thread(tsk2, tsk) {
57865 + cred2 = __task_cred(tsk);
57866 + if (tsk != p && cred2->uid == uid)
57867 + gr_fake_force_sig(SIGKILL, tsk);
57868 + } while_each_thread(tsk2, tsk);
57869 + }
57870 + }
57871 +unlock:
57872 + read_unlock(&grsec_exec_file_lock);
57873 + read_unlock(&tasklist_lock);
57874 + rcu_read_unlock();
57875 +
57876 + if (uid)
57877 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
57878 +
57879 +#endif
57880 + return;
57881 +}
57882 +
57883 +void gr_handle_brute_check(void)
57884 +{
57885 +#ifdef CONFIG_GRKERNSEC_BRUTE
57886 + if (current->brute)
57887 + msleep(30 * 1000);
57888 +#endif
57889 + return;
57890 +}
57891 +
57892 +void gr_handle_kernel_exploit(void)
57893 +{
57894 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
57895 + const struct cred *cred;
57896 + struct task_struct *tsk, *tsk2;
57897 + struct user_struct *user;
57898 + uid_t uid;
57899 +
57900 + if (in_irq() || in_serving_softirq() || in_nmi())
57901 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
57902 +
57903 + uid = current_uid();
57904 +
57905 + if (uid == 0)
57906 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
57907 + else {
57908 + /* kill all the processes of this user, hold a reference
57909 + to their creds struct, and prevent them from creating
57910 + another process until system reset
57911 + */
57912 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
57913 + /* we intentionally leak this ref */
57914 + user = get_uid(current->cred->user);
57915 + if (user) {
57916 + user->banned = 1;
57917 + user->ban_expires = ~0UL;
57918 + }
57919 +
57920 + read_lock(&tasklist_lock);
57921 + do_each_thread(tsk2, tsk) {
57922 + cred = __task_cred(tsk);
57923 + if (cred->uid == uid)
57924 + gr_fake_force_sig(SIGKILL, tsk);
57925 + } while_each_thread(tsk2, tsk);
57926 + read_unlock(&tasklist_lock);
57927 + }
57928 +#endif
57929 +}
57930 +
57931 +int __gr_process_user_ban(struct user_struct *user)
57932 +{
57933 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57934 + if (unlikely(user->banned)) {
57935 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
57936 + user->banned = 0;
57937 + user->ban_expires = 0;
57938 + free_uid(user);
57939 + } else
57940 + return -EPERM;
57941 + }
57942 +#endif
57943 + return 0;
57944 +}
57945 +
57946 +int gr_process_user_ban(void)
57947 +{
57948 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57949 + return __gr_process_user_ban(current->cred->user);
57950 +#endif
57951 + return 0;
57952 +}
57953 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
57954 new file mode 100644
57955 index 0000000..4030d57
57956 --- /dev/null
57957 +++ b/grsecurity/grsec_sock.c
57958 @@ -0,0 +1,244 @@
57959 +#include <linux/kernel.h>
57960 +#include <linux/module.h>
57961 +#include <linux/sched.h>
57962 +#include <linux/file.h>
57963 +#include <linux/net.h>
57964 +#include <linux/in.h>
57965 +#include <linux/ip.h>
57966 +#include <net/sock.h>
57967 +#include <net/inet_sock.h>
57968 +#include <linux/grsecurity.h>
57969 +#include <linux/grinternal.h>
57970 +#include <linux/gracl.h>
57971 +
57972 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
57973 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
57974 +
57975 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
57976 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
57977 +
57978 +#ifdef CONFIG_UNIX_MODULE
57979 +EXPORT_SYMBOL(gr_acl_handle_unix);
57980 +EXPORT_SYMBOL(gr_acl_handle_mknod);
57981 +EXPORT_SYMBOL(gr_handle_chroot_unix);
57982 +EXPORT_SYMBOL(gr_handle_create);
57983 +#endif
57984 +
57985 +#ifdef CONFIG_GRKERNSEC
57986 +#define gr_conn_table_size 32749
57987 +struct conn_table_entry {
57988 + struct conn_table_entry *next;
57989 + struct signal_struct *sig;
57990 +};
57991 +
57992 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
57993 +DEFINE_SPINLOCK(gr_conn_table_lock);
57994 +
57995 +extern const char * gr_socktype_to_name(unsigned char type);
57996 +extern const char * gr_proto_to_name(unsigned char proto);
57997 +extern const char * gr_sockfamily_to_name(unsigned char family);
57998 +
57999 +static __inline__ int
58000 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
58001 +{
58002 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
58003 +}
58004 +
58005 +static __inline__ int
58006 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
58007 + __u16 sport, __u16 dport)
58008 +{
58009 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
58010 + sig->gr_sport == sport && sig->gr_dport == dport))
58011 + return 1;
58012 + else
58013 + return 0;
58014 +}
58015 +
58016 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
58017 +{
58018 + struct conn_table_entry **match;
58019 + unsigned int index;
58020 +
58021 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58022 + sig->gr_sport, sig->gr_dport,
58023 + gr_conn_table_size);
58024 +
58025 + newent->sig = sig;
58026 +
58027 + match = &gr_conn_table[index];
58028 + newent->next = *match;
58029 + *match = newent;
58030 +
58031 + return;
58032 +}
58033 +
58034 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
58035 +{
58036 + struct conn_table_entry *match, *last = NULL;
58037 + unsigned int index;
58038 +
58039 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58040 + sig->gr_sport, sig->gr_dport,
58041 + gr_conn_table_size);
58042 +
58043 + match = gr_conn_table[index];
58044 + while (match && !conn_match(match->sig,
58045 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
58046 + sig->gr_dport)) {
58047 + last = match;
58048 + match = match->next;
58049 + }
58050 +
58051 + if (match) {
58052 + if (last)
58053 + last->next = match->next;
58054 + else
58055 + gr_conn_table[index] = NULL;
58056 + kfree(match);
58057 + }
58058 +
58059 + return;
58060 +}
58061 +
58062 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
58063 + __u16 sport, __u16 dport)
58064 +{
58065 + struct conn_table_entry *match;
58066 + unsigned int index;
58067 +
58068 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
58069 +
58070 + match = gr_conn_table[index];
58071 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
58072 + match = match->next;
58073 +
58074 + if (match)
58075 + return match->sig;
58076 + else
58077 + return NULL;
58078 +}
58079 +
58080 +#endif
58081 +
58082 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
58083 +{
58084 +#ifdef CONFIG_GRKERNSEC
58085 + struct signal_struct *sig = task->signal;
58086 + struct conn_table_entry *newent;
58087 +
58088 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58089 + if (newent == NULL)
58090 + return;
58091 + /* no bh lock needed since we are called with bh disabled */
58092 + spin_lock(&gr_conn_table_lock);
58093 + gr_del_task_from_ip_table_nolock(sig);
58094 + sig->gr_saddr = inet->inet_rcv_saddr;
58095 + sig->gr_daddr = inet->inet_daddr;
58096 + sig->gr_sport = inet->inet_sport;
58097 + sig->gr_dport = inet->inet_dport;
58098 + gr_add_to_task_ip_table_nolock(sig, newent);
58099 + spin_unlock(&gr_conn_table_lock);
58100 +#endif
58101 + return;
58102 +}
58103 +
58104 +void gr_del_task_from_ip_table(struct task_struct *task)
58105 +{
58106 +#ifdef CONFIG_GRKERNSEC
58107 + spin_lock_bh(&gr_conn_table_lock);
58108 + gr_del_task_from_ip_table_nolock(task->signal);
58109 + spin_unlock_bh(&gr_conn_table_lock);
58110 +#endif
58111 + return;
58112 +}
58113 +
58114 +void
58115 +gr_attach_curr_ip(const struct sock *sk)
58116 +{
58117 +#ifdef CONFIG_GRKERNSEC
58118 + struct signal_struct *p, *set;
58119 + const struct inet_sock *inet = inet_sk(sk);
58120 +
58121 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58122 + return;
58123 +
58124 + set = current->signal;
58125 +
58126 + spin_lock_bh(&gr_conn_table_lock);
58127 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
58128 + inet->inet_dport, inet->inet_sport);
58129 + if (unlikely(p != NULL)) {
58130 + set->curr_ip = p->curr_ip;
58131 + set->used_accept = 1;
58132 + gr_del_task_from_ip_table_nolock(p);
58133 + spin_unlock_bh(&gr_conn_table_lock);
58134 + return;
58135 + }
58136 + spin_unlock_bh(&gr_conn_table_lock);
58137 +
58138 + set->curr_ip = inet->inet_daddr;
58139 + set->used_accept = 1;
58140 +#endif
58141 + return;
58142 +}
58143 +
58144 +int
58145 +gr_handle_sock_all(const int family, const int type, const int protocol)
58146 +{
58147 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58148 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
58149 + (family != AF_UNIX)) {
58150 + if (family == AF_INET)
58151 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
58152 + else
58153 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58154 + return -EACCES;
58155 + }
58156 +#endif
58157 + return 0;
58158 +}
58159 +
58160 +int
58161 +gr_handle_sock_server(const struct sockaddr *sck)
58162 +{
58163 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58164 + if (grsec_enable_socket_server &&
58165 + in_group_p(grsec_socket_server_gid) &&
58166 + sck && (sck->sa_family != AF_UNIX) &&
58167 + (sck->sa_family != AF_LOCAL)) {
58168 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58169 + return -EACCES;
58170 + }
58171 +#endif
58172 + return 0;
58173 +}
58174 +
58175 +int
58176 +gr_handle_sock_server_other(const struct sock *sck)
58177 +{
58178 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58179 + if (grsec_enable_socket_server &&
58180 + in_group_p(grsec_socket_server_gid) &&
58181 + sck && (sck->sk_family != AF_UNIX) &&
58182 + (sck->sk_family != AF_LOCAL)) {
58183 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58184 + return -EACCES;
58185 + }
58186 +#endif
58187 + return 0;
58188 +}
58189 +
58190 +int
58191 +gr_handle_sock_client(const struct sockaddr *sck)
58192 +{
58193 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58194 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58195 + sck && (sck->sa_family != AF_UNIX) &&
58196 + (sck->sa_family != AF_LOCAL)) {
58197 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58198 + return -EACCES;
58199 + }
58200 +#endif
58201 + return 0;
58202 +}
58203 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
58204 new file mode 100644
58205 index 0000000..8316f6f
58206 --- /dev/null
58207 +++ b/grsecurity/grsec_sysctl.c
58208 @@ -0,0 +1,453 @@
58209 +#include <linux/kernel.h>
58210 +#include <linux/sched.h>
58211 +#include <linux/sysctl.h>
58212 +#include <linux/grsecurity.h>
58213 +#include <linux/grinternal.h>
58214 +
58215 +int
58216 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58217 +{
58218 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58219 + if (dirname == NULL || name == NULL)
58220 + return 0;
58221 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58222 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58223 + return -EACCES;
58224 + }
58225 +#endif
58226 + return 0;
58227 +}
58228 +
58229 +#ifdef CONFIG_GRKERNSEC_ROFS
58230 +static int __maybe_unused one = 1;
58231 +#endif
58232 +
58233 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58234 +struct ctl_table grsecurity_table[] = {
58235 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58236 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58237 +#ifdef CONFIG_GRKERNSEC_IO
58238 + {
58239 + .procname = "disable_priv_io",
58240 + .data = &grsec_disable_privio,
58241 + .maxlen = sizeof(int),
58242 + .mode = 0600,
58243 + .proc_handler = &proc_dointvec,
58244 + },
58245 +#endif
58246 +#endif
58247 +#ifdef CONFIG_GRKERNSEC_LINK
58248 + {
58249 + .procname = "linking_restrictions",
58250 + .data = &grsec_enable_link,
58251 + .maxlen = sizeof(int),
58252 + .mode = 0600,
58253 + .proc_handler = &proc_dointvec,
58254 + },
58255 +#endif
58256 +#ifdef CONFIG_GRKERNSEC_BRUTE
58257 + {
58258 + .procname = "deter_bruteforce",
58259 + .data = &grsec_enable_brute,
58260 + .maxlen = sizeof(int),
58261 + .mode = 0600,
58262 + .proc_handler = &proc_dointvec,
58263 + },
58264 +#endif
58265 +#ifdef CONFIG_GRKERNSEC_FIFO
58266 + {
58267 + .procname = "fifo_restrictions",
58268 + .data = &grsec_enable_fifo,
58269 + .maxlen = sizeof(int),
58270 + .mode = 0600,
58271 + .proc_handler = &proc_dointvec,
58272 + },
58273 +#endif
58274 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58275 + {
58276 + .procname = "ptrace_readexec",
58277 + .data = &grsec_enable_ptrace_readexec,
58278 + .maxlen = sizeof(int),
58279 + .mode = 0600,
58280 + .proc_handler = &proc_dointvec,
58281 + },
58282 +#endif
58283 +#ifdef CONFIG_GRKERNSEC_SETXID
58284 + {
58285 + .procname = "consistent_setxid",
58286 + .data = &grsec_enable_setxid,
58287 + .maxlen = sizeof(int),
58288 + .mode = 0600,
58289 + .proc_handler = &proc_dointvec,
58290 + },
58291 +#endif
58292 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58293 + {
58294 + .procname = "ip_blackhole",
58295 + .data = &grsec_enable_blackhole,
58296 + .maxlen = sizeof(int),
58297 + .mode = 0600,
58298 + .proc_handler = &proc_dointvec,
58299 + },
58300 + {
58301 + .procname = "lastack_retries",
58302 + .data = &grsec_lastack_retries,
58303 + .maxlen = sizeof(int),
58304 + .mode = 0600,
58305 + .proc_handler = &proc_dointvec,
58306 + },
58307 +#endif
58308 +#ifdef CONFIG_GRKERNSEC_EXECLOG
58309 + {
58310 + .procname = "exec_logging",
58311 + .data = &grsec_enable_execlog,
58312 + .maxlen = sizeof(int),
58313 + .mode = 0600,
58314 + .proc_handler = &proc_dointvec,
58315 + },
58316 +#endif
58317 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58318 + {
58319 + .procname = "rwxmap_logging",
58320 + .data = &grsec_enable_log_rwxmaps,
58321 + .maxlen = sizeof(int),
58322 + .mode = 0600,
58323 + .proc_handler = &proc_dointvec,
58324 + },
58325 +#endif
58326 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58327 + {
58328 + .procname = "signal_logging",
58329 + .data = &grsec_enable_signal,
58330 + .maxlen = sizeof(int),
58331 + .mode = 0600,
58332 + .proc_handler = &proc_dointvec,
58333 + },
58334 +#endif
58335 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
58336 + {
58337 + .procname = "forkfail_logging",
58338 + .data = &grsec_enable_forkfail,
58339 + .maxlen = sizeof(int),
58340 + .mode = 0600,
58341 + .proc_handler = &proc_dointvec,
58342 + },
58343 +#endif
58344 +#ifdef CONFIG_GRKERNSEC_TIME
58345 + {
58346 + .procname = "timechange_logging",
58347 + .data = &grsec_enable_time,
58348 + .maxlen = sizeof(int),
58349 + .mode = 0600,
58350 + .proc_handler = &proc_dointvec,
58351 + },
58352 +#endif
58353 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58354 + {
58355 + .procname = "chroot_deny_shmat",
58356 + .data = &grsec_enable_chroot_shmat,
58357 + .maxlen = sizeof(int),
58358 + .mode = 0600,
58359 + .proc_handler = &proc_dointvec,
58360 + },
58361 +#endif
58362 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58363 + {
58364 + .procname = "chroot_deny_unix",
58365 + .data = &grsec_enable_chroot_unix,
58366 + .maxlen = sizeof(int),
58367 + .mode = 0600,
58368 + .proc_handler = &proc_dointvec,
58369 + },
58370 +#endif
58371 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58372 + {
58373 + .procname = "chroot_deny_mount",
58374 + .data = &grsec_enable_chroot_mount,
58375 + .maxlen = sizeof(int),
58376 + .mode = 0600,
58377 + .proc_handler = &proc_dointvec,
58378 + },
58379 +#endif
58380 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58381 + {
58382 + .procname = "chroot_deny_fchdir",
58383 + .data = &grsec_enable_chroot_fchdir,
58384 + .maxlen = sizeof(int),
58385 + .mode = 0600,
58386 + .proc_handler = &proc_dointvec,
58387 + },
58388 +#endif
58389 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58390 + {
58391 + .procname = "chroot_deny_chroot",
58392 + .data = &grsec_enable_chroot_double,
58393 + .maxlen = sizeof(int),
58394 + .mode = 0600,
58395 + .proc_handler = &proc_dointvec,
58396 + },
58397 +#endif
58398 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58399 + {
58400 + .procname = "chroot_deny_pivot",
58401 + .data = &grsec_enable_chroot_pivot,
58402 + .maxlen = sizeof(int),
58403 + .mode = 0600,
58404 + .proc_handler = &proc_dointvec,
58405 + },
58406 +#endif
58407 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58408 + {
58409 + .procname = "chroot_enforce_chdir",
58410 + .data = &grsec_enable_chroot_chdir,
58411 + .maxlen = sizeof(int),
58412 + .mode = 0600,
58413 + .proc_handler = &proc_dointvec,
58414 + },
58415 +#endif
58416 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58417 + {
58418 + .procname = "chroot_deny_chmod",
58419 + .data = &grsec_enable_chroot_chmod,
58420 + .maxlen = sizeof(int),
58421 + .mode = 0600,
58422 + .proc_handler = &proc_dointvec,
58423 + },
58424 +#endif
58425 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58426 + {
58427 + .procname = "chroot_deny_mknod",
58428 + .data = &grsec_enable_chroot_mknod,
58429 + .maxlen = sizeof(int),
58430 + .mode = 0600,
58431 + .proc_handler = &proc_dointvec,
58432 + },
58433 +#endif
58434 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58435 + {
58436 + .procname = "chroot_restrict_nice",
58437 + .data = &grsec_enable_chroot_nice,
58438 + .maxlen = sizeof(int),
58439 + .mode = 0600,
58440 + .proc_handler = &proc_dointvec,
58441 + },
58442 +#endif
58443 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58444 + {
58445 + .procname = "chroot_execlog",
58446 + .data = &grsec_enable_chroot_execlog,
58447 + .maxlen = sizeof(int),
58448 + .mode = 0600,
58449 + .proc_handler = &proc_dointvec,
58450 + },
58451 +#endif
58452 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58453 + {
58454 + .procname = "chroot_caps",
58455 + .data = &grsec_enable_chroot_caps,
58456 + .maxlen = sizeof(int),
58457 + .mode = 0600,
58458 + .proc_handler = &proc_dointvec,
58459 + },
58460 +#endif
58461 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58462 + {
58463 + .procname = "chroot_deny_sysctl",
58464 + .data = &grsec_enable_chroot_sysctl,
58465 + .maxlen = sizeof(int),
58466 + .mode = 0600,
58467 + .proc_handler = &proc_dointvec,
58468 + },
58469 +#endif
58470 +#ifdef CONFIG_GRKERNSEC_TPE
58471 + {
58472 + .procname = "tpe",
58473 + .data = &grsec_enable_tpe,
58474 + .maxlen = sizeof(int),
58475 + .mode = 0600,
58476 + .proc_handler = &proc_dointvec,
58477 + },
58478 + {
58479 + .procname = "tpe_gid",
58480 + .data = &grsec_tpe_gid,
58481 + .maxlen = sizeof(int),
58482 + .mode = 0600,
58483 + .proc_handler = &proc_dointvec,
58484 + },
58485 +#endif
58486 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58487 + {
58488 + .procname = "tpe_invert",
58489 + .data = &grsec_enable_tpe_invert,
58490 + .maxlen = sizeof(int),
58491 + .mode = 0600,
58492 + .proc_handler = &proc_dointvec,
58493 + },
58494 +#endif
58495 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58496 + {
58497 + .procname = "tpe_restrict_all",
58498 + .data = &grsec_enable_tpe_all,
58499 + .maxlen = sizeof(int),
58500 + .mode = 0600,
58501 + .proc_handler = &proc_dointvec,
58502 + },
58503 +#endif
58504 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58505 + {
58506 + .procname = "socket_all",
58507 + .data = &grsec_enable_socket_all,
58508 + .maxlen = sizeof(int),
58509 + .mode = 0600,
58510 + .proc_handler = &proc_dointvec,
58511 + },
58512 + {
58513 + .procname = "socket_all_gid",
58514 + .data = &grsec_socket_all_gid,
58515 + .maxlen = sizeof(int),
58516 + .mode = 0600,
58517 + .proc_handler = &proc_dointvec,
58518 + },
58519 +#endif
58520 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58521 + {
58522 + .procname = "socket_client",
58523 + .data = &grsec_enable_socket_client,
58524 + .maxlen = sizeof(int),
58525 + .mode = 0600,
58526 + .proc_handler = &proc_dointvec,
58527 + },
58528 + {
58529 + .procname = "socket_client_gid",
58530 + .data = &grsec_socket_client_gid,
58531 + .maxlen = sizeof(int),
58532 + .mode = 0600,
58533 + .proc_handler = &proc_dointvec,
58534 + },
58535 +#endif
58536 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58537 + {
58538 + .procname = "socket_server",
58539 + .data = &grsec_enable_socket_server,
58540 + .maxlen = sizeof(int),
58541 + .mode = 0600,
58542 + .proc_handler = &proc_dointvec,
58543 + },
58544 + {
58545 + .procname = "socket_server_gid",
58546 + .data = &grsec_socket_server_gid,
58547 + .maxlen = sizeof(int),
58548 + .mode = 0600,
58549 + .proc_handler = &proc_dointvec,
58550 + },
58551 +#endif
58552 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58553 + {
58554 + .procname = "audit_group",
58555 + .data = &grsec_enable_group,
58556 + .maxlen = sizeof(int),
58557 + .mode = 0600,
58558 + .proc_handler = &proc_dointvec,
58559 + },
58560 + {
58561 + .procname = "audit_gid",
58562 + .data = &grsec_audit_gid,
58563 + .maxlen = sizeof(int),
58564 + .mode = 0600,
58565 + .proc_handler = &proc_dointvec,
58566 + },
58567 +#endif
58568 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58569 + {
58570 + .procname = "audit_chdir",
58571 + .data = &grsec_enable_chdir,
58572 + .maxlen = sizeof(int),
58573 + .mode = 0600,
58574 + .proc_handler = &proc_dointvec,
58575 + },
58576 +#endif
58577 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58578 + {
58579 + .procname = "audit_mount",
58580 + .data = &grsec_enable_mount,
58581 + .maxlen = sizeof(int),
58582 + .mode = 0600,
58583 + .proc_handler = &proc_dointvec,
58584 + },
58585 +#endif
58586 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58587 + {
58588 + .procname = "audit_textrel",
58589 + .data = &grsec_enable_audit_textrel,
58590 + .maxlen = sizeof(int),
58591 + .mode = 0600,
58592 + .proc_handler = &proc_dointvec,
58593 + },
58594 +#endif
58595 +#ifdef CONFIG_GRKERNSEC_DMESG
58596 + {
58597 + .procname = "dmesg",
58598 + .data = &grsec_enable_dmesg,
58599 + .maxlen = sizeof(int),
58600 + .mode = 0600,
58601 + .proc_handler = &proc_dointvec,
58602 + },
58603 +#endif
58604 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58605 + {
58606 + .procname = "chroot_findtask",
58607 + .data = &grsec_enable_chroot_findtask,
58608 + .maxlen = sizeof(int),
58609 + .mode = 0600,
58610 + .proc_handler = &proc_dointvec,
58611 + },
58612 +#endif
58613 +#ifdef CONFIG_GRKERNSEC_RESLOG
58614 + {
58615 + .procname = "resource_logging",
58616 + .data = &grsec_resource_logging,
58617 + .maxlen = sizeof(int),
58618 + .mode = 0600,
58619 + .proc_handler = &proc_dointvec,
58620 + },
58621 +#endif
58622 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58623 + {
58624 + .procname = "audit_ptrace",
58625 + .data = &grsec_enable_audit_ptrace,
58626 + .maxlen = sizeof(int),
58627 + .mode = 0600,
58628 + .proc_handler = &proc_dointvec,
58629 + },
58630 +#endif
58631 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58632 + {
58633 + .procname = "harden_ptrace",
58634 + .data = &grsec_enable_harden_ptrace,
58635 + .maxlen = sizeof(int),
58636 + .mode = 0600,
58637 + .proc_handler = &proc_dointvec,
58638 + },
58639 +#endif
58640 + {
58641 + .procname = "grsec_lock",
58642 + .data = &grsec_lock,
58643 + .maxlen = sizeof(int),
58644 + .mode = 0600,
58645 + .proc_handler = &proc_dointvec,
58646 + },
58647 +#endif
58648 +#ifdef CONFIG_GRKERNSEC_ROFS
58649 + {
58650 + .procname = "romount_protect",
58651 + .data = &grsec_enable_rofs,
58652 + .maxlen = sizeof(int),
58653 + .mode = 0600,
58654 + .proc_handler = &proc_dointvec_minmax,
58655 + .extra1 = &one,
58656 + .extra2 = &one,
58657 + },
58658 +#endif
58659 + { }
58660 +};
58661 +#endif
58662 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
58663 new file mode 100644
58664 index 0000000..0dc13c3
58665 --- /dev/null
58666 +++ b/grsecurity/grsec_time.c
58667 @@ -0,0 +1,16 @@
58668 +#include <linux/kernel.h>
58669 +#include <linux/sched.h>
58670 +#include <linux/grinternal.h>
58671 +#include <linux/module.h>
58672 +
58673 +void
58674 +gr_log_timechange(void)
58675 +{
58676 +#ifdef CONFIG_GRKERNSEC_TIME
58677 + if (grsec_enable_time)
58678 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
58679 +#endif
58680 + return;
58681 +}
58682 +
58683 +EXPORT_SYMBOL(gr_log_timechange);
58684 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
58685 new file mode 100644
58686 index 0000000..07e0dc0
58687 --- /dev/null
58688 +++ b/grsecurity/grsec_tpe.c
58689 @@ -0,0 +1,73 @@
58690 +#include <linux/kernel.h>
58691 +#include <linux/sched.h>
58692 +#include <linux/file.h>
58693 +#include <linux/fs.h>
58694 +#include <linux/grinternal.h>
58695 +
58696 +extern int gr_acl_tpe_check(void);
58697 +
58698 +int
58699 +gr_tpe_allow(const struct file *file)
58700 +{
58701 +#ifdef CONFIG_GRKERNSEC
58702 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
58703 + const struct cred *cred = current_cred();
58704 + char *msg = NULL;
58705 + char *msg2 = NULL;
58706 +
58707 + // never restrict root
58708 + if (!cred->uid)
58709 + return 1;
58710 +
58711 + if (grsec_enable_tpe) {
58712 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58713 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
58714 + msg = "not being in trusted group";
58715 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
58716 + msg = "being in untrusted group";
58717 +#else
58718 + if (in_group_p(grsec_tpe_gid))
58719 + msg = "being in untrusted group";
58720 +#endif
58721 + }
58722 + if (!msg && gr_acl_tpe_check())
58723 + msg = "being in untrusted role";
58724 +
58725 + // not in any affected group/role
58726 + if (!msg)
58727 + goto next_check;
58728 +
58729 + if (inode->i_uid)
58730 + msg2 = "file in non-root-owned directory";
58731 + else if (inode->i_mode & S_IWOTH)
58732 + msg2 = "file in world-writable directory";
58733 + else if (inode->i_mode & S_IWGRP)
58734 + msg2 = "file in group-writable directory";
58735 +
58736 + if (msg && msg2) {
58737 + char fullmsg[70] = {0};
58738 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
58739 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
58740 + return 0;
58741 + }
58742 + msg = NULL;
58743 +next_check:
58744 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58745 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
58746 + return 1;
58747 +
58748 + if (inode->i_uid && (inode->i_uid != cred->uid))
58749 + msg = "directory not owned by user";
58750 + else if (inode->i_mode & S_IWOTH)
58751 + msg = "file in world-writable directory";
58752 + else if (inode->i_mode & S_IWGRP)
58753 + msg = "file in group-writable directory";
58754 +
58755 + if (msg) {
58756 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
58757 + return 0;
58758 + }
58759 +#endif
58760 +#endif
58761 + return 1;
58762 +}
58763 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
58764 new file mode 100644
58765 index 0000000..9f7b1ac
58766 --- /dev/null
58767 +++ b/grsecurity/grsum.c
58768 @@ -0,0 +1,61 @@
58769 +#include <linux/err.h>
58770 +#include <linux/kernel.h>
58771 +#include <linux/sched.h>
58772 +#include <linux/mm.h>
58773 +#include <linux/scatterlist.h>
58774 +#include <linux/crypto.h>
58775 +#include <linux/gracl.h>
58776 +
58777 +
58778 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
58779 +#error "crypto and sha256 must be built into the kernel"
58780 +#endif
58781 +
58782 +int
58783 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
58784 +{
58785 + char *p;
58786 + struct crypto_hash *tfm;
58787 + struct hash_desc desc;
58788 + struct scatterlist sg;
58789 + unsigned char temp_sum[GR_SHA_LEN];
58790 + volatile int retval = 0;
58791 + volatile int dummy = 0;
58792 + unsigned int i;
58793 +
58794 + sg_init_table(&sg, 1);
58795 +
58796 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
58797 + if (IS_ERR(tfm)) {
58798 + /* should never happen, since sha256 should be built in */
58799 + return 1;
58800 + }
58801 +
58802 + desc.tfm = tfm;
58803 + desc.flags = 0;
58804 +
58805 + crypto_hash_init(&desc);
58806 +
58807 + p = salt;
58808 + sg_set_buf(&sg, p, GR_SALT_LEN);
58809 + crypto_hash_update(&desc, &sg, sg.length);
58810 +
58811 + p = entry->pw;
58812 + sg_set_buf(&sg, p, strlen(p));
58813 +
58814 + crypto_hash_update(&desc, &sg, sg.length);
58815 +
58816 + crypto_hash_final(&desc, temp_sum);
58817 +
58818 + memset(entry->pw, 0, GR_PW_LEN);
58819 +
58820 + for (i = 0; i < GR_SHA_LEN; i++)
58821 + if (sum[i] != temp_sum[i])
58822 + retval = 1;
58823 + else
58824 + dummy = 1; // waste a cycle
58825 +
58826 + crypto_free_hash(tfm);
58827 +
58828 + return retval;
58829 +}
58830 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
58831 index f1c8ca6..b5c1cc7 100644
58832 --- a/include/acpi/acpi_bus.h
58833 +++ b/include/acpi/acpi_bus.h
58834 @@ -107,7 +107,7 @@ struct acpi_device_ops {
58835 acpi_op_bind bind;
58836 acpi_op_unbind unbind;
58837 acpi_op_notify notify;
58838 -};
58839 +} __no_const;
58840
58841 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
58842
58843 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
58844 index b7babf0..71e4e74 100644
58845 --- a/include/asm-generic/atomic-long.h
58846 +++ b/include/asm-generic/atomic-long.h
58847 @@ -22,6 +22,12 @@
58848
58849 typedef atomic64_t atomic_long_t;
58850
58851 +#ifdef CONFIG_PAX_REFCOUNT
58852 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
58853 +#else
58854 +typedef atomic64_t atomic_long_unchecked_t;
58855 +#endif
58856 +
58857 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
58858
58859 static inline long atomic_long_read(atomic_long_t *l)
58860 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58861 return (long)atomic64_read(v);
58862 }
58863
58864 +#ifdef CONFIG_PAX_REFCOUNT
58865 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58866 +{
58867 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58868 +
58869 + return (long)atomic64_read_unchecked(v);
58870 +}
58871 +#endif
58872 +
58873 static inline void atomic_long_set(atomic_long_t *l, long i)
58874 {
58875 atomic64_t *v = (atomic64_t *)l;
58876 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
58877 atomic64_set(v, i);
58878 }
58879
58880 +#ifdef CONFIG_PAX_REFCOUNT
58881 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58882 +{
58883 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58884 +
58885 + atomic64_set_unchecked(v, i);
58886 +}
58887 +#endif
58888 +
58889 static inline void atomic_long_inc(atomic_long_t *l)
58890 {
58891 atomic64_t *v = (atomic64_t *)l;
58892 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
58893 atomic64_inc(v);
58894 }
58895
58896 +#ifdef CONFIG_PAX_REFCOUNT
58897 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58898 +{
58899 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58900 +
58901 + atomic64_inc_unchecked(v);
58902 +}
58903 +#endif
58904 +
58905 static inline void atomic_long_dec(atomic_long_t *l)
58906 {
58907 atomic64_t *v = (atomic64_t *)l;
58908 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
58909 atomic64_dec(v);
58910 }
58911
58912 +#ifdef CONFIG_PAX_REFCOUNT
58913 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
58914 +{
58915 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58916 +
58917 + atomic64_dec_unchecked(v);
58918 +}
58919 +#endif
58920 +
58921 static inline void atomic_long_add(long i, atomic_long_t *l)
58922 {
58923 atomic64_t *v = (atomic64_t *)l;
58924 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
58925 atomic64_add(i, v);
58926 }
58927
58928 +#ifdef CONFIG_PAX_REFCOUNT
58929 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58930 +{
58931 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58932 +
58933 + atomic64_add_unchecked(i, v);
58934 +}
58935 +#endif
58936 +
58937 static inline void atomic_long_sub(long i, atomic_long_t *l)
58938 {
58939 atomic64_t *v = (atomic64_t *)l;
58940 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
58941 atomic64_sub(i, v);
58942 }
58943
58944 +#ifdef CONFIG_PAX_REFCOUNT
58945 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
58946 +{
58947 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58948 +
58949 + atomic64_sub_unchecked(i, v);
58950 +}
58951 +#endif
58952 +
58953 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
58954 {
58955 atomic64_t *v = (atomic64_t *)l;
58956 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
58957 return (long)atomic64_inc_return(v);
58958 }
58959
58960 +#ifdef CONFIG_PAX_REFCOUNT
58961 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
58962 +{
58963 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58964 +
58965 + return (long)atomic64_inc_return_unchecked(v);
58966 +}
58967 +#endif
58968 +
58969 static inline long atomic_long_dec_return(atomic_long_t *l)
58970 {
58971 atomic64_t *v = (atomic64_t *)l;
58972 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
58973
58974 typedef atomic_t atomic_long_t;
58975
58976 +#ifdef CONFIG_PAX_REFCOUNT
58977 +typedef atomic_unchecked_t atomic_long_unchecked_t;
58978 +#else
58979 +typedef atomic_t atomic_long_unchecked_t;
58980 +#endif
58981 +
58982 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
58983 static inline long atomic_long_read(atomic_long_t *l)
58984 {
58985 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58986 return (long)atomic_read(v);
58987 }
58988
58989 +#ifdef CONFIG_PAX_REFCOUNT
58990 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58991 +{
58992 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58993 +
58994 + return (long)atomic_read_unchecked(v);
58995 +}
58996 +#endif
58997 +
58998 static inline void atomic_long_set(atomic_long_t *l, long i)
58999 {
59000 atomic_t *v = (atomic_t *)l;
59001 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59002 atomic_set(v, i);
59003 }
59004
59005 +#ifdef CONFIG_PAX_REFCOUNT
59006 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59007 +{
59008 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59009 +
59010 + atomic_set_unchecked(v, i);
59011 +}
59012 +#endif
59013 +
59014 static inline void atomic_long_inc(atomic_long_t *l)
59015 {
59016 atomic_t *v = (atomic_t *)l;
59017 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59018 atomic_inc(v);
59019 }
59020
59021 +#ifdef CONFIG_PAX_REFCOUNT
59022 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59023 +{
59024 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59025 +
59026 + atomic_inc_unchecked(v);
59027 +}
59028 +#endif
59029 +
59030 static inline void atomic_long_dec(atomic_long_t *l)
59031 {
59032 atomic_t *v = (atomic_t *)l;
59033 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59034 atomic_dec(v);
59035 }
59036
59037 +#ifdef CONFIG_PAX_REFCOUNT
59038 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59039 +{
59040 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59041 +
59042 + atomic_dec_unchecked(v);
59043 +}
59044 +#endif
59045 +
59046 static inline void atomic_long_add(long i, atomic_long_t *l)
59047 {
59048 atomic_t *v = (atomic_t *)l;
59049 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59050 atomic_add(i, v);
59051 }
59052
59053 +#ifdef CONFIG_PAX_REFCOUNT
59054 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59055 +{
59056 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59057 +
59058 + atomic_add_unchecked(i, v);
59059 +}
59060 +#endif
59061 +
59062 static inline void atomic_long_sub(long i, atomic_long_t *l)
59063 {
59064 atomic_t *v = (atomic_t *)l;
59065 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59066 atomic_sub(i, v);
59067 }
59068
59069 +#ifdef CONFIG_PAX_REFCOUNT
59070 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59071 +{
59072 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59073 +
59074 + atomic_sub_unchecked(i, v);
59075 +}
59076 +#endif
59077 +
59078 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59079 {
59080 atomic_t *v = (atomic_t *)l;
59081 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59082 return (long)atomic_inc_return(v);
59083 }
59084
59085 +#ifdef CONFIG_PAX_REFCOUNT
59086 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59087 +{
59088 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59089 +
59090 + return (long)atomic_inc_return_unchecked(v);
59091 +}
59092 +#endif
59093 +
59094 static inline long atomic_long_dec_return(atomic_long_t *l)
59095 {
59096 atomic_t *v = (atomic_t *)l;
59097 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59098
59099 #endif /* BITS_PER_LONG == 64 */
59100
59101 +#ifdef CONFIG_PAX_REFCOUNT
59102 +static inline void pax_refcount_needs_these_functions(void)
59103 +{
59104 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
59105 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
59106 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
59107 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
59108 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
59109 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
59110 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
59111 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
59112 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
59113 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
59114 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
59115 +
59116 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
59117 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
59118 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
59119 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
59120 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
59121 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
59122 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
59123 +}
59124 +#else
59125 +#define atomic_read_unchecked(v) atomic_read(v)
59126 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
59127 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
59128 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
59129 +#define atomic_inc_unchecked(v) atomic_inc(v)
59130 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
59131 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
59132 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
59133 +#define atomic_dec_unchecked(v) atomic_dec(v)
59134 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
59135 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
59136 +
59137 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
59138 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
59139 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
59140 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
59141 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
59142 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
59143 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
59144 +#endif
59145 +
59146 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
59147 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
59148 index b18ce4f..2ee2843 100644
59149 --- a/include/asm-generic/atomic64.h
59150 +++ b/include/asm-generic/atomic64.h
59151 @@ -16,6 +16,8 @@ typedef struct {
59152 long long counter;
59153 } atomic64_t;
59154
59155 +typedef atomic64_t atomic64_unchecked_t;
59156 +
59157 #define ATOMIC64_INIT(i) { (i) }
59158
59159 extern long long atomic64_read(const atomic64_t *v);
59160 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
59161 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
59162 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
59163
59164 +#define atomic64_read_unchecked(v) atomic64_read(v)
59165 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
59166 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
59167 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
59168 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
59169 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
59170 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
59171 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
59172 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
59173 +
59174 #endif /* _ASM_GENERIC_ATOMIC64_H */
59175 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59176 index 1bfcfe5..e04c5c9 100644
59177 --- a/include/asm-generic/cache.h
59178 +++ b/include/asm-generic/cache.h
59179 @@ -6,7 +6,7 @@
59180 * cache lines need to provide their own cache.h.
59181 */
59182
59183 -#define L1_CACHE_SHIFT 5
59184 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
59185 +#define L1_CACHE_SHIFT 5UL
59186 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
59187
59188 #endif /* __ASM_GENERIC_CACHE_H */
59189 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
59190 index 0d68a1e..b74a761 100644
59191 --- a/include/asm-generic/emergency-restart.h
59192 +++ b/include/asm-generic/emergency-restart.h
59193 @@ -1,7 +1,7 @@
59194 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
59195 #define _ASM_GENERIC_EMERGENCY_RESTART_H
59196
59197 -static inline void machine_emergency_restart(void)
59198 +static inline __noreturn void machine_emergency_restart(void)
59199 {
59200 machine_restart(NULL);
59201 }
59202 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
59203 index 0232ccb..13d9165 100644
59204 --- a/include/asm-generic/kmap_types.h
59205 +++ b/include/asm-generic/kmap_types.h
59206 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
59207 KMAP_D(17) KM_NMI,
59208 KMAP_D(18) KM_NMI_PTE,
59209 KMAP_D(19) KM_KDB,
59210 +KMAP_D(20) KM_CLEARPAGE,
59211 /*
59212 * Remember to update debug_kmap_atomic() when adding new kmap types!
59213 */
59214 -KMAP_D(20) KM_TYPE_NR
59215 +KMAP_D(21) KM_TYPE_NR
59216 };
59217
59218 #undef KMAP_D
59219 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
59220 index 9ceb03b..2efbcbd 100644
59221 --- a/include/asm-generic/local.h
59222 +++ b/include/asm-generic/local.h
59223 @@ -39,6 +39,7 @@ typedef struct
59224 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
59225 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
59226 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
59227 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
59228
59229 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
59230 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
59231 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
59232 index 725612b..9cc513a 100644
59233 --- a/include/asm-generic/pgtable-nopmd.h
59234 +++ b/include/asm-generic/pgtable-nopmd.h
59235 @@ -1,14 +1,19 @@
59236 #ifndef _PGTABLE_NOPMD_H
59237 #define _PGTABLE_NOPMD_H
59238
59239 -#ifndef __ASSEMBLY__
59240 -
59241 #include <asm-generic/pgtable-nopud.h>
59242
59243 -struct mm_struct;
59244 -
59245 #define __PAGETABLE_PMD_FOLDED
59246
59247 +#define PMD_SHIFT PUD_SHIFT
59248 +#define PTRS_PER_PMD 1
59249 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
59250 +#define PMD_MASK (~(PMD_SIZE-1))
59251 +
59252 +#ifndef __ASSEMBLY__
59253 +
59254 +struct mm_struct;
59255 +
59256 /*
59257 * Having the pmd type consist of a pud gets the size right, and allows
59258 * us to conceptually access the pud entry that this pmd is folded into
59259 @@ -16,11 +21,6 @@ struct mm_struct;
59260 */
59261 typedef struct { pud_t pud; } pmd_t;
59262
59263 -#define PMD_SHIFT PUD_SHIFT
59264 -#define PTRS_PER_PMD 1
59265 -#define PMD_SIZE (1UL << PMD_SHIFT)
59266 -#define PMD_MASK (~(PMD_SIZE-1))
59267 -
59268 /*
59269 * The "pud_xxx()" functions here are trivial for a folded two-level
59270 * setup: the pmd is never bad, and a pmd always exists (as it's folded
59271 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
59272 index 810431d..0ec4804f 100644
59273 --- a/include/asm-generic/pgtable-nopud.h
59274 +++ b/include/asm-generic/pgtable-nopud.h
59275 @@ -1,10 +1,15 @@
59276 #ifndef _PGTABLE_NOPUD_H
59277 #define _PGTABLE_NOPUD_H
59278
59279 -#ifndef __ASSEMBLY__
59280 -
59281 #define __PAGETABLE_PUD_FOLDED
59282
59283 +#define PUD_SHIFT PGDIR_SHIFT
59284 +#define PTRS_PER_PUD 1
59285 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
59286 +#define PUD_MASK (~(PUD_SIZE-1))
59287 +
59288 +#ifndef __ASSEMBLY__
59289 +
59290 /*
59291 * Having the pud type consist of a pgd gets the size right, and allows
59292 * us to conceptually access the pgd entry that this pud is folded into
59293 @@ -12,11 +17,6 @@
59294 */
59295 typedef struct { pgd_t pgd; } pud_t;
59296
59297 -#define PUD_SHIFT PGDIR_SHIFT
59298 -#define PTRS_PER_PUD 1
59299 -#define PUD_SIZE (1UL << PUD_SHIFT)
59300 -#define PUD_MASK (~(PUD_SIZE-1))
59301 -
59302 /*
59303 * The "pgd_xxx()" functions here are trivial for a folded two-level
59304 * setup: the pud is never bad, and a pud always exists (as it's folded
59305 @@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
59306 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
59307
59308 #define pgd_populate(mm, pgd, pud) do { } while (0)
59309 +#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
59310 /*
59311 * (puds are folded into pgds so this doesn't get actually called,
59312 * but the define is needed for a generic inline function.)
59313 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
59314 index 125c54e..e95c18e 100644
59315 --- a/include/asm-generic/pgtable.h
59316 +++ b/include/asm-generic/pgtable.h
59317 @@ -446,6 +446,18 @@ static inline int pmd_write(pmd_t pmd)
59318 #endif /* __HAVE_ARCH_PMD_WRITE */
59319 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
59320
59321 +#ifndef __HAVE_ARCH_READ_PMD_ATOMIC
59322 +static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
59323 +{
59324 + /*
59325 + * Depend on compiler for an atomic pmd read. NOTE: this is
59326 + * only going to work, if the pmdval_t isn't larger than
59327 + * an unsigned long.
59328 + */
59329 + return *pmdp;
59330 +}
59331 +#endif /* __HAVE_ARCH_READ_PMD_ATOMIC */
59332 +
59333 /*
59334 * This function is meant to be used by sites walking pagetables with
59335 * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
59336 @@ -459,11 +471,17 @@ static inline int pmd_write(pmd_t pmd)
59337 * undefined so behaving like if the pmd was none is safe (because it
59338 * can return none anyway). The compiler level barrier() is critically
59339 * important to compute the two checks atomically on the same pmdval.
59340 + *
59341 + * For 32bit kernels with a 64bit large pmd_t this automatically takes
59342 + * care of reading the pmd atomically to avoid SMP race conditions
59343 + * against pmd_populate() when the mmap_sem is hold for reading by the
59344 + * caller (a special atomic read not done by "gcc" as in the generic
59345 + * version above, is also needed when THP is disabled because the page
59346 + * fault can populate the pmd from under us).
59347 */
59348 static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
59349 {
59350 - /* depend on compiler for an atomic pmd read */
59351 - pmd_t pmdval = *pmd;
59352 + pmd_t pmdval = read_pmd_atomic(pmd);
59353 /*
59354 * The barrier will stabilize the pmdval in a register or on
59355 * the stack so that it will stop changing under the code.
59356 @@ -503,6 +521,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
59357 #endif
59358 }
59359
59360 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
59361 +static inline unsigned long pax_open_kernel(void) { return 0; }
59362 +#endif
59363 +
59364 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
59365 +static inline unsigned long pax_close_kernel(void) { return 0; }
59366 +#endif
59367 +
59368 #endif /* CONFIG_MMU */
59369
59370 #endif /* !__ASSEMBLY__ */
59371 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
59372 index 8aeadf6..f1dc019 100644
59373 --- a/include/asm-generic/vmlinux.lds.h
59374 +++ b/include/asm-generic/vmlinux.lds.h
59375 @@ -218,6 +218,7 @@
59376 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
59377 VMLINUX_SYMBOL(__start_rodata) = .; \
59378 *(.rodata) *(.rodata.*) \
59379 + *(.data..read_only) \
59380 *(__vermagic) /* Kernel version magic */ \
59381 . = ALIGN(8); \
59382 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
59383 @@ -716,17 +717,18 @@
59384 * section in the linker script will go there too. @phdr should have
59385 * a leading colon.
59386 *
59387 - * Note that this macros defines __per_cpu_load as an absolute symbol.
59388 + * Note that this macros defines per_cpu_load as an absolute symbol.
59389 * If there is no need to put the percpu section at a predetermined
59390 * address, use PERCPU_SECTION.
59391 */
59392 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
59393 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
59394 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
59395 + per_cpu_load = .; \
59396 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
59397 - LOAD_OFFSET) { \
59398 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
59399 PERCPU_INPUT(cacheline) \
59400 } phdr \
59401 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
59402 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
59403
59404 /**
59405 * PERCPU_SECTION - define output section for percpu area, simple version
59406 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
59407 index dd73104..fde86bd 100644
59408 --- a/include/drm/drmP.h
59409 +++ b/include/drm/drmP.h
59410 @@ -72,6 +72,7 @@
59411 #include <linux/workqueue.h>
59412 #include <linux/poll.h>
59413 #include <asm/pgalloc.h>
59414 +#include <asm/local.h>
59415 #include "drm.h"
59416
59417 #include <linux/idr.h>
59418 @@ -1074,7 +1075,7 @@ struct drm_device {
59419
59420 /** \name Usage Counters */
59421 /*@{ */
59422 - int open_count; /**< Outstanding files open */
59423 + local_t open_count; /**< Outstanding files open */
59424 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
59425 atomic_t vma_count; /**< Outstanding vma areas open */
59426 int buf_use; /**< Buffers in use -- cannot alloc */
59427 @@ -1085,7 +1086,7 @@ struct drm_device {
59428 /*@{ */
59429 unsigned long counters;
59430 enum drm_stat_type types[15];
59431 - atomic_t counts[15];
59432 + atomic_unchecked_t counts[15];
59433 /*@} */
59434
59435 struct list_head filelist;
59436 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
59437 index 37515d1..34fa8b0 100644
59438 --- a/include/drm/drm_crtc_helper.h
59439 +++ b/include/drm/drm_crtc_helper.h
59440 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
59441
59442 /* disable crtc when not in use - more explicit than dpms off */
59443 void (*disable)(struct drm_crtc *crtc);
59444 -};
59445 +} __no_const;
59446
59447 struct drm_encoder_helper_funcs {
59448 void (*dpms)(struct drm_encoder *encoder, int mode);
59449 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
59450 struct drm_connector *connector);
59451 /* disable encoder when not in use - more explicit than dpms off */
59452 void (*disable)(struct drm_encoder *encoder);
59453 -};
59454 +} __no_const;
59455
59456 struct drm_connector_helper_funcs {
59457 int (*get_modes)(struct drm_connector *connector);
59458 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
59459 index d6d1da4..fdd1ac5 100644
59460 --- a/include/drm/ttm/ttm_memory.h
59461 +++ b/include/drm/ttm/ttm_memory.h
59462 @@ -48,7 +48,7 @@
59463
59464 struct ttm_mem_shrink {
59465 int (*do_shrink) (struct ttm_mem_shrink *);
59466 -};
59467 +} __no_const;
59468
59469 /**
59470 * struct ttm_mem_global - Global memory accounting structure.
59471 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
59472 index e86dfca..40cc55f 100644
59473 --- a/include/linux/a.out.h
59474 +++ b/include/linux/a.out.h
59475 @@ -39,6 +39,14 @@ enum machine_type {
59476 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
59477 };
59478
59479 +/* Constants for the N_FLAGS field */
59480 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59481 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
59482 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
59483 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
59484 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59485 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59486 +
59487 #if !defined (N_MAGIC)
59488 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
59489 #endif
59490 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
59491 index 06fd4bb..1caec0d 100644
59492 --- a/include/linux/atmdev.h
59493 +++ b/include/linux/atmdev.h
59494 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
59495 #endif
59496
59497 struct k_atm_aal_stats {
59498 -#define __HANDLE_ITEM(i) atomic_t i
59499 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
59500 __AAL_STAT_ITEMS
59501 #undef __HANDLE_ITEM
59502 };
59503 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
59504 index 366422b..1fa7f84 100644
59505 --- a/include/linux/binfmts.h
59506 +++ b/include/linux/binfmts.h
59507 @@ -89,6 +89,7 @@ struct linux_binfmt {
59508 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
59509 int (*load_shlib)(struct file *);
59510 int (*core_dump)(struct coredump_params *cprm);
59511 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
59512 unsigned long min_coredump; /* minimal dump size */
59513 };
59514
59515 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
59516 index 4d4ac24..2c3ccce 100644
59517 --- a/include/linux/blkdev.h
59518 +++ b/include/linux/blkdev.h
59519 @@ -1376,7 +1376,7 @@ struct block_device_operations {
59520 /* this callback is with swap_lock and sometimes page table lock held */
59521 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
59522 struct module *owner;
59523 -};
59524 +} __do_const;
59525
59526 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
59527 unsigned long);
59528 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
59529 index 4d1a074..88f929a 100644
59530 --- a/include/linux/blktrace_api.h
59531 +++ b/include/linux/blktrace_api.h
59532 @@ -162,7 +162,7 @@ struct blk_trace {
59533 struct dentry *dir;
59534 struct dentry *dropped_file;
59535 struct dentry *msg_file;
59536 - atomic_t dropped;
59537 + atomic_unchecked_t dropped;
59538 };
59539
59540 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
59541 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
59542 index 83195fb..0b0f77d 100644
59543 --- a/include/linux/byteorder/little_endian.h
59544 +++ b/include/linux/byteorder/little_endian.h
59545 @@ -42,51 +42,51 @@
59546
59547 static inline __le64 __cpu_to_le64p(const __u64 *p)
59548 {
59549 - return (__force __le64)*p;
59550 + return (__force const __le64)*p;
59551 }
59552 static inline __u64 __le64_to_cpup(const __le64 *p)
59553 {
59554 - return (__force __u64)*p;
59555 + return (__force const __u64)*p;
59556 }
59557 static inline __le32 __cpu_to_le32p(const __u32 *p)
59558 {
59559 - return (__force __le32)*p;
59560 + return (__force const __le32)*p;
59561 }
59562 static inline __u32 __le32_to_cpup(const __le32 *p)
59563 {
59564 - return (__force __u32)*p;
59565 + return (__force const __u32)*p;
59566 }
59567 static inline __le16 __cpu_to_le16p(const __u16 *p)
59568 {
59569 - return (__force __le16)*p;
59570 + return (__force const __le16)*p;
59571 }
59572 static inline __u16 __le16_to_cpup(const __le16 *p)
59573 {
59574 - return (__force __u16)*p;
59575 + return (__force const __u16)*p;
59576 }
59577 static inline __be64 __cpu_to_be64p(const __u64 *p)
59578 {
59579 - return (__force __be64)__swab64p(p);
59580 + return (__force const __be64)__swab64p(p);
59581 }
59582 static inline __u64 __be64_to_cpup(const __be64 *p)
59583 {
59584 - return __swab64p((__u64 *)p);
59585 + return __swab64p((const __u64 *)p);
59586 }
59587 static inline __be32 __cpu_to_be32p(const __u32 *p)
59588 {
59589 - return (__force __be32)__swab32p(p);
59590 + return (__force const __be32)__swab32p(p);
59591 }
59592 static inline __u32 __be32_to_cpup(const __be32 *p)
59593 {
59594 - return __swab32p((__u32 *)p);
59595 + return __swab32p((const __u32 *)p);
59596 }
59597 static inline __be16 __cpu_to_be16p(const __u16 *p)
59598 {
59599 - return (__force __be16)__swab16p(p);
59600 + return (__force const __be16)__swab16p(p);
59601 }
59602 static inline __u16 __be16_to_cpup(const __be16 *p)
59603 {
59604 - return __swab16p((__u16 *)p);
59605 + return __swab16p((const __u16 *)p);
59606 }
59607 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
59608 #define __le64_to_cpus(x) do { (void)(x); } while (0)
59609 diff --git a/include/linux/cache.h b/include/linux/cache.h
59610 index 4c57065..4307975 100644
59611 --- a/include/linux/cache.h
59612 +++ b/include/linux/cache.h
59613 @@ -16,6 +16,10 @@
59614 #define __read_mostly
59615 #endif
59616
59617 +#ifndef __read_only
59618 +#define __read_only __read_mostly
59619 +#endif
59620 +
59621 #ifndef ____cacheline_aligned
59622 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
59623 #endif
59624 diff --git a/include/linux/capability.h b/include/linux/capability.h
59625 index 12d52de..b5f7fa7 100644
59626 --- a/include/linux/capability.h
59627 +++ b/include/linux/capability.h
59628 @@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
59629 extern bool capable(int cap);
59630 extern bool ns_capable(struct user_namespace *ns, int cap);
59631 extern bool nsown_capable(int cap);
59632 +extern bool capable_nolog(int cap);
59633 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
59634
59635 /* audit system wants to get cap info from files as well */
59636 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
59637 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
59638 index 42e55de..1cd0e66 100644
59639 --- a/include/linux/cleancache.h
59640 +++ b/include/linux/cleancache.h
59641 @@ -31,7 +31,7 @@ struct cleancache_ops {
59642 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
59643 void (*invalidate_inode)(int, struct cleancache_filekey);
59644 void (*invalidate_fs)(int);
59645 -};
59646 +} __no_const;
59647
59648 extern struct cleancache_ops
59649 cleancache_register_ops(struct cleancache_ops *ops);
59650 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
59651 index 2f40791..a62d196 100644
59652 --- a/include/linux/compiler-gcc4.h
59653 +++ b/include/linux/compiler-gcc4.h
59654 @@ -32,6 +32,16 @@
59655 #define __linktime_error(message) __attribute__((__error__(message)))
59656
59657 #if __GNUC_MINOR__ >= 5
59658 +
59659 +#ifdef CONSTIFY_PLUGIN
59660 +#define __no_const __attribute__((no_const))
59661 +#define __do_const __attribute__((do_const))
59662 +#endif
59663 +
59664 +#ifdef SIZE_OVERFLOW_PLUGIN
59665 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
59666 +#endif
59667 +
59668 /*
59669 * Mark a position in code as unreachable. This can be used to
59670 * suppress control flow warnings after asm blocks that transfer
59671 @@ -47,6 +57,11 @@
59672 #define __noclone __attribute__((__noclone__))
59673
59674 #endif
59675 +
59676 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
59677 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
59678 +#define __bos0(ptr) __bos((ptr), 0)
59679 +#define __bos1(ptr) __bos((ptr), 1)
59680 #endif
59681
59682 #if __GNUC_MINOR__ > 0
59683 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
59684 index 923d093..726c17f 100644
59685 --- a/include/linux/compiler.h
59686 +++ b/include/linux/compiler.h
59687 @@ -5,31 +5,62 @@
59688
59689 #ifdef __CHECKER__
59690 # define __user __attribute__((noderef, address_space(1)))
59691 +# define __force_user __force __user
59692 # define __kernel __attribute__((address_space(0)))
59693 +# define __force_kernel __force __kernel
59694 # define __safe __attribute__((safe))
59695 # define __force __attribute__((force))
59696 # define __nocast __attribute__((nocast))
59697 # define __iomem __attribute__((noderef, address_space(2)))
59698 +# define __force_iomem __force __iomem
59699 # define __acquires(x) __attribute__((context(x,0,1)))
59700 # define __releases(x) __attribute__((context(x,1,0)))
59701 # define __acquire(x) __context__(x,1)
59702 # define __release(x) __context__(x,-1)
59703 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
59704 # define __percpu __attribute__((noderef, address_space(3)))
59705 +# define __force_percpu __force __percpu
59706 #ifdef CONFIG_SPARSE_RCU_POINTER
59707 # define __rcu __attribute__((noderef, address_space(4)))
59708 +# define __force_rcu __force __rcu
59709 #else
59710 # define __rcu
59711 +# define __force_rcu
59712 #endif
59713 extern void __chk_user_ptr(const volatile void __user *);
59714 extern void __chk_io_ptr(const volatile void __iomem *);
59715 +#elif defined(CHECKER_PLUGIN)
59716 +//# define __user
59717 +//# define __force_user
59718 +//# define __kernel
59719 +//# define __force_kernel
59720 +# define __safe
59721 +# define __force
59722 +# define __nocast
59723 +# define __iomem
59724 +# define __force_iomem
59725 +# define __chk_user_ptr(x) (void)0
59726 +# define __chk_io_ptr(x) (void)0
59727 +# define __builtin_warning(x, y...) (1)
59728 +# define __acquires(x)
59729 +# define __releases(x)
59730 +# define __acquire(x) (void)0
59731 +# define __release(x) (void)0
59732 +# define __cond_lock(x,c) (c)
59733 +# define __percpu
59734 +# define __force_percpu
59735 +# define __rcu
59736 +# define __force_rcu
59737 #else
59738 # define __user
59739 +# define __force_user
59740 # define __kernel
59741 +# define __force_kernel
59742 # define __safe
59743 # define __force
59744 # define __nocast
59745 # define __iomem
59746 +# define __force_iomem
59747 # define __chk_user_ptr(x) (void)0
59748 # define __chk_io_ptr(x) (void)0
59749 # define __builtin_warning(x, y...) (1)
59750 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
59751 # define __release(x) (void)0
59752 # define __cond_lock(x,c) (c)
59753 # define __percpu
59754 +# define __force_percpu
59755 # define __rcu
59756 +# define __force_rcu
59757 #endif
59758
59759 #ifdef __KERNEL__
59760 @@ -264,6 +297,18 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59761 # define __attribute_const__ /* unimplemented */
59762 #endif
59763
59764 +#ifndef __no_const
59765 +# define __no_const
59766 +#endif
59767 +
59768 +#ifndef __do_const
59769 +# define __do_const
59770 +#endif
59771 +
59772 +#ifndef __size_overflow
59773 +# define __size_overflow(...)
59774 +#endif
59775 +
59776 /*
59777 * Tell gcc if a function is cold. The compiler will assume any path
59778 * directly leading to the call is unlikely.
59779 @@ -273,6 +318,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59780 #define __cold
59781 #endif
59782
59783 +#ifndef __alloc_size
59784 +#define __alloc_size(...)
59785 +#endif
59786 +
59787 +#ifndef __bos
59788 +#define __bos(ptr, arg)
59789 +#endif
59790 +
59791 +#ifndef __bos0
59792 +#define __bos0(ptr)
59793 +#endif
59794 +
59795 +#ifndef __bos1
59796 +#define __bos1(ptr)
59797 +#endif
59798 +
59799 /* Simple shorthand for a section definition */
59800 #ifndef __section
59801 # define __section(S) __attribute__ ((__section__(#S)))
59802 @@ -308,6 +369,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59803 * use is to mediate communication between process-level code and irq/NMI
59804 * handlers, all running on the same CPU.
59805 */
59806 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
59807 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
59808 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
59809
59810 #endif /* __LINUX_COMPILER_H */
59811 diff --git a/include/linux/cred.h b/include/linux/cred.h
59812 index adadf71..6af5560 100644
59813 --- a/include/linux/cred.h
59814 +++ b/include/linux/cred.h
59815 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
59816 static inline void validate_process_creds(void)
59817 {
59818 }
59819 +static inline void validate_task_creds(struct task_struct *task)
59820 +{
59821 +}
59822 #endif
59823
59824 /**
59825 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
59826 index b92eadf..b4ecdc1 100644
59827 --- a/include/linux/crypto.h
59828 +++ b/include/linux/crypto.h
59829 @@ -373,7 +373,7 @@ struct cipher_tfm {
59830 const u8 *key, unsigned int keylen);
59831 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59832 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59833 -};
59834 +} __no_const;
59835
59836 struct hash_tfm {
59837 int (*init)(struct hash_desc *desc);
59838 @@ -394,13 +394,13 @@ struct compress_tfm {
59839 int (*cot_decompress)(struct crypto_tfm *tfm,
59840 const u8 *src, unsigned int slen,
59841 u8 *dst, unsigned int *dlen);
59842 -};
59843 +} __no_const;
59844
59845 struct rng_tfm {
59846 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
59847 unsigned int dlen);
59848 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
59849 -};
59850 +} __no_const;
59851
59852 #define crt_ablkcipher crt_u.ablkcipher
59853 #define crt_aead crt_u.aead
59854 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
59855 index 7925bf0..d5143d2 100644
59856 --- a/include/linux/decompress/mm.h
59857 +++ b/include/linux/decompress/mm.h
59858 @@ -77,7 +77,7 @@ static void free(void *where)
59859 * warnings when not needed (indeed large_malloc / large_free are not
59860 * needed by inflate */
59861
59862 -#define malloc(a) kmalloc(a, GFP_KERNEL)
59863 +#define malloc(a) kmalloc((a), GFP_KERNEL)
59864 #define free(a) kfree(a)
59865
59866 #define large_malloc(a) vmalloc(a)
59867 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
59868 index dfc099e..e583e66 100644
59869 --- a/include/linux/dma-mapping.h
59870 +++ b/include/linux/dma-mapping.h
59871 @@ -51,7 +51,7 @@ struct dma_map_ops {
59872 u64 (*get_required_mask)(struct device *dev);
59873 #endif
59874 int is_phys;
59875 -};
59876 +} __do_const;
59877
59878 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
59879
59880 diff --git a/include/linux/efi.h b/include/linux/efi.h
59881 index ec45ccd..9923c32 100644
59882 --- a/include/linux/efi.h
59883 +++ b/include/linux/efi.h
59884 @@ -635,7 +635,7 @@ struct efivar_operations {
59885 efi_get_variable_t *get_variable;
59886 efi_get_next_variable_t *get_next_variable;
59887 efi_set_variable_t *set_variable;
59888 -};
59889 +} __no_const;
59890
59891 struct efivars {
59892 /*
59893 diff --git a/include/linux/elf.h b/include/linux/elf.h
59894 index 999b4f5..57753b4 100644
59895 --- a/include/linux/elf.h
59896 +++ b/include/linux/elf.h
59897 @@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
59898 #define PT_GNU_EH_FRAME 0x6474e550
59899
59900 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
59901 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
59902 +
59903 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
59904 +
59905 +/* Constants for the e_flags field */
59906 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59907 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
59908 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
59909 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
59910 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59911 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59912
59913 /*
59914 * Extended Numbering
59915 @@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
59916 #define DT_DEBUG 21
59917 #define DT_TEXTREL 22
59918 #define DT_JMPREL 23
59919 +#define DT_FLAGS 30
59920 + #define DF_TEXTREL 0x00000004
59921 #define DT_ENCODING 32
59922 #define OLD_DT_LOOS 0x60000000
59923 #define DT_LOOS 0x6000000d
59924 @@ -243,6 +256,19 @@ typedef struct elf64_hdr {
59925 #define PF_W 0x2
59926 #define PF_X 0x1
59927
59928 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
59929 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
59930 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
59931 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
59932 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
59933 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
59934 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
59935 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
59936 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
59937 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
59938 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
59939 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
59940 +
59941 typedef struct elf32_phdr{
59942 Elf32_Word p_type;
59943 Elf32_Off p_offset;
59944 @@ -335,6 +361,8 @@ typedef struct elf64_shdr {
59945 #define EI_OSABI 7
59946 #define EI_PAD 8
59947
59948 +#define EI_PAX 14
59949 +
59950 #define ELFMAG0 0x7f /* EI_MAG */
59951 #define ELFMAG1 'E'
59952 #define ELFMAG2 'L'
59953 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
59954 #define elf_note elf32_note
59955 #define elf_addr_t Elf32_Off
59956 #define Elf_Half Elf32_Half
59957 +#define elf_dyn Elf32_Dyn
59958
59959 #else
59960
59961 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
59962 #define elf_note elf64_note
59963 #define elf_addr_t Elf64_Off
59964 #define Elf_Half Elf64_Half
59965 +#define elf_dyn Elf64_Dyn
59966
59967 #endif
59968
59969 diff --git a/include/linux/filter.h b/include/linux/filter.h
59970 index 8eeb205..d59bfa2 100644
59971 --- a/include/linux/filter.h
59972 +++ b/include/linux/filter.h
59973 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
59974
59975 struct sk_buff;
59976 struct sock;
59977 +struct bpf_jit_work;
59978
59979 struct sk_filter
59980 {
59981 @@ -141,6 +142,9 @@ struct sk_filter
59982 unsigned int len; /* Number of filter blocks */
59983 unsigned int (*bpf_func)(const struct sk_buff *skb,
59984 const struct sock_filter *filter);
59985 +#ifdef CONFIG_BPF_JIT
59986 + struct bpf_jit_work *work;
59987 +#endif
59988 struct rcu_head rcu;
59989 struct sock_filter insns[0];
59990 };
59991 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
59992 index cdc9b71..ce69fb5 100644
59993 --- a/include/linux/firewire.h
59994 +++ b/include/linux/firewire.h
59995 @@ -413,7 +413,7 @@ struct fw_iso_context {
59996 union {
59997 fw_iso_callback_t sc;
59998 fw_iso_mc_callback_t mc;
59999 - } callback;
60000 + } __no_const callback;
60001 void *callback_data;
60002 };
60003
60004 diff --git a/include/linux/fs.h b/include/linux/fs.h
60005 index 25c40b9..1bfd4f4 100644
60006 --- a/include/linux/fs.h
60007 +++ b/include/linux/fs.h
60008 @@ -1634,7 +1634,8 @@ struct file_operations {
60009 int (*setlease)(struct file *, long, struct file_lock **);
60010 long (*fallocate)(struct file *file, int mode, loff_t offset,
60011 loff_t len);
60012 -};
60013 +} __do_const;
60014 +typedef struct file_operations __no_const file_operations_no_const;
60015
60016 struct inode_operations {
60017 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
60018 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
60019 index 003dc0f..3c4ea97 100644
60020 --- a/include/linux/fs_struct.h
60021 +++ b/include/linux/fs_struct.h
60022 @@ -6,7 +6,7 @@
60023 #include <linux/seqlock.h>
60024
60025 struct fs_struct {
60026 - int users;
60027 + atomic_t users;
60028 spinlock_t lock;
60029 seqcount_t seq;
60030 int umask;
60031 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
60032 index ce31408..b1ad003 100644
60033 --- a/include/linux/fscache-cache.h
60034 +++ b/include/linux/fscache-cache.h
60035 @@ -102,7 +102,7 @@ struct fscache_operation {
60036 fscache_operation_release_t release;
60037 };
60038
60039 -extern atomic_t fscache_op_debug_id;
60040 +extern atomic_unchecked_t fscache_op_debug_id;
60041 extern void fscache_op_work_func(struct work_struct *work);
60042
60043 extern void fscache_enqueue_operation(struct fscache_operation *);
60044 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
60045 {
60046 INIT_WORK(&op->work, fscache_op_work_func);
60047 atomic_set(&op->usage, 1);
60048 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
60049 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60050 op->processor = processor;
60051 op->release = release;
60052 INIT_LIST_HEAD(&op->pend_link);
60053 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
60054 index a6dfe69..569586df 100644
60055 --- a/include/linux/fsnotify.h
60056 +++ b/include/linux/fsnotify.h
60057 @@ -315,7 +315,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
60058 */
60059 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
60060 {
60061 - return kstrdup(name, GFP_KERNEL);
60062 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
60063 }
60064
60065 /*
60066 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
60067 index 91d0e0a3..035666b 100644
60068 --- a/include/linux/fsnotify_backend.h
60069 +++ b/include/linux/fsnotify_backend.h
60070 @@ -105,6 +105,7 @@ struct fsnotify_ops {
60071 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
60072 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
60073 };
60074 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
60075
60076 /*
60077 * A group is a "thing" that wants to receive notification about filesystem
60078 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
60079 index 176a939..1462211 100644
60080 --- a/include/linux/ftrace_event.h
60081 +++ b/include/linux/ftrace_event.h
60082 @@ -97,7 +97,7 @@ struct trace_event_functions {
60083 trace_print_func raw;
60084 trace_print_func hex;
60085 trace_print_func binary;
60086 -};
60087 +} __no_const;
60088
60089 struct trace_event {
60090 struct hlist_node node;
60091 @@ -263,7 +263,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
60092 extern int trace_add_event_call(struct ftrace_event_call *call);
60093 extern void trace_remove_event_call(struct ftrace_event_call *call);
60094
60095 -#define is_signed_type(type) (((type)(-1)) < 0)
60096 +#define is_signed_type(type) (((type)(-1)) < (type)1)
60097
60098 int trace_set_clr_event(const char *system, const char *event, int set);
60099
60100 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
60101 index 017a7fb..33a8507 100644
60102 --- a/include/linux/genhd.h
60103 +++ b/include/linux/genhd.h
60104 @@ -185,7 +185,7 @@ struct gendisk {
60105 struct kobject *slave_dir;
60106
60107 struct timer_rand_state *random;
60108 - atomic_t sync_io; /* RAID */
60109 + atomic_unchecked_t sync_io; /* RAID */
60110 struct disk_events *ev;
60111 #ifdef CONFIG_BLK_DEV_INTEGRITY
60112 struct blk_integrity *integrity;
60113 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
60114 new file mode 100644
60115 index 0000000..c938b1f
60116 --- /dev/null
60117 +++ b/include/linux/gracl.h
60118 @@ -0,0 +1,319 @@
60119 +#ifndef GR_ACL_H
60120 +#define GR_ACL_H
60121 +
60122 +#include <linux/grdefs.h>
60123 +#include <linux/resource.h>
60124 +#include <linux/capability.h>
60125 +#include <linux/dcache.h>
60126 +#include <asm/resource.h>
60127 +
60128 +/* Major status information */
60129 +
60130 +#define GR_VERSION "grsecurity 2.9.1"
60131 +#define GRSECURITY_VERSION 0x2901
60132 +
60133 +enum {
60134 + GR_SHUTDOWN = 0,
60135 + GR_ENABLE = 1,
60136 + GR_SPROLE = 2,
60137 + GR_RELOAD = 3,
60138 + GR_SEGVMOD = 4,
60139 + GR_STATUS = 5,
60140 + GR_UNSPROLE = 6,
60141 + GR_PASSSET = 7,
60142 + GR_SPROLEPAM = 8,
60143 +};
60144 +
60145 +/* Password setup definitions
60146 + * kernel/grhash.c */
60147 +enum {
60148 + GR_PW_LEN = 128,
60149 + GR_SALT_LEN = 16,
60150 + GR_SHA_LEN = 32,
60151 +};
60152 +
60153 +enum {
60154 + GR_SPROLE_LEN = 64,
60155 +};
60156 +
60157 +enum {
60158 + GR_NO_GLOB = 0,
60159 + GR_REG_GLOB,
60160 + GR_CREATE_GLOB
60161 +};
60162 +
60163 +#define GR_NLIMITS 32
60164 +
60165 +/* Begin Data Structures */
60166 +
60167 +struct sprole_pw {
60168 + unsigned char *rolename;
60169 + unsigned char salt[GR_SALT_LEN];
60170 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
60171 +};
60172 +
60173 +struct name_entry {
60174 + __u32 key;
60175 + ino_t inode;
60176 + dev_t device;
60177 + char *name;
60178 + __u16 len;
60179 + __u8 deleted;
60180 + struct name_entry *prev;
60181 + struct name_entry *next;
60182 +};
60183 +
60184 +struct inodev_entry {
60185 + struct name_entry *nentry;
60186 + struct inodev_entry *prev;
60187 + struct inodev_entry *next;
60188 +};
60189 +
60190 +struct acl_role_db {
60191 + struct acl_role_label **r_hash;
60192 + __u32 r_size;
60193 +};
60194 +
60195 +struct inodev_db {
60196 + struct inodev_entry **i_hash;
60197 + __u32 i_size;
60198 +};
60199 +
60200 +struct name_db {
60201 + struct name_entry **n_hash;
60202 + __u32 n_size;
60203 +};
60204 +
60205 +struct crash_uid {
60206 + uid_t uid;
60207 + unsigned long expires;
60208 +};
60209 +
60210 +struct gr_hash_struct {
60211 + void **table;
60212 + void **nametable;
60213 + void *first;
60214 + __u32 table_size;
60215 + __u32 used_size;
60216 + int type;
60217 +};
60218 +
60219 +/* Userspace Grsecurity ACL data structures */
60220 +
60221 +struct acl_subject_label {
60222 + char *filename;
60223 + ino_t inode;
60224 + dev_t device;
60225 + __u32 mode;
60226 + kernel_cap_t cap_mask;
60227 + kernel_cap_t cap_lower;
60228 + kernel_cap_t cap_invert_audit;
60229 +
60230 + struct rlimit res[GR_NLIMITS];
60231 + __u32 resmask;
60232 +
60233 + __u8 user_trans_type;
60234 + __u8 group_trans_type;
60235 + uid_t *user_transitions;
60236 + gid_t *group_transitions;
60237 + __u16 user_trans_num;
60238 + __u16 group_trans_num;
60239 +
60240 + __u32 sock_families[2];
60241 + __u32 ip_proto[8];
60242 + __u32 ip_type;
60243 + struct acl_ip_label **ips;
60244 + __u32 ip_num;
60245 + __u32 inaddr_any_override;
60246 +
60247 + __u32 crashes;
60248 + unsigned long expires;
60249 +
60250 + struct acl_subject_label *parent_subject;
60251 + struct gr_hash_struct *hash;
60252 + struct acl_subject_label *prev;
60253 + struct acl_subject_label *next;
60254 +
60255 + struct acl_object_label **obj_hash;
60256 + __u32 obj_hash_size;
60257 + __u16 pax_flags;
60258 +};
60259 +
60260 +struct role_allowed_ip {
60261 + __u32 addr;
60262 + __u32 netmask;
60263 +
60264 + struct role_allowed_ip *prev;
60265 + struct role_allowed_ip *next;
60266 +};
60267 +
60268 +struct role_transition {
60269 + char *rolename;
60270 +
60271 + struct role_transition *prev;
60272 + struct role_transition *next;
60273 +};
60274 +
60275 +struct acl_role_label {
60276 + char *rolename;
60277 + uid_t uidgid;
60278 + __u16 roletype;
60279 +
60280 + __u16 auth_attempts;
60281 + unsigned long expires;
60282 +
60283 + struct acl_subject_label *root_label;
60284 + struct gr_hash_struct *hash;
60285 +
60286 + struct acl_role_label *prev;
60287 + struct acl_role_label *next;
60288 +
60289 + struct role_transition *transitions;
60290 + struct role_allowed_ip *allowed_ips;
60291 + uid_t *domain_children;
60292 + __u16 domain_child_num;
60293 +
60294 + umode_t umask;
60295 +
60296 + struct acl_subject_label **subj_hash;
60297 + __u32 subj_hash_size;
60298 +};
60299 +
60300 +struct user_acl_role_db {
60301 + struct acl_role_label **r_table;
60302 + __u32 num_pointers; /* Number of allocations to track */
60303 + __u32 num_roles; /* Number of roles */
60304 + __u32 num_domain_children; /* Number of domain children */
60305 + __u32 num_subjects; /* Number of subjects */
60306 + __u32 num_objects; /* Number of objects */
60307 +};
60308 +
60309 +struct acl_object_label {
60310 + char *filename;
60311 + ino_t inode;
60312 + dev_t device;
60313 + __u32 mode;
60314 +
60315 + struct acl_subject_label *nested;
60316 + struct acl_object_label *globbed;
60317 +
60318 + /* next two structures not used */
60319 +
60320 + struct acl_object_label *prev;
60321 + struct acl_object_label *next;
60322 +};
60323 +
60324 +struct acl_ip_label {
60325 + char *iface;
60326 + __u32 addr;
60327 + __u32 netmask;
60328 + __u16 low, high;
60329 + __u8 mode;
60330 + __u32 type;
60331 + __u32 proto[8];
60332 +
60333 + /* next two structures not used */
60334 +
60335 + struct acl_ip_label *prev;
60336 + struct acl_ip_label *next;
60337 +};
60338 +
60339 +struct gr_arg {
60340 + struct user_acl_role_db role_db;
60341 + unsigned char pw[GR_PW_LEN];
60342 + unsigned char salt[GR_SALT_LEN];
60343 + unsigned char sum[GR_SHA_LEN];
60344 + unsigned char sp_role[GR_SPROLE_LEN];
60345 + struct sprole_pw *sprole_pws;
60346 + dev_t segv_device;
60347 + ino_t segv_inode;
60348 + uid_t segv_uid;
60349 + __u16 num_sprole_pws;
60350 + __u16 mode;
60351 +};
60352 +
60353 +struct gr_arg_wrapper {
60354 + struct gr_arg *arg;
60355 + __u32 version;
60356 + __u32 size;
60357 +};
60358 +
60359 +struct subject_map {
60360 + struct acl_subject_label *user;
60361 + struct acl_subject_label *kernel;
60362 + struct subject_map *prev;
60363 + struct subject_map *next;
60364 +};
60365 +
60366 +struct acl_subj_map_db {
60367 + struct subject_map **s_hash;
60368 + __u32 s_size;
60369 +};
60370 +
60371 +/* End Data Structures Section */
60372 +
60373 +/* Hash functions generated by empirical testing by Brad Spengler
60374 + Makes good use of the low bits of the inode. Generally 0-1 times
60375 + in loop for successful match. 0-3 for unsuccessful match.
60376 + Shift/add algorithm with modulus of table size and an XOR*/
60377 +
60378 +static __inline__ unsigned int
60379 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
60380 +{
60381 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
60382 +}
60383 +
60384 + static __inline__ unsigned int
60385 +shash(const struct acl_subject_label *userp, const unsigned int sz)
60386 +{
60387 + return ((const unsigned long)userp % sz);
60388 +}
60389 +
60390 +static __inline__ unsigned int
60391 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
60392 +{
60393 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
60394 +}
60395 +
60396 +static __inline__ unsigned int
60397 +nhash(const char *name, const __u16 len, const unsigned int sz)
60398 +{
60399 + return full_name_hash((const unsigned char *)name, len) % sz;
60400 +}
60401 +
60402 +#define FOR_EACH_ROLE_START(role) \
60403 + role = role_list; \
60404 + while (role) {
60405 +
60406 +#define FOR_EACH_ROLE_END(role) \
60407 + role = role->prev; \
60408 + }
60409 +
60410 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
60411 + subj = NULL; \
60412 + iter = 0; \
60413 + while (iter < role->subj_hash_size) { \
60414 + if (subj == NULL) \
60415 + subj = role->subj_hash[iter]; \
60416 + if (subj == NULL) { \
60417 + iter++; \
60418 + continue; \
60419 + }
60420 +
60421 +#define FOR_EACH_SUBJECT_END(subj,iter) \
60422 + subj = subj->next; \
60423 + if (subj == NULL) \
60424 + iter++; \
60425 + }
60426 +
60427 +
60428 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
60429 + subj = role->hash->first; \
60430 + while (subj != NULL) {
60431 +
60432 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
60433 + subj = subj->next; \
60434 + }
60435 +
60436 +#endif
60437 +
60438 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
60439 new file mode 100644
60440 index 0000000..323ecf2
60441 --- /dev/null
60442 +++ b/include/linux/gralloc.h
60443 @@ -0,0 +1,9 @@
60444 +#ifndef __GRALLOC_H
60445 +#define __GRALLOC_H
60446 +
60447 +void acl_free_all(void);
60448 +int acl_alloc_stack_init(unsigned long size);
60449 +void *acl_alloc(unsigned long len);
60450 +void *acl_alloc_num(unsigned long num, unsigned long len);
60451 +
60452 +#endif
60453 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
60454 new file mode 100644
60455 index 0000000..b30e9bc
60456 --- /dev/null
60457 +++ b/include/linux/grdefs.h
60458 @@ -0,0 +1,140 @@
60459 +#ifndef GRDEFS_H
60460 +#define GRDEFS_H
60461 +
60462 +/* Begin grsecurity status declarations */
60463 +
60464 +enum {
60465 + GR_READY = 0x01,
60466 + GR_STATUS_INIT = 0x00 // disabled state
60467 +};
60468 +
60469 +/* Begin ACL declarations */
60470 +
60471 +/* Role flags */
60472 +
60473 +enum {
60474 + GR_ROLE_USER = 0x0001,
60475 + GR_ROLE_GROUP = 0x0002,
60476 + GR_ROLE_DEFAULT = 0x0004,
60477 + GR_ROLE_SPECIAL = 0x0008,
60478 + GR_ROLE_AUTH = 0x0010,
60479 + GR_ROLE_NOPW = 0x0020,
60480 + GR_ROLE_GOD = 0x0040,
60481 + GR_ROLE_LEARN = 0x0080,
60482 + GR_ROLE_TPE = 0x0100,
60483 + GR_ROLE_DOMAIN = 0x0200,
60484 + GR_ROLE_PAM = 0x0400,
60485 + GR_ROLE_PERSIST = 0x0800
60486 +};
60487 +
60488 +/* ACL Subject and Object mode flags */
60489 +enum {
60490 + GR_DELETED = 0x80000000
60491 +};
60492 +
60493 +/* ACL Object-only mode flags */
60494 +enum {
60495 + GR_READ = 0x00000001,
60496 + GR_APPEND = 0x00000002,
60497 + GR_WRITE = 0x00000004,
60498 + GR_EXEC = 0x00000008,
60499 + GR_FIND = 0x00000010,
60500 + GR_INHERIT = 0x00000020,
60501 + GR_SETID = 0x00000040,
60502 + GR_CREATE = 0x00000080,
60503 + GR_DELETE = 0x00000100,
60504 + GR_LINK = 0x00000200,
60505 + GR_AUDIT_READ = 0x00000400,
60506 + GR_AUDIT_APPEND = 0x00000800,
60507 + GR_AUDIT_WRITE = 0x00001000,
60508 + GR_AUDIT_EXEC = 0x00002000,
60509 + GR_AUDIT_FIND = 0x00004000,
60510 + GR_AUDIT_INHERIT= 0x00008000,
60511 + GR_AUDIT_SETID = 0x00010000,
60512 + GR_AUDIT_CREATE = 0x00020000,
60513 + GR_AUDIT_DELETE = 0x00040000,
60514 + GR_AUDIT_LINK = 0x00080000,
60515 + GR_PTRACERD = 0x00100000,
60516 + GR_NOPTRACE = 0x00200000,
60517 + GR_SUPPRESS = 0x00400000,
60518 + GR_NOLEARN = 0x00800000,
60519 + GR_INIT_TRANSFER= 0x01000000
60520 +};
60521 +
60522 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
60523 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
60524 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
60525 +
60526 +/* ACL subject-only mode flags */
60527 +enum {
60528 + GR_KILL = 0x00000001,
60529 + GR_VIEW = 0x00000002,
60530 + GR_PROTECTED = 0x00000004,
60531 + GR_LEARN = 0x00000008,
60532 + GR_OVERRIDE = 0x00000010,
60533 + /* just a placeholder, this mode is only used in userspace */
60534 + GR_DUMMY = 0x00000020,
60535 + GR_PROTSHM = 0x00000040,
60536 + GR_KILLPROC = 0x00000080,
60537 + GR_KILLIPPROC = 0x00000100,
60538 + /* just a placeholder, this mode is only used in userspace */
60539 + GR_NOTROJAN = 0x00000200,
60540 + GR_PROTPROCFD = 0x00000400,
60541 + GR_PROCACCT = 0x00000800,
60542 + GR_RELAXPTRACE = 0x00001000,
60543 + GR_NESTED = 0x00002000,
60544 + GR_INHERITLEARN = 0x00004000,
60545 + GR_PROCFIND = 0x00008000,
60546 + GR_POVERRIDE = 0x00010000,
60547 + GR_KERNELAUTH = 0x00020000,
60548 + GR_ATSECURE = 0x00040000,
60549 + GR_SHMEXEC = 0x00080000
60550 +};
60551 +
60552 +enum {
60553 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
60554 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
60555 + GR_PAX_ENABLE_MPROTECT = 0x0004,
60556 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
60557 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
60558 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
60559 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
60560 + GR_PAX_DISABLE_MPROTECT = 0x0400,
60561 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
60562 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
60563 +};
60564 +
60565 +enum {
60566 + GR_ID_USER = 0x01,
60567 + GR_ID_GROUP = 0x02,
60568 +};
60569 +
60570 +enum {
60571 + GR_ID_ALLOW = 0x01,
60572 + GR_ID_DENY = 0x02,
60573 +};
60574 +
60575 +#define GR_CRASH_RES 31
60576 +#define GR_UIDTABLE_MAX 500
60577 +
60578 +/* begin resource learning section */
60579 +enum {
60580 + GR_RLIM_CPU_BUMP = 60,
60581 + GR_RLIM_FSIZE_BUMP = 50000,
60582 + GR_RLIM_DATA_BUMP = 10000,
60583 + GR_RLIM_STACK_BUMP = 1000,
60584 + GR_RLIM_CORE_BUMP = 10000,
60585 + GR_RLIM_RSS_BUMP = 500000,
60586 + GR_RLIM_NPROC_BUMP = 1,
60587 + GR_RLIM_NOFILE_BUMP = 5,
60588 + GR_RLIM_MEMLOCK_BUMP = 50000,
60589 + GR_RLIM_AS_BUMP = 500000,
60590 + GR_RLIM_LOCKS_BUMP = 2,
60591 + GR_RLIM_SIGPENDING_BUMP = 5,
60592 + GR_RLIM_MSGQUEUE_BUMP = 10000,
60593 + GR_RLIM_NICE_BUMP = 1,
60594 + GR_RLIM_RTPRIO_BUMP = 1,
60595 + GR_RLIM_RTTIME_BUMP = 1000000
60596 +};
60597 +
60598 +#endif
60599 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
60600 new file mode 100644
60601 index 0000000..da390f1
60602 --- /dev/null
60603 +++ b/include/linux/grinternal.h
60604 @@ -0,0 +1,221 @@
60605 +#ifndef __GRINTERNAL_H
60606 +#define __GRINTERNAL_H
60607 +
60608 +#ifdef CONFIG_GRKERNSEC
60609 +
60610 +#include <linux/fs.h>
60611 +#include <linux/mnt_namespace.h>
60612 +#include <linux/nsproxy.h>
60613 +#include <linux/gracl.h>
60614 +#include <linux/grdefs.h>
60615 +#include <linux/grmsg.h>
60616 +
60617 +void gr_add_learn_entry(const char *fmt, ...)
60618 + __attribute__ ((format (printf, 1, 2)));
60619 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
60620 + const struct vfsmount *mnt);
60621 +__u32 gr_check_create(const struct dentry *new_dentry,
60622 + const struct dentry *parent,
60623 + const struct vfsmount *mnt, const __u32 mode);
60624 +int gr_check_protected_task(const struct task_struct *task);
60625 +__u32 to_gr_audit(const __u32 reqmode);
60626 +int gr_set_acls(const int type);
60627 +int gr_apply_subject_to_task(struct task_struct *task);
60628 +int gr_acl_is_enabled(void);
60629 +char gr_roletype_to_char(void);
60630 +
60631 +void gr_handle_alertkill(struct task_struct *task);
60632 +char *gr_to_filename(const struct dentry *dentry,
60633 + const struct vfsmount *mnt);
60634 +char *gr_to_filename1(const struct dentry *dentry,
60635 + const struct vfsmount *mnt);
60636 +char *gr_to_filename2(const struct dentry *dentry,
60637 + const struct vfsmount *mnt);
60638 +char *gr_to_filename3(const struct dentry *dentry,
60639 + const struct vfsmount *mnt);
60640 +
60641 +extern int grsec_enable_ptrace_readexec;
60642 +extern int grsec_enable_harden_ptrace;
60643 +extern int grsec_enable_link;
60644 +extern int grsec_enable_fifo;
60645 +extern int grsec_enable_execve;
60646 +extern int grsec_enable_shm;
60647 +extern int grsec_enable_execlog;
60648 +extern int grsec_enable_signal;
60649 +extern int grsec_enable_audit_ptrace;
60650 +extern int grsec_enable_forkfail;
60651 +extern int grsec_enable_time;
60652 +extern int grsec_enable_rofs;
60653 +extern int grsec_enable_chroot_shmat;
60654 +extern int grsec_enable_chroot_mount;
60655 +extern int grsec_enable_chroot_double;
60656 +extern int grsec_enable_chroot_pivot;
60657 +extern int grsec_enable_chroot_chdir;
60658 +extern int grsec_enable_chroot_chmod;
60659 +extern int grsec_enable_chroot_mknod;
60660 +extern int grsec_enable_chroot_fchdir;
60661 +extern int grsec_enable_chroot_nice;
60662 +extern int grsec_enable_chroot_execlog;
60663 +extern int grsec_enable_chroot_caps;
60664 +extern int grsec_enable_chroot_sysctl;
60665 +extern int grsec_enable_chroot_unix;
60666 +extern int grsec_enable_tpe;
60667 +extern int grsec_tpe_gid;
60668 +extern int grsec_enable_tpe_all;
60669 +extern int grsec_enable_tpe_invert;
60670 +extern int grsec_enable_socket_all;
60671 +extern int grsec_socket_all_gid;
60672 +extern int grsec_enable_socket_client;
60673 +extern int grsec_socket_client_gid;
60674 +extern int grsec_enable_socket_server;
60675 +extern int grsec_socket_server_gid;
60676 +extern int grsec_audit_gid;
60677 +extern int grsec_enable_group;
60678 +extern int grsec_enable_audit_textrel;
60679 +extern int grsec_enable_log_rwxmaps;
60680 +extern int grsec_enable_mount;
60681 +extern int grsec_enable_chdir;
60682 +extern int grsec_resource_logging;
60683 +extern int grsec_enable_blackhole;
60684 +extern int grsec_lastack_retries;
60685 +extern int grsec_enable_brute;
60686 +extern int grsec_lock;
60687 +
60688 +extern spinlock_t grsec_alert_lock;
60689 +extern unsigned long grsec_alert_wtime;
60690 +extern unsigned long grsec_alert_fyet;
60691 +
60692 +extern spinlock_t grsec_audit_lock;
60693 +
60694 +extern rwlock_t grsec_exec_file_lock;
60695 +
60696 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
60697 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
60698 + (tsk)->exec_file->f_vfsmnt) : "/")
60699 +
60700 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
60701 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
60702 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60703 +
60704 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
60705 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
60706 + (tsk)->exec_file->f_vfsmnt) : "/")
60707 +
60708 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
60709 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
60710 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60711 +
60712 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
60713 +
60714 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
60715 +
60716 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
60717 + (task)->pid, (cred)->uid, \
60718 + (cred)->euid, (cred)->gid, (cred)->egid, \
60719 + gr_parent_task_fullpath(task), \
60720 + (task)->real_parent->comm, (task)->real_parent->pid, \
60721 + (pcred)->uid, (pcred)->euid, \
60722 + (pcred)->gid, (pcred)->egid
60723 +
60724 +#define GR_CHROOT_CAPS {{ \
60725 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
60726 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
60727 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
60728 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
60729 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
60730 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
60731 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
60732 +
60733 +#define security_learn(normal_msg,args...) \
60734 +({ \
60735 + read_lock(&grsec_exec_file_lock); \
60736 + gr_add_learn_entry(normal_msg "\n", ## args); \
60737 + read_unlock(&grsec_exec_file_lock); \
60738 +})
60739 +
60740 +enum {
60741 + GR_DO_AUDIT,
60742 + GR_DONT_AUDIT,
60743 + /* used for non-audit messages that we shouldn't kill the task on */
60744 + GR_DONT_AUDIT_GOOD
60745 +};
60746 +
60747 +enum {
60748 + GR_TTYSNIFF,
60749 + GR_RBAC,
60750 + GR_RBAC_STR,
60751 + GR_STR_RBAC,
60752 + GR_RBAC_MODE2,
60753 + GR_RBAC_MODE3,
60754 + GR_FILENAME,
60755 + GR_SYSCTL_HIDDEN,
60756 + GR_NOARGS,
60757 + GR_ONE_INT,
60758 + GR_ONE_INT_TWO_STR,
60759 + GR_ONE_STR,
60760 + GR_STR_INT,
60761 + GR_TWO_STR_INT,
60762 + GR_TWO_INT,
60763 + GR_TWO_U64,
60764 + GR_THREE_INT,
60765 + GR_FIVE_INT_TWO_STR,
60766 + GR_TWO_STR,
60767 + GR_THREE_STR,
60768 + GR_FOUR_STR,
60769 + GR_STR_FILENAME,
60770 + GR_FILENAME_STR,
60771 + GR_FILENAME_TWO_INT,
60772 + GR_FILENAME_TWO_INT_STR,
60773 + GR_TEXTREL,
60774 + GR_PTRACE,
60775 + GR_RESOURCE,
60776 + GR_CAP,
60777 + GR_SIG,
60778 + GR_SIG2,
60779 + GR_CRASH1,
60780 + GR_CRASH2,
60781 + GR_PSACCT,
60782 + GR_RWXMAP
60783 +};
60784 +
60785 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
60786 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
60787 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
60788 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
60789 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
60790 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
60791 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
60792 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
60793 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
60794 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
60795 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
60796 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
60797 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
60798 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
60799 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
60800 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
60801 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
60802 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
60803 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
60804 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
60805 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
60806 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
60807 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
60808 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
60809 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
60810 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
60811 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
60812 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
60813 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
60814 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
60815 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
60816 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
60817 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
60818 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
60819 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
60820 +
60821 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
60822 +
60823 +#endif
60824 +
60825 +#endif
60826 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
60827 new file mode 100644
60828 index 0000000..ae576a1
60829 --- /dev/null
60830 +++ b/include/linux/grmsg.h
60831 @@ -0,0 +1,109 @@
60832 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
60833 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
60834 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
60835 +#define GR_STOPMOD_MSG "denied modification of module state by "
60836 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
60837 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
60838 +#define GR_IOPERM_MSG "denied use of ioperm() by "
60839 +#define GR_IOPL_MSG "denied use of iopl() by "
60840 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
60841 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
60842 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
60843 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
60844 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
60845 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
60846 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
60847 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
60848 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
60849 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
60850 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
60851 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
60852 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
60853 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
60854 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
60855 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
60856 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
60857 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
60858 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
60859 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
60860 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
60861 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
60862 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
60863 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
60864 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
60865 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
60866 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
60867 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
60868 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
60869 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
60870 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
60871 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
60872 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
60873 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
60874 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
60875 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
60876 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
60877 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
60878 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
60879 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
60880 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
60881 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
60882 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
60883 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
60884 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
60885 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
60886 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
60887 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
60888 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
60889 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
60890 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
60891 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
60892 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
60893 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
60894 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
60895 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
60896 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
60897 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
60898 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
60899 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
60900 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
60901 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
60902 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
60903 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
60904 +#define GR_NICE_CHROOT_MSG "denied priority change by "
60905 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
60906 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
60907 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
60908 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
60909 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
60910 +#define GR_TIME_MSG "time set by "
60911 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
60912 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
60913 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
60914 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
60915 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
60916 +#define GR_BIND_MSG "denied bind() by "
60917 +#define GR_CONNECT_MSG "denied connect() by "
60918 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
60919 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
60920 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
60921 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
60922 +#define GR_CAP_ACL_MSG "use of %s denied for "
60923 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
60924 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
60925 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
60926 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
60927 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
60928 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
60929 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
60930 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
60931 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
60932 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
60933 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
60934 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
60935 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
60936 +#define GR_VM86_MSG "denied use of vm86 by "
60937 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
60938 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
60939 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
60940 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
60941 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
60942 new file mode 100644
60943 index 0000000..acd05db
60944 --- /dev/null
60945 +++ b/include/linux/grsecurity.h
60946 @@ -0,0 +1,232 @@
60947 +#ifndef GR_SECURITY_H
60948 +#define GR_SECURITY_H
60949 +#include <linux/fs.h>
60950 +#include <linux/fs_struct.h>
60951 +#include <linux/binfmts.h>
60952 +#include <linux/gracl.h>
60953 +
60954 +/* notify of brain-dead configs */
60955 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60956 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
60957 +#endif
60958 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
60959 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
60960 +#endif
60961 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
60962 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
60963 +#endif
60964 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
60965 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
60966 +#endif
60967 +
60968 +#include <linux/compat.h>
60969 +
60970 +struct user_arg_ptr {
60971 +#ifdef CONFIG_COMPAT
60972 + bool is_compat;
60973 +#endif
60974 + union {
60975 + const char __user *const __user *native;
60976 +#ifdef CONFIG_COMPAT
60977 + compat_uptr_t __user *compat;
60978 +#endif
60979 + } ptr;
60980 +};
60981 +
60982 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
60983 +void gr_handle_brute_check(void);
60984 +void gr_handle_kernel_exploit(void);
60985 +int gr_process_user_ban(void);
60986 +
60987 +char gr_roletype_to_char(void);
60988 +
60989 +int gr_acl_enable_at_secure(void);
60990 +
60991 +int gr_check_user_change(int real, int effective, int fs);
60992 +int gr_check_group_change(int real, int effective, int fs);
60993 +
60994 +void gr_del_task_from_ip_table(struct task_struct *p);
60995 +
60996 +int gr_pid_is_chrooted(struct task_struct *p);
60997 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
60998 +int gr_handle_chroot_nice(void);
60999 +int gr_handle_chroot_sysctl(const int op);
61000 +int gr_handle_chroot_setpriority(struct task_struct *p,
61001 + const int niceval);
61002 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
61003 +int gr_handle_chroot_chroot(const struct dentry *dentry,
61004 + const struct vfsmount *mnt);
61005 +void gr_handle_chroot_chdir(struct path *path);
61006 +int gr_handle_chroot_chmod(const struct dentry *dentry,
61007 + const struct vfsmount *mnt, const int mode);
61008 +int gr_handle_chroot_mknod(const struct dentry *dentry,
61009 + const struct vfsmount *mnt, const int mode);
61010 +int gr_handle_chroot_mount(const struct dentry *dentry,
61011 + const struct vfsmount *mnt,
61012 + const char *dev_name);
61013 +int gr_handle_chroot_pivot(void);
61014 +int gr_handle_chroot_unix(const pid_t pid);
61015 +
61016 +int gr_handle_rawio(const struct inode *inode);
61017 +
61018 +void gr_handle_ioperm(void);
61019 +void gr_handle_iopl(void);
61020 +
61021 +umode_t gr_acl_umask(void);
61022 +
61023 +int gr_tpe_allow(const struct file *file);
61024 +
61025 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
61026 +void gr_clear_chroot_entries(struct task_struct *task);
61027 +
61028 +void gr_log_forkfail(const int retval);
61029 +void gr_log_timechange(void);
61030 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
61031 +void gr_log_chdir(const struct dentry *dentry,
61032 + const struct vfsmount *mnt);
61033 +void gr_log_chroot_exec(const struct dentry *dentry,
61034 + const struct vfsmount *mnt);
61035 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
61036 +void gr_log_remount(const char *devname, const int retval);
61037 +void gr_log_unmount(const char *devname, const int retval);
61038 +void gr_log_mount(const char *from, const char *to, const int retval);
61039 +void gr_log_textrel(struct vm_area_struct *vma);
61040 +void gr_log_rwxmmap(struct file *file);
61041 +void gr_log_rwxmprotect(struct file *file);
61042 +
61043 +int gr_handle_follow_link(const struct inode *parent,
61044 + const struct inode *inode,
61045 + const struct dentry *dentry,
61046 + const struct vfsmount *mnt);
61047 +int gr_handle_fifo(const struct dentry *dentry,
61048 + const struct vfsmount *mnt,
61049 + const struct dentry *dir, const int flag,
61050 + const int acc_mode);
61051 +int gr_handle_hardlink(const struct dentry *dentry,
61052 + const struct vfsmount *mnt,
61053 + struct inode *inode,
61054 + const int mode, const char *to);
61055 +
61056 +int gr_is_capable(const int cap);
61057 +int gr_is_capable_nolog(const int cap);
61058 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
61059 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
61060 +
61061 +void gr_learn_resource(const struct task_struct *task, const int limit,
61062 + const unsigned long wanted, const int gt);
61063 +void gr_copy_label(struct task_struct *tsk);
61064 +void gr_handle_crash(struct task_struct *task, const int sig);
61065 +int gr_handle_signal(const struct task_struct *p, const int sig);
61066 +int gr_check_crash_uid(const uid_t uid);
61067 +int gr_check_protected_task(const struct task_struct *task);
61068 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
61069 +int gr_acl_handle_mmap(const struct file *file,
61070 + const unsigned long prot);
61071 +int gr_acl_handle_mprotect(const struct file *file,
61072 + const unsigned long prot);
61073 +int gr_check_hidden_task(const struct task_struct *tsk);
61074 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
61075 + const struct vfsmount *mnt);
61076 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
61077 + const struct vfsmount *mnt);
61078 +__u32 gr_acl_handle_access(const struct dentry *dentry,
61079 + const struct vfsmount *mnt, const int fmode);
61080 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
61081 + const struct vfsmount *mnt, umode_t *mode);
61082 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
61083 + const struct vfsmount *mnt);
61084 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
61085 + const struct vfsmount *mnt);
61086 +int gr_handle_ptrace(struct task_struct *task, const long request);
61087 +int gr_handle_proc_ptrace(struct task_struct *task);
61088 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
61089 + const struct vfsmount *mnt);
61090 +int gr_check_crash_exec(const struct file *filp);
61091 +int gr_acl_is_enabled(void);
61092 +void gr_set_kernel_label(struct task_struct *task);
61093 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
61094 + const gid_t gid);
61095 +int gr_set_proc_label(const struct dentry *dentry,
61096 + const struct vfsmount *mnt,
61097 + const int unsafe_flags);
61098 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
61099 + const struct vfsmount *mnt);
61100 +__u32 gr_acl_handle_open(const struct dentry *dentry,
61101 + const struct vfsmount *mnt, int acc_mode);
61102 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
61103 + const struct dentry *p_dentry,
61104 + const struct vfsmount *p_mnt,
61105 + int open_flags, int acc_mode, const int imode);
61106 +void gr_handle_create(const struct dentry *dentry,
61107 + const struct vfsmount *mnt);
61108 +void gr_handle_proc_create(const struct dentry *dentry,
61109 + const struct inode *inode);
61110 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
61111 + const struct dentry *parent_dentry,
61112 + const struct vfsmount *parent_mnt,
61113 + const int mode);
61114 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
61115 + const struct dentry *parent_dentry,
61116 + const struct vfsmount *parent_mnt);
61117 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
61118 + const struct vfsmount *mnt);
61119 +void gr_handle_delete(const ino_t ino, const dev_t dev);
61120 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
61121 + const struct vfsmount *mnt);
61122 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
61123 + const struct dentry *parent_dentry,
61124 + const struct vfsmount *parent_mnt,
61125 + const char *from);
61126 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
61127 + const struct dentry *parent_dentry,
61128 + const struct vfsmount *parent_mnt,
61129 + const struct dentry *old_dentry,
61130 + const struct vfsmount *old_mnt, const char *to);
61131 +int gr_acl_handle_rename(struct dentry *new_dentry,
61132 + struct dentry *parent_dentry,
61133 + const struct vfsmount *parent_mnt,
61134 + struct dentry *old_dentry,
61135 + struct inode *old_parent_inode,
61136 + struct vfsmount *old_mnt, const char *newname);
61137 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61138 + struct dentry *old_dentry,
61139 + struct dentry *new_dentry,
61140 + struct vfsmount *mnt, const __u8 replace);
61141 +__u32 gr_check_link(const struct dentry *new_dentry,
61142 + const struct dentry *parent_dentry,
61143 + const struct vfsmount *parent_mnt,
61144 + const struct dentry *old_dentry,
61145 + const struct vfsmount *old_mnt);
61146 +int gr_acl_handle_filldir(const struct file *file, const char *name,
61147 + const unsigned int namelen, const ino_t ino);
61148 +
61149 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
61150 + const struct vfsmount *mnt);
61151 +void gr_acl_handle_exit(void);
61152 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
61153 +int gr_acl_handle_procpidmem(const struct task_struct *task);
61154 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
61155 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
61156 +void gr_audit_ptrace(struct task_struct *task);
61157 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
61158 +
61159 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
61160 +
61161 +#ifdef CONFIG_GRKERNSEC
61162 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
61163 +void gr_handle_vm86(void);
61164 +void gr_handle_mem_readwrite(u64 from, u64 to);
61165 +
61166 +void gr_log_badprocpid(const char *entry);
61167 +
61168 +extern int grsec_enable_dmesg;
61169 +extern int grsec_disable_privio;
61170 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61171 +extern int grsec_enable_chroot_findtask;
61172 +#endif
61173 +#ifdef CONFIG_GRKERNSEC_SETXID
61174 +extern int grsec_enable_setxid;
61175 +#endif
61176 +#endif
61177 +
61178 +#endif
61179 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
61180 new file mode 100644
61181 index 0000000..e7ffaaf
61182 --- /dev/null
61183 +++ b/include/linux/grsock.h
61184 @@ -0,0 +1,19 @@
61185 +#ifndef __GRSOCK_H
61186 +#define __GRSOCK_H
61187 +
61188 +extern void gr_attach_curr_ip(const struct sock *sk);
61189 +extern int gr_handle_sock_all(const int family, const int type,
61190 + const int protocol);
61191 +extern int gr_handle_sock_server(const struct sockaddr *sck);
61192 +extern int gr_handle_sock_server_other(const struct sock *sck);
61193 +extern int gr_handle_sock_client(const struct sockaddr *sck);
61194 +extern int gr_search_connect(struct socket * sock,
61195 + struct sockaddr_in * addr);
61196 +extern int gr_search_bind(struct socket * sock,
61197 + struct sockaddr_in * addr);
61198 +extern int gr_search_listen(struct socket * sock);
61199 +extern int gr_search_accept(struct socket * sock);
61200 +extern int gr_search_socket(const int domain, const int type,
61201 + const int protocol);
61202 +
61203 +#endif
61204 diff --git a/include/linux/hid.h b/include/linux/hid.h
61205 index 3a95da6..51986f1 100644
61206 --- a/include/linux/hid.h
61207 +++ b/include/linux/hid.h
61208 @@ -696,7 +696,7 @@ struct hid_ll_driver {
61209 unsigned int code, int value);
61210
61211 int (*parse)(struct hid_device *hdev);
61212 -};
61213 +} __no_const;
61214
61215 #define PM_HINT_FULLON 1<<5
61216 #define PM_HINT_NORMAL 1<<1
61217 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
61218 index d3999b4..1304cb4 100644
61219 --- a/include/linux/highmem.h
61220 +++ b/include/linux/highmem.h
61221 @@ -221,6 +221,18 @@ static inline void clear_highpage(struct page *page)
61222 kunmap_atomic(kaddr);
61223 }
61224
61225 +static inline void sanitize_highpage(struct page *page)
61226 +{
61227 + void *kaddr;
61228 + unsigned long flags;
61229 +
61230 + local_irq_save(flags);
61231 + kaddr = kmap_atomic(page);
61232 + clear_page(kaddr);
61233 + kunmap_atomic(kaddr);
61234 + local_irq_restore(flags);
61235 +}
61236 +
61237 static inline void zero_user_segments(struct page *page,
61238 unsigned start1, unsigned end1,
61239 unsigned start2, unsigned end2)
61240 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
61241 index 195d8b3..e20cfab 100644
61242 --- a/include/linux/i2c.h
61243 +++ b/include/linux/i2c.h
61244 @@ -365,6 +365,7 @@ struct i2c_algorithm {
61245 /* To determine what the adapter supports */
61246 u32 (*functionality) (struct i2c_adapter *);
61247 };
61248 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
61249
61250 /*
61251 * i2c_adapter is the structure used to identify a physical i2c bus along
61252 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
61253 index d23c3c2..eb63c81 100644
61254 --- a/include/linux/i2o.h
61255 +++ b/include/linux/i2o.h
61256 @@ -565,7 +565,7 @@ struct i2o_controller {
61257 struct i2o_device *exec; /* Executive */
61258 #if BITS_PER_LONG == 64
61259 spinlock_t context_list_lock; /* lock for context_list */
61260 - atomic_t context_list_counter; /* needed for unique contexts */
61261 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
61262 struct list_head context_list; /* list of context id's
61263 and pointers */
61264 #endif
61265 diff --git a/include/linux/if_team.h b/include/linux/if_team.h
61266 index 58404b0..439ed95 100644
61267 --- a/include/linux/if_team.h
61268 +++ b/include/linux/if_team.h
61269 @@ -64,6 +64,7 @@ struct team_mode_ops {
61270 void (*port_leave)(struct team *team, struct team_port *port);
61271 void (*port_change_mac)(struct team *team, struct team_port *port);
61272 };
61273 +typedef struct team_mode_ops __no_const team_mode_ops_no_const;
61274
61275 enum team_option_type {
61276 TEAM_OPTION_TYPE_U32,
61277 @@ -112,7 +113,7 @@ struct team {
61278 struct list_head option_list;
61279
61280 const struct team_mode *mode;
61281 - struct team_mode_ops ops;
61282 + team_mode_ops_no_const ops;
61283 long mode_priv[TEAM_MODE_PRIV_LONGS];
61284 };
61285
61286 diff --git a/include/linux/init.h b/include/linux/init.h
61287 index 6b95109..4aca62c 100644
61288 --- a/include/linux/init.h
61289 +++ b/include/linux/init.h
61290 @@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
61291
61292 /* Each module must use one module_init(). */
61293 #define module_init(initfn) \
61294 - static inline initcall_t __inittest(void) \
61295 + static inline __used initcall_t __inittest(void) \
61296 { return initfn; } \
61297 int init_module(void) __attribute__((alias(#initfn)));
61298
61299 /* This is only required if you want to be unloadable. */
61300 #define module_exit(exitfn) \
61301 - static inline exitcall_t __exittest(void) \
61302 + static inline __used exitcall_t __exittest(void) \
61303 { return exitfn; } \
61304 void cleanup_module(void) __attribute__((alias(#exitfn)));
61305
61306 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
61307 index e4baff5..83bb175 100644
61308 --- a/include/linux/init_task.h
61309 +++ b/include/linux/init_task.h
61310 @@ -134,6 +134,12 @@ extern struct cred init_cred;
61311
61312 #define INIT_TASK_COMM "swapper"
61313
61314 +#ifdef CONFIG_X86
61315 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
61316 +#else
61317 +#define INIT_TASK_THREAD_INFO
61318 +#endif
61319 +
61320 /*
61321 * INIT_TASK is used to set up the first task table, touch at
61322 * your own risk!. Base=0, limit=0x1fffff (=2MB)
61323 @@ -172,6 +178,7 @@ extern struct cred init_cred;
61324 RCU_INIT_POINTER(.cred, &init_cred), \
61325 .comm = INIT_TASK_COMM, \
61326 .thread = INIT_THREAD, \
61327 + INIT_TASK_THREAD_INFO \
61328 .fs = &init_fs, \
61329 .files = &init_files, \
61330 .signal = &init_signals, \
61331 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
61332 index e6ca56d..8583707 100644
61333 --- a/include/linux/intel-iommu.h
61334 +++ b/include/linux/intel-iommu.h
61335 @@ -296,7 +296,7 @@ struct iommu_flush {
61336 u8 fm, u64 type);
61337 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
61338 unsigned int size_order, u64 type);
61339 -};
61340 +} __no_const;
61341
61342 enum {
61343 SR_DMAR_FECTL_REG,
61344 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
61345 index 2aea5d2..0b82f0c 100644
61346 --- a/include/linux/interrupt.h
61347 +++ b/include/linux/interrupt.h
61348 @@ -439,7 +439,7 @@ enum
61349 /* map softirq index to softirq name. update 'softirq_to_name' in
61350 * kernel/softirq.c when adding a new softirq.
61351 */
61352 -extern char *softirq_to_name[NR_SOFTIRQS];
61353 +extern const char * const softirq_to_name[NR_SOFTIRQS];
61354
61355 /* softirq mask and active fields moved to irq_cpustat_t in
61356 * asm/hardirq.h to get better cache usage. KAO
61357 @@ -447,12 +447,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
61358
61359 struct softirq_action
61360 {
61361 - void (*action)(struct softirq_action *);
61362 + void (*action)(void);
61363 };
61364
61365 asmlinkage void do_softirq(void);
61366 asmlinkage void __do_softirq(void);
61367 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
61368 +extern void open_softirq(int nr, void (*action)(void));
61369 extern void softirq_init(void);
61370 extern void __raise_softirq_irqoff(unsigned int nr);
61371
61372 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
61373 index 3875719..4cd454c 100644
61374 --- a/include/linux/kallsyms.h
61375 +++ b/include/linux/kallsyms.h
61376 @@ -15,7 +15,8 @@
61377
61378 struct module;
61379
61380 -#ifdef CONFIG_KALLSYMS
61381 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
61382 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
61383 /* Lookup the address for a symbol. Returns 0 if not found. */
61384 unsigned long kallsyms_lookup_name(const char *name);
61385
61386 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
61387 /* Stupid that this does nothing, but I didn't create this mess. */
61388 #define __print_symbol(fmt, addr)
61389 #endif /*CONFIG_KALLSYMS*/
61390 +#else /* when included by kallsyms.c, vsnprintf.c, or
61391 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
61392 +extern void __print_symbol(const char *fmt, unsigned long address);
61393 +extern int sprint_backtrace(char *buffer, unsigned long address);
61394 +extern int sprint_symbol(char *buffer, unsigned long address);
61395 +const char *kallsyms_lookup(unsigned long addr,
61396 + unsigned long *symbolsize,
61397 + unsigned long *offset,
61398 + char **modname, char *namebuf);
61399 +#endif
61400
61401 /* This macro allows us to keep printk typechecking */
61402 static __printf(1, 2)
61403 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
61404 index c4d2fc1..5df9c19 100644
61405 --- a/include/linux/kgdb.h
61406 +++ b/include/linux/kgdb.h
61407 @@ -53,7 +53,7 @@ extern int kgdb_connected;
61408 extern int kgdb_io_module_registered;
61409
61410 extern atomic_t kgdb_setting_breakpoint;
61411 -extern atomic_t kgdb_cpu_doing_single_step;
61412 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
61413
61414 extern struct task_struct *kgdb_usethread;
61415 extern struct task_struct *kgdb_contthread;
61416 @@ -252,7 +252,7 @@ struct kgdb_arch {
61417 void (*disable_hw_break)(struct pt_regs *regs);
61418 void (*remove_all_hw_break)(void);
61419 void (*correct_hw_break)(void);
61420 -};
61421 +} __do_const;
61422
61423 /**
61424 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
61425 @@ -277,7 +277,7 @@ struct kgdb_io {
61426 void (*pre_exception) (void);
61427 void (*post_exception) (void);
61428 int is_console;
61429 -};
61430 +} __do_const;
61431
61432 extern struct kgdb_arch arch_kgdb_ops;
61433
61434 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
61435 index dd99c32..da06047 100644
61436 --- a/include/linux/kmod.h
61437 +++ b/include/linux/kmod.h
61438 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
61439 * usually useless though. */
61440 extern __printf(2, 3)
61441 int __request_module(bool wait, const char *name, ...);
61442 +extern __printf(3, 4)
61443 +int ___request_module(bool wait, char *param_name, const char *name, ...);
61444 #define request_module(mod...) __request_module(true, mod)
61445 #define request_module_nowait(mod...) __request_module(false, mod)
61446 #define try_then_request_module(x, mod...) \
61447 diff --git a/include/linux/kref.h b/include/linux/kref.h
61448 index 9c07dce..a92fa71 100644
61449 --- a/include/linux/kref.h
61450 +++ b/include/linux/kref.h
61451 @@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
61452 static inline int kref_sub(struct kref *kref, unsigned int count,
61453 void (*release)(struct kref *kref))
61454 {
61455 - WARN_ON(release == NULL);
61456 + BUG_ON(release == NULL);
61457
61458 if (atomic_sub_and_test((int) count, &kref->refcount)) {
61459 release(kref);
61460 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
61461 index 72cbf08..dd0201d 100644
61462 --- a/include/linux/kvm_host.h
61463 +++ b/include/linux/kvm_host.h
61464 @@ -322,7 +322,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
61465 void vcpu_load(struct kvm_vcpu *vcpu);
61466 void vcpu_put(struct kvm_vcpu *vcpu);
61467
61468 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61469 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61470 struct module *module);
61471 void kvm_exit(void);
61472
61473 @@ -486,7 +486,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
61474 struct kvm_guest_debug *dbg);
61475 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
61476
61477 -int kvm_arch_init(void *opaque);
61478 +int kvm_arch_init(const void *opaque);
61479 void kvm_arch_exit(void);
61480
61481 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
61482 diff --git a/include/linux/libata.h b/include/linux/libata.h
61483 index e926df7..1713bd8 100644
61484 --- a/include/linux/libata.h
61485 +++ b/include/linux/libata.h
61486 @@ -909,7 +909,7 @@ struct ata_port_operations {
61487 * fields must be pointers.
61488 */
61489 const struct ata_port_operations *inherits;
61490 -};
61491 +} __do_const;
61492
61493 struct ata_port_info {
61494 unsigned long flags;
61495 diff --git a/include/linux/mca.h b/include/linux/mca.h
61496 index 3797270..7765ede 100644
61497 --- a/include/linux/mca.h
61498 +++ b/include/linux/mca.h
61499 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
61500 int region);
61501 void * (*mca_transform_memory)(struct mca_device *,
61502 void *memory);
61503 -};
61504 +} __no_const;
61505
61506 struct mca_bus {
61507 u64 default_dma_mask;
61508 diff --git a/include/linux/memory.h b/include/linux/memory.h
61509 index 1ac7f6e..a5794d0 100644
61510 --- a/include/linux/memory.h
61511 +++ b/include/linux/memory.h
61512 @@ -143,7 +143,7 @@ struct memory_accessor {
61513 size_t count);
61514 ssize_t (*write)(struct memory_accessor *, const char *buf,
61515 off_t offset, size_t count);
61516 -};
61517 +} __no_const;
61518
61519 /*
61520 * Kernel text modification mutex, used for code patching. Users of this lock
61521 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
61522 index ee96cd5..7823c3a 100644
61523 --- a/include/linux/mfd/abx500.h
61524 +++ b/include/linux/mfd/abx500.h
61525 @@ -455,6 +455,7 @@ struct abx500_ops {
61526 int (*event_registers_startup_state_get) (struct device *, u8 *);
61527 int (*startup_irq_enabled) (struct device *, unsigned int);
61528 };
61529 +typedef struct abx500_ops __no_const abx500_ops_no_const;
61530
61531 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
61532 void abx500_remove_ops(struct device *dev);
61533 diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
61534 index 9b07725..3d55001 100644
61535 --- a/include/linux/mfd/abx500/ux500_chargalg.h
61536 +++ b/include/linux/mfd/abx500/ux500_chargalg.h
61537 @@ -19,7 +19,7 @@ struct ux500_charger_ops {
61538 int (*enable) (struct ux500_charger *, int, int, int);
61539 int (*kick_wd) (struct ux500_charger *);
61540 int (*update_curr) (struct ux500_charger *, int);
61541 -};
61542 +} __no_const;
61543
61544 /**
61545 * struct ux500_charger - power supply ux500 charger sub class
61546 diff --git a/include/linux/mm.h b/include/linux/mm.h
61547 index 74aa71b..4ae97ba 100644
61548 --- a/include/linux/mm.h
61549 +++ b/include/linux/mm.h
61550 @@ -116,7 +116,14 @@ extern unsigned int kobjsize(const void *objp);
61551
61552 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
61553 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
61554 +
61555 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61556 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
61557 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
61558 +#else
61559 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
61560 +#endif
61561 +
61562 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
61563 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
61564
61565 @@ -1013,34 +1020,6 @@ int set_page_dirty(struct page *page);
61566 int set_page_dirty_lock(struct page *page);
61567 int clear_page_dirty_for_io(struct page *page);
61568
61569 -/* Is the vma a continuation of the stack vma above it? */
61570 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
61571 -{
61572 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
61573 -}
61574 -
61575 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
61576 - unsigned long addr)
61577 -{
61578 - return (vma->vm_flags & VM_GROWSDOWN) &&
61579 - (vma->vm_start == addr) &&
61580 - !vma_growsdown(vma->vm_prev, addr);
61581 -}
61582 -
61583 -/* Is the vma a continuation of the stack vma below it? */
61584 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
61585 -{
61586 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
61587 -}
61588 -
61589 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
61590 - unsigned long addr)
61591 -{
61592 - return (vma->vm_flags & VM_GROWSUP) &&
61593 - (vma->vm_end == addr) &&
61594 - !vma_growsup(vma->vm_next, addr);
61595 -}
61596 -
61597 extern pid_t
61598 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
61599
61600 @@ -1139,6 +1118,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
61601 }
61602 #endif
61603
61604 +#ifdef CONFIG_MMU
61605 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
61606 +#else
61607 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61608 +{
61609 + return __pgprot(0);
61610 +}
61611 +#endif
61612 +
61613 int vma_wants_writenotify(struct vm_area_struct *vma);
61614
61615 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
61616 @@ -1157,8 +1145,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
61617 {
61618 return 0;
61619 }
61620 +
61621 +static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
61622 + unsigned long address)
61623 +{
61624 + return 0;
61625 +}
61626 #else
61627 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
61628 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
61629 #endif
61630
61631 #ifdef __PAGETABLE_PMD_FOLDED
61632 @@ -1167,8 +1162,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
61633 {
61634 return 0;
61635 }
61636 +
61637 +static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
61638 + unsigned long address)
61639 +{
61640 + return 0;
61641 +}
61642 #else
61643 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
61644 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
61645 #endif
61646
61647 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
61648 @@ -1186,11 +1188,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
61649 NULL: pud_offset(pgd, address);
61650 }
61651
61652 +static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
61653 +{
61654 + return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
61655 + NULL: pud_offset(pgd, address);
61656 +}
61657 +
61658 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
61659 {
61660 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
61661 NULL: pmd_offset(pud, address);
61662 }
61663 +
61664 +static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
61665 +{
61666 + return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
61667 + NULL: pmd_offset(pud, address);
61668 +}
61669 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
61670
61671 #if USE_SPLIT_PTLOCKS
61672 @@ -1400,6 +1414,7 @@ extern unsigned long do_mmap(struct file *, unsigned long,
61673 unsigned long, unsigned long,
61674 unsigned long, unsigned long);
61675 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
61676 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
61677
61678 /* These take the mm semaphore themselves */
61679 extern unsigned long vm_brk(unsigned long, unsigned long);
61680 @@ -1462,6 +1477,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
61681 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
61682 struct vm_area_struct **pprev);
61683
61684 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
61685 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
61686 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
61687 +
61688 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
61689 NULL if none. Assume start_addr < end_addr. */
61690 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
61691 @@ -1490,15 +1509,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
61692 return vma;
61693 }
61694
61695 -#ifdef CONFIG_MMU
61696 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
61697 -#else
61698 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
61699 -{
61700 - return __pgprot(0);
61701 -}
61702 -#endif
61703 -
61704 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
61705 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
61706 unsigned long pfn, unsigned long size, pgprot_t);
61707 @@ -1602,7 +1612,7 @@ extern int unpoison_memory(unsigned long pfn);
61708 extern int sysctl_memory_failure_early_kill;
61709 extern int sysctl_memory_failure_recovery;
61710 extern void shake_page(struct page *p, int access);
61711 -extern atomic_long_t mce_bad_pages;
61712 +extern atomic_long_unchecked_t mce_bad_pages;
61713 extern int soft_offline_page(struct page *page, int flags);
61714
61715 extern void dump_page(struct page *page);
61716 @@ -1633,5 +1643,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
61717 static inline bool page_is_guard(struct page *page) { return false; }
61718 #endif /* CONFIG_DEBUG_PAGEALLOC */
61719
61720 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
61721 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
61722 +#else
61723 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
61724 +#endif
61725 +
61726 #endif /* __KERNEL__ */
61727 #endif /* _LINUX_MM_H */
61728 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
61729 index 3cc3062..efeaeb7 100644
61730 --- a/include/linux/mm_types.h
61731 +++ b/include/linux/mm_types.h
61732 @@ -252,6 +252,8 @@ struct vm_area_struct {
61733 #ifdef CONFIG_NUMA
61734 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
61735 #endif
61736 +
61737 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
61738 };
61739
61740 struct core_thread {
61741 @@ -326,7 +328,7 @@ struct mm_struct {
61742 unsigned long def_flags;
61743 unsigned long nr_ptes; /* Page table pages */
61744 unsigned long start_code, end_code, start_data, end_data;
61745 - unsigned long start_brk, brk, start_stack;
61746 + unsigned long brk_gap, start_brk, brk, start_stack;
61747 unsigned long arg_start, arg_end, env_start, env_end;
61748
61749 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
61750 @@ -388,6 +390,24 @@ struct mm_struct {
61751 #ifdef CONFIG_CPUMASK_OFFSTACK
61752 struct cpumask cpumask_allocation;
61753 #endif
61754 +
61755 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) || defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS)
61756 + unsigned long pax_flags;
61757 +#endif
61758 +
61759 +#ifdef CONFIG_PAX_DLRESOLVE
61760 + unsigned long call_dl_resolve;
61761 +#endif
61762 +
61763 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
61764 + unsigned long call_syscall;
61765 +#endif
61766 +
61767 +#ifdef CONFIG_PAX_ASLR
61768 + unsigned long delta_mmap; /* randomized offset */
61769 + unsigned long delta_stack; /* randomized offset */
61770 +#endif
61771 +
61772 };
61773
61774 static inline void mm_init_cpumask(struct mm_struct *mm)
61775 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
61776 index 1d1b1e1..2a13c78 100644
61777 --- a/include/linux/mmu_notifier.h
61778 +++ b/include/linux/mmu_notifier.h
61779 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
61780 */
61781 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
61782 ({ \
61783 - pte_t __pte; \
61784 + pte_t ___pte; \
61785 struct vm_area_struct *___vma = __vma; \
61786 unsigned long ___address = __address; \
61787 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
61788 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
61789 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
61790 - __pte; \
61791 + ___pte; \
61792 })
61793
61794 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
61795 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
61796 index dff7115..0e001c8 100644
61797 --- a/include/linux/mmzone.h
61798 +++ b/include/linux/mmzone.h
61799 @@ -380,7 +380,7 @@ struct zone {
61800 unsigned long flags; /* zone flags, see below */
61801
61802 /* Zone statistics */
61803 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61804 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61805
61806 /*
61807 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
61808 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
61809 index 501da4c..ba79bb4 100644
61810 --- a/include/linux/mod_devicetable.h
61811 +++ b/include/linux/mod_devicetable.h
61812 @@ -12,7 +12,7 @@
61813 typedef unsigned long kernel_ulong_t;
61814 #endif
61815
61816 -#define PCI_ANY_ID (~0)
61817 +#define PCI_ANY_ID ((__u16)~0)
61818
61819 struct pci_device_id {
61820 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
61821 @@ -131,7 +131,7 @@ struct usb_device_id {
61822 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
61823 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
61824
61825 -#define HID_ANY_ID (~0)
61826 +#define HID_ANY_ID (~0U)
61827
61828 struct hid_device_id {
61829 __u16 bus;
61830 diff --git a/include/linux/module.h b/include/linux/module.h
61831 index fbcafe2..e5d9587 100644
61832 --- a/include/linux/module.h
61833 +++ b/include/linux/module.h
61834 @@ -17,6 +17,7 @@
61835 #include <linux/moduleparam.h>
61836 #include <linux/tracepoint.h>
61837 #include <linux/export.h>
61838 +#include <linux/fs.h>
61839
61840 #include <linux/percpu.h>
61841 #include <asm/module.h>
61842 @@ -273,19 +274,16 @@ struct module
61843 int (*init)(void);
61844
61845 /* If this is non-NULL, vfree after init() returns */
61846 - void *module_init;
61847 + void *module_init_rx, *module_init_rw;
61848
61849 /* Here is the actual code + data, vfree'd on unload. */
61850 - void *module_core;
61851 + void *module_core_rx, *module_core_rw;
61852
61853 /* Here are the sizes of the init and core sections */
61854 - unsigned int init_size, core_size;
61855 + unsigned int init_size_rw, core_size_rw;
61856
61857 /* The size of the executable code in each section. */
61858 - unsigned int init_text_size, core_text_size;
61859 -
61860 - /* Size of RO sections of the module (text+rodata) */
61861 - unsigned int init_ro_size, core_ro_size;
61862 + unsigned int init_size_rx, core_size_rx;
61863
61864 /* Arch-specific module values */
61865 struct mod_arch_specific arch;
61866 @@ -341,6 +339,10 @@ struct module
61867 #ifdef CONFIG_EVENT_TRACING
61868 struct ftrace_event_call **trace_events;
61869 unsigned int num_trace_events;
61870 + struct file_operations trace_id;
61871 + struct file_operations trace_enable;
61872 + struct file_operations trace_format;
61873 + struct file_operations trace_filter;
61874 #endif
61875 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
61876 unsigned int num_ftrace_callsites;
61877 @@ -388,16 +390,46 @@ bool is_module_address(unsigned long addr);
61878 bool is_module_percpu_address(unsigned long addr);
61879 bool is_module_text_address(unsigned long addr);
61880
61881 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
61882 +{
61883 +
61884 +#ifdef CONFIG_PAX_KERNEXEC
61885 + if (ktla_ktva(addr) >= (unsigned long)start &&
61886 + ktla_ktva(addr) < (unsigned long)start + size)
61887 + return 1;
61888 +#endif
61889 +
61890 + return ((void *)addr >= start && (void *)addr < start + size);
61891 +}
61892 +
61893 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
61894 +{
61895 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
61896 +}
61897 +
61898 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
61899 +{
61900 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
61901 +}
61902 +
61903 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
61904 +{
61905 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
61906 +}
61907 +
61908 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
61909 +{
61910 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
61911 +}
61912 +
61913 static inline int within_module_core(unsigned long addr, struct module *mod)
61914 {
61915 - return (unsigned long)mod->module_core <= addr &&
61916 - addr < (unsigned long)mod->module_core + mod->core_size;
61917 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
61918 }
61919
61920 static inline int within_module_init(unsigned long addr, struct module *mod)
61921 {
61922 - return (unsigned long)mod->module_init <= addr &&
61923 - addr < (unsigned long)mod->module_init + mod->init_size;
61924 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
61925 }
61926
61927 /* Search for module by name: must hold module_mutex. */
61928 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
61929 index b2be02e..72d2f78 100644
61930 --- a/include/linux/moduleloader.h
61931 +++ b/include/linux/moduleloader.h
61932 @@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
61933
61934 /* Allocator used for allocating struct module, core sections and init
61935 sections. Returns NULL on failure. */
61936 -void *module_alloc(unsigned long size);
61937 +void *module_alloc(unsigned long size) __size_overflow(1);
61938 +
61939 +#ifdef CONFIG_PAX_KERNEXEC
61940 +void *module_alloc_exec(unsigned long size) __size_overflow(1);
61941 +#else
61942 +#define module_alloc_exec(x) module_alloc(x)
61943 +#endif
61944
61945 /* Free memory returned from module_alloc. */
61946 void module_free(struct module *mod, void *module_region);
61947
61948 +#ifdef CONFIG_PAX_KERNEXEC
61949 +void module_free_exec(struct module *mod, void *module_region);
61950 +#else
61951 +#define module_free_exec(x, y) module_free((x), (y))
61952 +#endif
61953 +
61954 /* Apply the given relocation to the (simplified) ELF. Return -error
61955 or 0. */
61956 int apply_relocate(Elf_Shdr *sechdrs,
61957 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
61958 index ea36486..91e70f4 100644
61959 --- a/include/linux/moduleparam.h
61960 +++ b/include/linux/moduleparam.h
61961 @@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
61962 * @len is usually just sizeof(string).
61963 */
61964 #define module_param_string(name, string, len, perm) \
61965 - static const struct kparam_string __param_string_##name \
61966 + static const struct kparam_string __param_string_##name __used \
61967 = { len, string }; \
61968 __module_param_call(MODULE_PARAM_PREFIX, name, \
61969 &param_ops_string, \
61970 @@ -424,7 +424,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
61971 */
61972 #define module_param_array_named(name, array, type, nump, perm) \
61973 param_check_##type(name, &(array)[0]); \
61974 - static const struct kparam_array __param_arr_##name \
61975 + static const struct kparam_array __param_arr_##name __used \
61976 = { .max = ARRAY_SIZE(array), .num = nump, \
61977 .ops = &param_ops_##type, \
61978 .elemsize = sizeof(array[0]), .elem = array }; \
61979 diff --git a/include/linux/namei.h b/include/linux/namei.h
61980 index ffc0213..2c1f2cb 100644
61981 --- a/include/linux/namei.h
61982 +++ b/include/linux/namei.h
61983 @@ -24,7 +24,7 @@ struct nameidata {
61984 unsigned seq;
61985 int last_type;
61986 unsigned depth;
61987 - char *saved_names[MAX_NESTED_LINKS + 1];
61988 + const char *saved_names[MAX_NESTED_LINKS + 1];
61989
61990 /* Intent data */
61991 union {
61992 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
61993 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
61994 extern void unlock_rename(struct dentry *, struct dentry *);
61995
61996 -static inline void nd_set_link(struct nameidata *nd, char *path)
61997 +static inline void nd_set_link(struct nameidata *nd, const char *path)
61998 {
61999 nd->saved_names[nd->depth] = path;
62000 }
62001
62002 -static inline char *nd_get_link(struct nameidata *nd)
62003 +static inline const char *nd_get_link(const struct nameidata *nd)
62004 {
62005 return nd->saved_names[nd->depth];
62006 }
62007 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
62008 index 33900a5..2072000 100644
62009 --- a/include/linux/netdevice.h
62010 +++ b/include/linux/netdevice.h
62011 @@ -1003,6 +1003,7 @@ struct net_device_ops {
62012 int (*ndo_neigh_construct)(struct neighbour *n);
62013 void (*ndo_neigh_destroy)(struct neighbour *n);
62014 };
62015 +typedef struct net_device_ops __no_const net_device_ops_no_const;
62016
62017 /*
62018 * The DEVICE structure.
62019 @@ -1064,7 +1065,7 @@ struct net_device {
62020 int iflink;
62021
62022 struct net_device_stats stats;
62023 - atomic_long_t rx_dropped; /* dropped packets by core network
62024 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
62025 * Do not use this in drivers.
62026 */
62027
62028 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
62029 new file mode 100644
62030 index 0000000..33f4af8
62031 --- /dev/null
62032 +++ b/include/linux/netfilter/xt_gradm.h
62033 @@ -0,0 +1,9 @@
62034 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
62035 +#define _LINUX_NETFILTER_XT_GRADM_H 1
62036 +
62037 +struct xt_gradm_mtinfo {
62038 + __u16 flags;
62039 + __u16 invflags;
62040 +};
62041 +
62042 +#endif
62043 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
62044 index c65a18a..0c05f3a 100644
62045 --- a/include/linux/of_pdt.h
62046 +++ b/include/linux/of_pdt.h
62047 @@ -32,7 +32,7 @@ struct of_pdt_ops {
62048
62049 /* return 0 on success; fill in 'len' with number of bytes in path */
62050 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
62051 -};
62052 +} __no_const;
62053
62054 extern void *prom_early_alloc(unsigned long size);
62055
62056 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
62057 index a4c5624..79d6d88 100644
62058 --- a/include/linux/oprofile.h
62059 +++ b/include/linux/oprofile.h
62060 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
62061 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
62062 char const * name, ulong * val);
62063
62064 -/** Create a file for read-only access to an atomic_t. */
62065 +/** Create a file for read-only access to an atomic_unchecked_t. */
62066 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
62067 - char const * name, atomic_t * val);
62068 + char const * name, atomic_unchecked_t * val);
62069
62070 /** create a directory */
62071 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
62072 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
62073 index ddbb6a9..be1680e 100644
62074 --- a/include/linux/perf_event.h
62075 +++ b/include/linux/perf_event.h
62076 @@ -879,8 +879,8 @@ struct perf_event {
62077
62078 enum perf_event_active_state state;
62079 unsigned int attach_state;
62080 - local64_t count;
62081 - atomic64_t child_count;
62082 + local64_t count; /* PaX: fix it one day */
62083 + atomic64_unchecked_t child_count;
62084
62085 /*
62086 * These are the total time in nanoseconds that the event
62087 @@ -931,8 +931,8 @@ struct perf_event {
62088 * These accumulate total time (in nanoseconds) that children
62089 * events have been enabled and running, respectively.
62090 */
62091 - atomic64_t child_total_time_enabled;
62092 - atomic64_t child_total_time_running;
62093 + atomic64_unchecked_t child_total_time_enabled;
62094 + atomic64_unchecked_t child_total_time_running;
62095
62096 /*
62097 * Protect attach/detach and child_list:
62098 diff --git a/include/linux/personality.h b/include/linux/personality.h
62099 index 8fc7dd1a..c19d89e 100644
62100 --- a/include/linux/personality.h
62101 +++ b/include/linux/personality.h
62102 @@ -44,6 +44,7 @@ enum {
62103 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
62104 ADDR_NO_RANDOMIZE | \
62105 ADDR_COMPAT_LAYOUT | \
62106 + ADDR_LIMIT_3GB | \
62107 MMAP_PAGE_ZERO)
62108
62109 /*
62110 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
62111 index e1ac1ce..0675fed 100644
62112 --- a/include/linux/pipe_fs_i.h
62113 +++ b/include/linux/pipe_fs_i.h
62114 @@ -45,9 +45,9 @@ struct pipe_buffer {
62115 struct pipe_inode_info {
62116 wait_queue_head_t wait;
62117 unsigned int nrbufs, curbuf, buffers;
62118 - unsigned int readers;
62119 - unsigned int writers;
62120 - unsigned int waiting_writers;
62121 + atomic_t readers;
62122 + atomic_t writers;
62123 + atomic_t waiting_writers;
62124 unsigned int r_counter;
62125 unsigned int w_counter;
62126 struct page *tmp_page;
62127 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
62128 index 609daae..5392427 100644
62129 --- a/include/linux/pm_runtime.h
62130 +++ b/include/linux/pm_runtime.h
62131 @@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
62132
62133 static inline void pm_runtime_mark_last_busy(struct device *dev)
62134 {
62135 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
62136 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
62137 }
62138
62139 #else /* !CONFIG_PM_RUNTIME */
62140 diff --git a/include/linux/poison.h b/include/linux/poison.h
62141 index 2110a81..13a11bb 100644
62142 --- a/include/linux/poison.h
62143 +++ b/include/linux/poison.h
62144 @@ -19,8 +19,8 @@
62145 * under normal circumstances, used to verify that nobody uses
62146 * non-initialized list entries.
62147 */
62148 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
62149 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
62150 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
62151 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
62152
62153 /********** include/linux/timer.h **********/
62154 /*
62155 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
62156 index 5a710b9..0b0dab9 100644
62157 --- a/include/linux/preempt.h
62158 +++ b/include/linux/preempt.h
62159 @@ -126,7 +126,7 @@ struct preempt_ops {
62160 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
62161 void (*sched_out)(struct preempt_notifier *notifier,
62162 struct task_struct *next);
62163 -};
62164 +} __no_const;
62165
62166 /**
62167 * preempt_notifier - key for installing preemption notifiers
62168 diff --git a/include/linux/printk.h b/include/linux/printk.h
62169 index 0525927..a5388b6 100644
62170 --- a/include/linux/printk.h
62171 +++ b/include/linux/printk.h
62172 @@ -94,6 +94,8 @@ void early_printk(const char *fmt, ...);
62173 extern int printk_needs_cpu(int cpu);
62174 extern void printk_tick(void);
62175
62176 +extern int kptr_restrict;
62177 +
62178 #ifdef CONFIG_PRINTK
62179 asmlinkage __printf(1, 0)
62180 int vprintk(const char *fmt, va_list args);
62181 @@ -117,7 +119,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
62182
62183 extern int printk_delay_msec;
62184 extern int dmesg_restrict;
62185 -extern int kptr_restrict;
62186
62187 void log_buf_kexec_setup(void);
62188 void __init setup_log_buf(int early);
62189 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
62190 index 85c5073..51fac8b 100644
62191 --- a/include/linux/proc_fs.h
62192 +++ b/include/linux/proc_fs.h
62193 @@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
62194 return proc_create_data(name, mode, parent, proc_fops, NULL);
62195 }
62196
62197 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
62198 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
62199 +{
62200 +#ifdef CONFIG_GRKERNSEC_PROC_USER
62201 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
62202 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62203 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
62204 +#else
62205 + return proc_create_data(name, mode, parent, proc_fops, NULL);
62206 +#endif
62207 +}
62208 +
62209 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
62210 umode_t mode, struct proc_dir_entry *base,
62211 read_proc_t *read_proc, void * data)
62212 @@ -258,7 +270,7 @@ union proc_op {
62213 int (*proc_show)(struct seq_file *m,
62214 struct pid_namespace *ns, struct pid *pid,
62215 struct task_struct *task);
62216 -};
62217 +} __no_const;
62218
62219 struct ctl_table_header;
62220 struct ctl_table;
62221 diff --git a/include/linux/random.h b/include/linux/random.h
62222 index 8f74538..02a1012 100644
62223 --- a/include/linux/random.h
62224 +++ b/include/linux/random.h
62225 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
62226
62227 u32 prandom32(struct rnd_state *);
62228
62229 +static inline unsigned long pax_get_random_long(void)
62230 +{
62231 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
62232 +}
62233 +
62234 /*
62235 * Handle minimum values for seeds
62236 */
62237 static inline u32 __seed(u32 x, u32 m)
62238 {
62239 - return (x < m) ? x + m : x;
62240 + return (x <= m) ? x + m + 1 : x;
62241 }
62242
62243 /**
62244 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
62245 index e0879a7..a12f962 100644
62246 --- a/include/linux/reboot.h
62247 +++ b/include/linux/reboot.h
62248 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
62249 * Architecture-specific implementations of sys_reboot commands.
62250 */
62251
62252 -extern void machine_restart(char *cmd);
62253 -extern void machine_halt(void);
62254 -extern void machine_power_off(void);
62255 +extern void machine_restart(char *cmd) __noreturn;
62256 +extern void machine_halt(void) __noreturn;
62257 +extern void machine_power_off(void) __noreturn;
62258
62259 extern void machine_shutdown(void);
62260 struct pt_regs;
62261 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
62262 */
62263
62264 extern void kernel_restart_prepare(char *cmd);
62265 -extern void kernel_restart(char *cmd);
62266 -extern void kernel_halt(void);
62267 -extern void kernel_power_off(void);
62268 +extern void kernel_restart(char *cmd) __noreturn;
62269 +extern void kernel_halt(void) __noreturn;
62270 +extern void kernel_power_off(void) __noreturn;
62271
62272 extern int C_A_D; /* for sysctl */
62273 void ctrl_alt_del(void);
62274 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
62275 * Emergency restart, callable from an interrupt handler.
62276 */
62277
62278 -extern void emergency_restart(void);
62279 +extern void emergency_restart(void) __noreturn;
62280 #include <asm/emergency-restart.h>
62281
62282 #endif
62283 diff --git a/include/linux/relay.h b/include/linux/relay.h
62284 index 91cacc3..b55ff74 100644
62285 --- a/include/linux/relay.h
62286 +++ b/include/linux/relay.h
62287 @@ -160,7 +160,7 @@ struct rchan_callbacks
62288 * The callback should return 0 if successful, negative if not.
62289 */
62290 int (*remove_buf_file)(struct dentry *dentry);
62291 -};
62292 +} __no_const;
62293
62294 /*
62295 * CONFIG_RELAY kernel API, kernel/relay.c
62296 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
62297 index 6fdf027..ff72610 100644
62298 --- a/include/linux/rfkill.h
62299 +++ b/include/linux/rfkill.h
62300 @@ -147,6 +147,7 @@ struct rfkill_ops {
62301 void (*query)(struct rfkill *rfkill, void *data);
62302 int (*set_block)(void *data, bool blocked);
62303 };
62304 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
62305
62306 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
62307 /**
62308 diff --git a/include/linux/rio.h b/include/linux/rio.h
62309 index 4d50611..c6858a2 100644
62310 --- a/include/linux/rio.h
62311 +++ b/include/linux/rio.h
62312 @@ -315,7 +315,7 @@ struct rio_ops {
62313 int mbox, void *buffer, size_t len);
62314 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
62315 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
62316 -};
62317 +} __no_const;
62318
62319 #define RIO_RESOURCE_MEM 0x00000100
62320 #define RIO_RESOURCE_DOORBELL 0x00000200
62321 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
62322 index fd07c45..4676b8e 100644
62323 --- a/include/linux/rmap.h
62324 +++ b/include/linux/rmap.h
62325 @@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
62326 void anon_vma_init(void); /* create anon_vma_cachep */
62327 int anon_vma_prepare(struct vm_area_struct *);
62328 void unlink_anon_vmas(struct vm_area_struct *);
62329 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
62330 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
62331 void anon_vma_moveto_tail(struct vm_area_struct *);
62332 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
62333 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
62334
62335 static inline void anon_vma_merge(struct vm_area_struct *vma,
62336 struct vm_area_struct *next)
62337 diff --git a/include/linux/sched.h b/include/linux/sched.h
62338 index 81a173c..85ccd8f 100644
62339 --- a/include/linux/sched.h
62340 +++ b/include/linux/sched.h
62341 @@ -100,6 +100,7 @@ struct bio_list;
62342 struct fs_struct;
62343 struct perf_event_context;
62344 struct blk_plug;
62345 +struct linux_binprm;
62346
62347 /*
62348 * List of flags we want to share for kernel threads,
62349 @@ -382,10 +383,13 @@ struct user_namespace;
62350 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
62351
62352 extern int sysctl_max_map_count;
62353 +extern unsigned long sysctl_heap_stack_gap;
62354
62355 #include <linux/aio.h>
62356
62357 #ifdef CONFIG_MMU
62358 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
62359 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
62360 extern void arch_pick_mmap_layout(struct mm_struct *mm);
62361 extern unsigned long
62362 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
62363 @@ -643,6 +647,17 @@ struct signal_struct {
62364 #ifdef CONFIG_TASKSTATS
62365 struct taskstats *stats;
62366 #endif
62367 +
62368 +#ifdef CONFIG_GRKERNSEC
62369 + u32 curr_ip;
62370 + u32 saved_ip;
62371 + u32 gr_saddr;
62372 + u32 gr_daddr;
62373 + u16 gr_sport;
62374 + u16 gr_dport;
62375 + u8 used_accept:1;
62376 +#endif
62377 +
62378 #ifdef CONFIG_AUDIT
62379 unsigned audit_tty;
62380 struct tty_audit_buf *tty_audit_buf;
62381 @@ -726,6 +741,11 @@ struct user_struct {
62382 struct key *session_keyring; /* UID's default session keyring */
62383 #endif
62384
62385 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
62386 + unsigned int banned;
62387 + unsigned long ban_expires;
62388 +#endif
62389 +
62390 /* Hash table maintenance information */
62391 struct hlist_node uidhash_node;
62392 uid_t uid;
62393 @@ -1386,8 +1406,8 @@ struct task_struct {
62394 struct list_head thread_group;
62395
62396 struct completion *vfork_done; /* for vfork() */
62397 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
62398 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62399 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
62400 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62401
62402 cputime_t utime, stime, utimescaled, stimescaled;
62403 cputime_t gtime;
62404 @@ -1403,13 +1423,6 @@ struct task_struct {
62405 struct task_cputime cputime_expires;
62406 struct list_head cpu_timers[3];
62407
62408 -/* process credentials */
62409 - const struct cred __rcu *real_cred; /* objective and real subjective task
62410 - * credentials (COW) */
62411 - const struct cred __rcu *cred; /* effective (overridable) subjective task
62412 - * credentials (COW) */
62413 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62414 -
62415 char comm[TASK_COMM_LEN]; /* executable name excluding path
62416 - access with [gs]et_task_comm (which lock
62417 it with task_lock())
62418 @@ -1426,8 +1439,16 @@ struct task_struct {
62419 #endif
62420 /* CPU-specific state of this task */
62421 struct thread_struct thread;
62422 +/* thread_info moved to task_struct */
62423 +#ifdef CONFIG_X86
62424 + struct thread_info tinfo;
62425 +#endif
62426 /* filesystem information */
62427 struct fs_struct *fs;
62428 +
62429 + const struct cred __rcu *cred; /* effective (overridable) subjective task
62430 + * credentials (COW) */
62431 +
62432 /* open file information */
62433 struct files_struct *files;
62434 /* namespaces */
62435 @@ -1469,6 +1490,11 @@ struct task_struct {
62436 struct rt_mutex_waiter *pi_blocked_on;
62437 #endif
62438
62439 +/* process credentials */
62440 + const struct cred __rcu *real_cred; /* objective and real subjective task
62441 + * credentials (COW) */
62442 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62443 +
62444 #ifdef CONFIG_DEBUG_MUTEXES
62445 /* mutex deadlock detection */
62446 struct mutex_waiter *blocked_on;
62447 @@ -1585,6 +1611,27 @@ struct task_struct {
62448 unsigned long default_timer_slack_ns;
62449
62450 struct list_head *scm_work_list;
62451 +
62452 +#ifdef CONFIG_GRKERNSEC
62453 + /* grsecurity */
62454 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62455 + u64 exec_id;
62456 +#endif
62457 +#ifdef CONFIG_GRKERNSEC_SETXID
62458 + const struct cred *delayed_cred;
62459 +#endif
62460 + struct dentry *gr_chroot_dentry;
62461 + struct acl_subject_label *acl;
62462 + struct acl_role_label *role;
62463 + struct file *exec_file;
62464 + u16 acl_role_id;
62465 + /* is this the task that authenticated to the special role */
62466 + u8 acl_sp_role;
62467 + u8 is_writable;
62468 + u8 brute;
62469 + u8 gr_is_chrooted;
62470 +#endif
62471 +
62472 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
62473 /* Index of current stored address in ret_stack */
62474 int curr_ret_stack;
62475 @@ -1619,6 +1666,51 @@ struct task_struct {
62476 #endif
62477 };
62478
62479 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
62480 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
62481 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
62482 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
62483 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
62484 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
62485 +
62486 +#ifdef CONFIG_PAX_SOFTMODE
62487 +extern int pax_softmode;
62488 +#endif
62489 +
62490 +extern int pax_check_flags(unsigned long *);
62491 +
62492 +/* if tsk != current then task_lock must be held on it */
62493 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62494 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
62495 +{
62496 + if (likely(tsk->mm))
62497 + return tsk->mm->pax_flags;
62498 + else
62499 + return 0UL;
62500 +}
62501 +
62502 +/* if tsk != current then task_lock must be held on it */
62503 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
62504 +{
62505 + if (likely(tsk->mm)) {
62506 + tsk->mm->pax_flags = flags;
62507 + return 0;
62508 + }
62509 + return -EINVAL;
62510 +}
62511 +#endif
62512 +
62513 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62514 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
62515 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62516 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
62517 +#endif
62518 +
62519 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
62520 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
62521 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
62522 +extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
62523 +
62524 /* Future-safe accessor for struct task_struct's cpus_allowed. */
62525 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
62526
62527 @@ -2138,7 +2230,9 @@ void yield(void);
62528 extern struct exec_domain default_exec_domain;
62529
62530 union thread_union {
62531 +#ifndef CONFIG_X86
62532 struct thread_info thread_info;
62533 +#endif
62534 unsigned long stack[THREAD_SIZE/sizeof(long)];
62535 };
62536
62537 @@ -2171,6 +2265,7 @@ extern struct pid_namespace init_pid_ns;
62538 */
62539
62540 extern struct task_struct *find_task_by_vpid(pid_t nr);
62541 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
62542 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
62543 struct pid_namespace *ns);
62544
62545 @@ -2314,7 +2409,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
62546 extern void exit_itimers(struct signal_struct *);
62547 extern void flush_itimer_signals(void);
62548
62549 -extern void do_group_exit(int);
62550 +extern __noreturn void do_group_exit(int);
62551
62552 extern void daemonize(const char *, ...);
62553 extern int allow_signal(int);
62554 @@ -2515,13 +2610,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
62555
62556 #endif
62557
62558 -static inline int object_is_on_stack(void *obj)
62559 +static inline int object_starts_on_stack(void *obj)
62560 {
62561 - void *stack = task_stack_page(current);
62562 + const void *stack = task_stack_page(current);
62563
62564 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
62565 }
62566
62567 +#ifdef CONFIG_PAX_USERCOPY
62568 +extern int object_is_on_stack(const void *obj, unsigned long len);
62569 +#endif
62570 +
62571 extern void thread_info_cache_init(void);
62572
62573 #ifdef CONFIG_DEBUG_STACK_USAGE
62574 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
62575 index 899fbb4..1cb4138 100644
62576 --- a/include/linux/screen_info.h
62577 +++ b/include/linux/screen_info.h
62578 @@ -43,7 +43,8 @@ struct screen_info {
62579 __u16 pages; /* 0x32 */
62580 __u16 vesa_attributes; /* 0x34 */
62581 __u32 capabilities; /* 0x36 */
62582 - __u8 _reserved[6]; /* 0x3a */
62583 + __u16 vesapm_size; /* 0x3a */
62584 + __u8 _reserved[4]; /* 0x3c */
62585 } __attribute__((packed));
62586
62587 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
62588 diff --git a/include/linux/security.h b/include/linux/security.h
62589 index 673afbb..2b7454b 100644
62590 --- a/include/linux/security.h
62591 +++ b/include/linux/security.h
62592 @@ -26,6 +26,7 @@
62593 #include <linux/capability.h>
62594 #include <linux/slab.h>
62595 #include <linux/err.h>
62596 +#include <linux/grsecurity.h>
62597
62598 struct linux_binprm;
62599 struct cred;
62600 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
62601 index fc61854..d7c490b 100644
62602 --- a/include/linux/seq_file.h
62603 +++ b/include/linux/seq_file.h
62604 @@ -25,6 +25,9 @@ struct seq_file {
62605 struct mutex lock;
62606 const struct seq_operations *op;
62607 int poll_event;
62608 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62609 + u64 exec_id;
62610 +#endif
62611 void *private;
62612 };
62613
62614 @@ -34,6 +37,7 @@ struct seq_operations {
62615 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
62616 int (*show) (struct seq_file *m, void *v);
62617 };
62618 +typedef struct seq_operations __no_const seq_operations_no_const;
62619
62620 #define SEQ_SKIP 1
62621
62622 diff --git a/include/linux/shm.h b/include/linux/shm.h
62623 index 92808b8..c28cac4 100644
62624 --- a/include/linux/shm.h
62625 +++ b/include/linux/shm.h
62626 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
62627
62628 /* The task created the shm object. NULL if the task is dead. */
62629 struct task_struct *shm_creator;
62630 +#ifdef CONFIG_GRKERNSEC
62631 + time_t shm_createtime;
62632 + pid_t shm_lapid;
62633 +#endif
62634 };
62635
62636 /* shm_mode upper byte flags */
62637 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
62638 index c168907..c7756db 100644
62639 --- a/include/linux/skbuff.h
62640 +++ b/include/linux/skbuff.h
62641 @@ -666,7 +666,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
62642 */
62643 static inline int skb_queue_empty(const struct sk_buff_head *list)
62644 {
62645 - return list->next == (struct sk_buff *)list;
62646 + return list->next == (const struct sk_buff *)list;
62647 }
62648
62649 /**
62650 @@ -679,7 +679,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
62651 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62652 const struct sk_buff *skb)
62653 {
62654 - return skb->next == (struct sk_buff *)list;
62655 + return skb->next == (const struct sk_buff *)list;
62656 }
62657
62658 /**
62659 @@ -692,7 +692,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62660 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
62661 const struct sk_buff *skb)
62662 {
62663 - return skb->prev == (struct sk_buff *)list;
62664 + return skb->prev == (const struct sk_buff *)list;
62665 }
62666
62667 /**
62668 @@ -1587,7 +1587,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
62669 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
62670 */
62671 #ifndef NET_SKB_PAD
62672 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
62673 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
62674 #endif
62675
62676 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
62677 diff --git a/include/linux/slab.h b/include/linux/slab.h
62678 index a595dce..c403597 100644
62679 --- a/include/linux/slab.h
62680 +++ b/include/linux/slab.h
62681 @@ -11,12 +11,20 @@
62682
62683 #include <linux/gfp.h>
62684 #include <linux/types.h>
62685 +#include <linux/err.h>
62686
62687 /*
62688 * Flags to pass to kmem_cache_create().
62689 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
62690 */
62691 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
62692 +
62693 +#ifdef CONFIG_PAX_USERCOPY
62694 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
62695 +#else
62696 +#define SLAB_USERCOPY 0x00000000UL
62697 +#endif
62698 +
62699 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
62700 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
62701 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
62702 @@ -87,10 +95,13 @@
62703 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
62704 * Both make kfree a no-op.
62705 */
62706 -#define ZERO_SIZE_PTR ((void *)16)
62707 +#define ZERO_SIZE_PTR \
62708 +({ \
62709 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
62710 + (void *)(-MAX_ERRNO-1L); \
62711 +})
62712
62713 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
62714 - (unsigned long)ZERO_SIZE_PTR)
62715 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
62716
62717 /*
62718 * struct kmem_cache related prototypes
62719 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
62720 void kfree(const void *);
62721 void kzfree(const void *);
62722 size_t ksize(const void *);
62723 +void check_object_size(const void *ptr, unsigned long n, bool to);
62724
62725 /*
62726 * Allocator specific definitions. These are mainly used to establish optimized
62727 @@ -240,6 +252,7 @@ size_t ksize(const void *);
62728 * for general use, and so are not documented here. For a full list of
62729 * potential flags, always refer to linux/gfp.h.
62730 */
62731 +static void *kmalloc_array(size_t n, size_t size, gfp_t flags) __size_overflow(1, 2);
62732 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
62733 {
62734 if (size != 0 && n > ULONG_MAX / size)
62735 @@ -298,7 +311,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
62736 */
62737 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
62738 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
62739 -extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
62740 +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
62741 #define kmalloc_track_caller(size, flags) \
62742 __kmalloc_track_caller(size, flags, _RET_IP_)
62743 #else
62744 @@ -317,7 +330,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
62745 */
62746 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
62747 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
62748 -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
62749 +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
62750 #define kmalloc_node_track_caller(size, flags, node) \
62751 __kmalloc_node_track_caller(size, flags, node, \
62752 _RET_IP_)
62753 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
62754 index fbd1117..d4d8ef8 100644
62755 --- a/include/linux/slab_def.h
62756 +++ b/include/linux/slab_def.h
62757 @@ -66,10 +66,10 @@ struct kmem_cache {
62758 unsigned long node_allocs;
62759 unsigned long node_frees;
62760 unsigned long node_overflow;
62761 - atomic_t allochit;
62762 - atomic_t allocmiss;
62763 - atomic_t freehit;
62764 - atomic_t freemiss;
62765 + atomic_unchecked_t allochit;
62766 + atomic_unchecked_t allocmiss;
62767 + atomic_unchecked_t freehit;
62768 + atomic_unchecked_t freemiss;
62769
62770 /*
62771 * If debugging is enabled, then the allocator can add additional
62772 @@ -107,7 +107,7 @@ struct cache_sizes {
62773 extern struct cache_sizes malloc_sizes[];
62774
62775 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
62776 -void *__kmalloc(size_t size, gfp_t flags);
62777 +void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
62778
62779 #ifdef CONFIG_TRACING
62780 extern void *kmem_cache_alloc_trace(size_t size,
62781 @@ -160,7 +160,7 @@ found:
62782 }
62783
62784 #ifdef CONFIG_NUMA
62785 -extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
62786 +extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
62787 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
62788
62789 #ifdef CONFIG_TRACING
62790 diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
62791 index 0ec00b3..39cb7fc 100644
62792 --- a/include/linux/slob_def.h
62793 +++ b/include/linux/slob_def.h
62794 @@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
62795 return kmem_cache_alloc_node(cachep, flags, -1);
62796 }
62797
62798 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
62799 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
62800
62801 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
62802 {
62803 @@ -29,6 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
62804 return __kmalloc_node(size, flags, -1);
62805 }
62806
62807 +static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
62808 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
62809 {
62810 return kmalloc(size, flags);
62811 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
62812 index c2f8c8b..be9e036 100644
62813 --- a/include/linux/slub_def.h
62814 +++ b/include/linux/slub_def.h
62815 @@ -92,7 +92,7 @@ struct kmem_cache {
62816 struct kmem_cache_order_objects max;
62817 struct kmem_cache_order_objects min;
62818 gfp_t allocflags; /* gfp flags to use on each alloc */
62819 - int refcount; /* Refcount for slab cache destroy */
62820 + atomic_t refcount; /* Refcount for slab cache destroy */
62821 void (*ctor)(void *);
62822 int inuse; /* Offset to metadata */
62823 int align; /* Alignment */
62824 @@ -153,6 +153,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
62825 * Sorry that the following has to be that ugly but some versions of GCC
62826 * have trouble with constant propagation and loops.
62827 */
62828 +static __always_inline int kmalloc_index(size_t size) __size_overflow(1);
62829 static __always_inline int kmalloc_index(size_t size)
62830 {
62831 if (!size)
62832 @@ -218,7 +219,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
62833 }
62834
62835 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
62836 -void *__kmalloc(size_t size, gfp_t flags);
62837 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
62838
62839 static __always_inline void *
62840 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
62841 @@ -259,6 +260,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
62842 }
62843 #endif
62844
62845 +static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
62846 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
62847 {
62848 unsigned int order = get_order(size);
62849 @@ -284,7 +286,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
62850 }
62851
62852 #ifdef CONFIG_NUMA
62853 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
62854 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
62855 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
62856
62857 #ifdef CONFIG_TRACING
62858 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
62859 index de8832d..0147b46 100644
62860 --- a/include/linux/sonet.h
62861 +++ b/include/linux/sonet.h
62862 @@ -61,7 +61,7 @@ struct sonet_stats {
62863 #include <linux/atomic.h>
62864
62865 struct k_sonet_stats {
62866 -#define __HANDLE_ITEM(i) atomic_t i
62867 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
62868 __SONET_ITEMS
62869 #undef __HANDLE_ITEM
62870 };
62871 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
62872 index 523547e..2cb7140 100644
62873 --- a/include/linux/sunrpc/clnt.h
62874 +++ b/include/linux/sunrpc/clnt.h
62875 @@ -174,9 +174,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
62876 {
62877 switch (sap->sa_family) {
62878 case AF_INET:
62879 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
62880 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
62881 case AF_INET6:
62882 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
62883 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
62884 }
62885 return 0;
62886 }
62887 @@ -209,7 +209,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
62888 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
62889 const struct sockaddr *src)
62890 {
62891 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
62892 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
62893 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
62894
62895 dsin->sin_family = ssin->sin_family;
62896 @@ -312,7 +312,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
62897 if (sa->sa_family != AF_INET6)
62898 return 0;
62899
62900 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
62901 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
62902 }
62903
62904 #endif /* __KERNEL__ */
62905 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
62906 index dc0c3cc..8503fb6 100644
62907 --- a/include/linux/sunrpc/sched.h
62908 +++ b/include/linux/sunrpc/sched.h
62909 @@ -106,6 +106,7 @@ struct rpc_call_ops {
62910 void (*rpc_count_stats)(struct rpc_task *, void *);
62911 void (*rpc_release)(void *);
62912 };
62913 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
62914
62915 struct rpc_task_setup {
62916 struct rpc_task *task;
62917 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
62918 index 0b8e3e6..33e0a01 100644
62919 --- a/include/linux/sunrpc/svc_rdma.h
62920 +++ b/include/linux/sunrpc/svc_rdma.h
62921 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
62922 extern unsigned int svcrdma_max_requests;
62923 extern unsigned int svcrdma_max_req_size;
62924
62925 -extern atomic_t rdma_stat_recv;
62926 -extern atomic_t rdma_stat_read;
62927 -extern atomic_t rdma_stat_write;
62928 -extern atomic_t rdma_stat_sq_starve;
62929 -extern atomic_t rdma_stat_rq_starve;
62930 -extern atomic_t rdma_stat_rq_poll;
62931 -extern atomic_t rdma_stat_rq_prod;
62932 -extern atomic_t rdma_stat_sq_poll;
62933 -extern atomic_t rdma_stat_sq_prod;
62934 +extern atomic_unchecked_t rdma_stat_recv;
62935 +extern atomic_unchecked_t rdma_stat_read;
62936 +extern atomic_unchecked_t rdma_stat_write;
62937 +extern atomic_unchecked_t rdma_stat_sq_starve;
62938 +extern atomic_unchecked_t rdma_stat_rq_starve;
62939 +extern atomic_unchecked_t rdma_stat_rq_poll;
62940 +extern atomic_unchecked_t rdma_stat_rq_prod;
62941 +extern atomic_unchecked_t rdma_stat_sq_poll;
62942 +extern atomic_unchecked_t rdma_stat_sq_prod;
62943
62944 #define RPCRDMA_VERSION 1
62945
62946 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
62947 index c34b4c8..a65b67d 100644
62948 --- a/include/linux/sysctl.h
62949 +++ b/include/linux/sysctl.h
62950 @@ -155,7 +155,11 @@ enum
62951 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
62952 };
62953
62954 -
62955 +#ifdef CONFIG_PAX_SOFTMODE
62956 +enum {
62957 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
62958 +};
62959 +#endif
62960
62961 /* CTL_VM names: */
62962 enum
62963 @@ -948,6 +952,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
62964
62965 extern int proc_dostring(struct ctl_table *, int,
62966 void __user *, size_t *, loff_t *);
62967 +extern int proc_dostring_modpriv(struct ctl_table *, int,
62968 + void __user *, size_t *, loff_t *);
62969 extern int proc_dointvec(struct ctl_table *, int,
62970 void __user *, size_t *, loff_t *);
62971 extern int proc_dointvec_minmax(struct ctl_table *, int,
62972 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
62973 index ff7dc08..893e1bd 100644
62974 --- a/include/linux/tty_ldisc.h
62975 +++ b/include/linux/tty_ldisc.h
62976 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
62977
62978 struct module *owner;
62979
62980 - int refcount;
62981 + atomic_t refcount;
62982 };
62983
62984 struct tty_ldisc {
62985 diff --git a/include/linux/types.h b/include/linux/types.h
62986 index 7f480db..175c256 100644
62987 --- a/include/linux/types.h
62988 +++ b/include/linux/types.h
62989 @@ -220,10 +220,26 @@ typedef struct {
62990 int counter;
62991 } atomic_t;
62992
62993 +#ifdef CONFIG_PAX_REFCOUNT
62994 +typedef struct {
62995 + int counter;
62996 +} atomic_unchecked_t;
62997 +#else
62998 +typedef atomic_t atomic_unchecked_t;
62999 +#endif
63000 +
63001 #ifdef CONFIG_64BIT
63002 typedef struct {
63003 long counter;
63004 } atomic64_t;
63005 +
63006 +#ifdef CONFIG_PAX_REFCOUNT
63007 +typedef struct {
63008 + long counter;
63009 +} atomic64_unchecked_t;
63010 +#else
63011 +typedef atomic64_t atomic64_unchecked_t;
63012 +#endif
63013 #endif
63014
63015 struct list_head {
63016 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
63017 index 5ca0951..ab496a5 100644
63018 --- a/include/linux/uaccess.h
63019 +++ b/include/linux/uaccess.h
63020 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
63021 long ret; \
63022 mm_segment_t old_fs = get_fs(); \
63023 \
63024 - set_fs(KERNEL_DS); \
63025 pagefault_disable(); \
63026 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
63027 - pagefault_enable(); \
63028 + set_fs(KERNEL_DS); \
63029 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
63030 set_fs(old_fs); \
63031 + pagefault_enable(); \
63032 ret; \
63033 })
63034
63035 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
63036 index 99c1b4d..bb94261 100644
63037 --- a/include/linux/unaligned/access_ok.h
63038 +++ b/include/linux/unaligned/access_ok.h
63039 @@ -6,32 +6,32 @@
63040
63041 static inline u16 get_unaligned_le16(const void *p)
63042 {
63043 - return le16_to_cpup((__le16 *)p);
63044 + return le16_to_cpup((const __le16 *)p);
63045 }
63046
63047 static inline u32 get_unaligned_le32(const void *p)
63048 {
63049 - return le32_to_cpup((__le32 *)p);
63050 + return le32_to_cpup((const __le32 *)p);
63051 }
63052
63053 static inline u64 get_unaligned_le64(const void *p)
63054 {
63055 - return le64_to_cpup((__le64 *)p);
63056 + return le64_to_cpup((const __le64 *)p);
63057 }
63058
63059 static inline u16 get_unaligned_be16(const void *p)
63060 {
63061 - return be16_to_cpup((__be16 *)p);
63062 + return be16_to_cpup((const __be16 *)p);
63063 }
63064
63065 static inline u32 get_unaligned_be32(const void *p)
63066 {
63067 - return be32_to_cpup((__be32 *)p);
63068 + return be32_to_cpup((const __be32 *)p);
63069 }
63070
63071 static inline u64 get_unaligned_be64(const void *p)
63072 {
63073 - return be64_to_cpup((__be64 *)p);
63074 + return be64_to_cpup((const __be64 *)p);
63075 }
63076
63077 static inline void put_unaligned_le16(u16 val, void *p)
63078 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
63079 index 547e59c..db6ad19 100644
63080 --- a/include/linux/usb/renesas_usbhs.h
63081 +++ b/include/linux/usb/renesas_usbhs.h
63082 @@ -39,7 +39,7 @@ enum {
63083 */
63084 struct renesas_usbhs_driver_callback {
63085 int (*notify_hotplug)(struct platform_device *pdev);
63086 -};
63087 +} __no_const;
63088
63089 /*
63090 * callback functions for platform
63091 @@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
63092 * VBUS control is needed for Host
63093 */
63094 int (*set_vbus)(struct platform_device *pdev, int enable);
63095 -};
63096 +} __no_const;
63097
63098 /*
63099 * parameters for renesas usbhs
63100 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
63101 index 6f8fbcf..8259001 100644
63102 --- a/include/linux/vermagic.h
63103 +++ b/include/linux/vermagic.h
63104 @@ -25,9 +25,35 @@
63105 #define MODULE_ARCH_VERMAGIC ""
63106 #endif
63107
63108 +#ifdef CONFIG_PAX_REFCOUNT
63109 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
63110 +#else
63111 +#define MODULE_PAX_REFCOUNT ""
63112 +#endif
63113 +
63114 +#ifdef CONSTIFY_PLUGIN
63115 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
63116 +#else
63117 +#define MODULE_CONSTIFY_PLUGIN ""
63118 +#endif
63119 +
63120 +#ifdef STACKLEAK_PLUGIN
63121 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
63122 +#else
63123 +#define MODULE_STACKLEAK_PLUGIN ""
63124 +#endif
63125 +
63126 +#ifdef CONFIG_GRKERNSEC
63127 +#define MODULE_GRSEC "GRSEC "
63128 +#else
63129 +#define MODULE_GRSEC ""
63130 +#endif
63131 +
63132 #define VERMAGIC_STRING \
63133 UTS_RELEASE " " \
63134 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
63135 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
63136 - MODULE_ARCH_VERMAGIC
63137 + MODULE_ARCH_VERMAGIC \
63138 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
63139 + MODULE_GRSEC
63140
63141 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
63142 index dcdfc2b..ec79ab5 100644
63143 --- a/include/linux/vmalloc.h
63144 +++ b/include/linux/vmalloc.h
63145 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
63146 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
63147 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
63148 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
63149 +
63150 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
63151 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
63152 +#endif
63153 +
63154 /* bits [20..32] reserved for arch specific ioremap internals */
63155
63156 /*
63157 @@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
63158 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
63159 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
63160 unsigned long start, unsigned long end, gfp_t gfp_mask,
63161 - pgprot_t prot, int node, void *caller);
63162 + pgprot_t prot, int node, void *caller) __size_overflow(1);
63163 extern void vfree(const void *addr);
63164
63165 extern void *vmap(struct page **pages, unsigned int count,
63166 @@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
63167 extern void free_vm_area(struct vm_struct *area);
63168
63169 /* for /dev/kmem */
63170 -extern long vread(char *buf, char *addr, unsigned long count);
63171 -extern long vwrite(char *buf, char *addr, unsigned long count);
63172 +extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
63173 +extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
63174
63175 /*
63176 * Internals. Dont't use..
63177 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
63178 index 65efb92..137adbb 100644
63179 --- a/include/linux/vmstat.h
63180 +++ b/include/linux/vmstat.h
63181 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
63182 /*
63183 * Zone based page accounting with per cpu differentials.
63184 */
63185 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63186 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63187
63188 static inline void zone_page_state_add(long x, struct zone *zone,
63189 enum zone_stat_item item)
63190 {
63191 - atomic_long_add(x, &zone->vm_stat[item]);
63192 - atomic_long_add(x, &vm_stat[item]);
63193 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
63194 + atomic_long_add_unchecked(x, &vm_stat[item]);
63195 }
63196
63197 static inline unsigned long global_page_state(enum zone_stat_item item)
63198 {
63199 - long x = atomic_long_read(&vm_stat[item]);
63200 + long x = atomic_long_read_unchecked(&vm_stat[item]);
63201 #ifdef CONFIG_SMP
63202 if (x < 0)
63203 x = 0;
63204 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
63205 static inline unsigned long zone_page_state(struct zone *zone,
63206 enum zone_stat_item item)
63207 {
63208 - long x = atomic_long_read(&zone->vm_stat[item]);
63209 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63210 #ifdef CONFIG_SMP
63211 if (x < 0)
63212 x = 0;
63213 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
63214 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
63215 enum zone_stat_item item)
63216 {
63217 - long x = atomic_long_read(&zone->vm_stat[item]);
63218 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63219
63220 #ifdef CONFIG_SMP
63221 int cpu;
63222 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
63223
63224 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
63225 {
63226 - atomic_long_inc(&zone->vm_stat[item]);
63227 - atomic_long_inc(&vm_stat[item]);
63228 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
63229 + atomic_long_inc_unchecked(&vm_stat[item]);
63230 }
63231
63232 static inline void __inc_zone_page_state(struct page *page,
63233 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
63234
63235 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
63236 {
63237 - atomic_long_dec(&zone->vm_stat[item]);
63238 - atomic_long_dec(&vm_stat[item]);
63239 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
63240 + atomic_long_dec_unchecked(&vm_stat[item]);
63241 }
63242
63243 static inline void __dec_zone_page_state(struct page *page,
63244 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
63245 index e5d1220..ef6e406 100644
63246 --- a/include/linux/xattr.h
63247 +++ b/include/linux/xattr.h
63248 @@ -57,6 +57,11 @@
63249 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
63250 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
63251
63252 +/* User namespace */
63253 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
63254 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
63255 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
63256 +
63257 #ifdef __KERNEL__
63258
63259 #include <linux/types.h>
63260 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
63261 index 4aeff96..b378cdc 100644
63262 --- a/include/media/saa7146_vv.h
63263 +++ b/include/media/saa7146_vv.h
63264 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
63265 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
63266
63267 /* the extension can override this */
63268 - struct v4l2_ioctl_ops ops;
63269 + v4l2_ioctl_ops_no_const ops;
63270 /* pointer to the saa7146 core ops */
63271 const struct v4l2_ioctl_ops *core_ops;
63272
63273 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
63274 index 96d2221..2292f89 100644
63275 --- a/include/media/v4l2-dev.h
63276 +++ b/include/media/v4l2-dev.h
63277 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
63278
63279
63280 struct v4l2_file_operations {
63281 - struct module *owner;
63282 + struct module * const owner;
63283 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
63284 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
63285 unsigned int (*poll) (struct file *, struct poll_table_struct *);
63286 @@ -71,6 +71,7 @@ struct v4l2_file_operations {
63287 int (*open) (struct file *);
63288 int (*release) (struct file *);
63289 };
63290 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
63291
63292 /*
63293 * Newer version of video_device, handled by videodev2.c
63294 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
63295 index 3cb939c..f23c6bb 100644
63296 --- a/include/media/v4l2-ioctl.h
63297 +++ b/include/media/v4l2-ioctl.h
63298 @@ -281,7 +281,7 @@ struct v4l2_ioctl_ops {
63299 long (*vidioc_default) (struct file *file, void *fh,
63300 bool valid_prio, int cmd, void *arg);
63301 };
63302 -
63303 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
63304
63305 /* v4l debugging and diagnostics */
63306
63307 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
63308 index 6db8ecf..8c23861 100644
63309 --- a/include/net/caif/caif_hsi.h
63310 +++ b/include/net/caif/caif_hsi.h
63311 @@ -98,7 +98,7 @@ struct cfhsi_drv {
63312 void (*rx_done_cb) (struct cfhsi_drv *drv);
63313 void (*wake_up_cb) (struct cfhsi_drv *drv);
63314 void (*wake_down_cb) (struct cfhsi_drv *drv);
63315 -};
63316 +} __no_const;
63317
63318 /* Structure implemented by HSI device. */
63319 struct cfhsi_dev {
63320 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
63321 index 9e5425b..8136ffc 100644
63322 --- a/include/net/caif/cfctrl.h
63323 +++ b/include/net/caif/cfctrl.h
63324 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
63325 void (*radioset_rsp)(void);
63326 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
63327 struct cflayer *client_layer);
63328 -};
63329 +} __no_const;
63330
63331 /* Link Setup Parameters for CAIF-Links. */
63332 struct cfctrl_link_param {
63333 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
63334 struct cfctrl {
63335 struct cfsrvl serv;
63336 struct cfctrl_rsp res;
63337 - atomic_t req_seq_no;
63338 - atomic_t rsp_seq_no;
63339 + atomic_unchecked_t req_seq_no;
63340 + atomic_unchecked_t rsp_seq_no;
63341 struct list_head list;
63342 /* Protects from simultaneous access to first_req list */
63343 spinlock_t info_list_lock;
63344 diff --git a/include/net/flow.h b/include/net/flow.h
63345 index 6c469db..7743b8e 100644
63346 --- a/include/net/flow.h
63347 +++ b/include/net/flow.h
63348 @@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
63349
63350 extern void flow_cache_flush(void);
63351 extern void flow_cache_flush_deferred(void);
63352 -extern atomic_t flow_cache_genid;
63353 +extern atomic_unchecked_t flow_cache_genid;
63354
63355 #endif
63356 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
63357 index b94765e..053f68b 100644
63358 --- a/include/net/inetpeer.h
63359 +++ b/include/net/inetpeer.h
63360 @@ -48,8 +48,8 @@ struct inet_peer {
63361 */
63362 union {
63363 struct {
63364 - atomic_t rid; /* Frag reception counter */
63365 - atomic_t ip_id_count; /* IP ID for the next packet */
63366 + atomic_unchecked_t rid; /* Frag reception counter */
63367 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
63368 __u32 tcp_ts;
63369 __u32 tcp_ts_stamp;
63370 };
63371 @@ -115,11 +115,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
63372 more++;
63373 inet_peer_refcheck(p);
63374 do {
63375 - old = atomic_read(&p->ip_id_count);
63376 + old = atomic_read_unchecked(&p->ip_id_count);
63377 new = old + more;
63378 if (!new)
63379 new = 1;
63380 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
63381 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
63382 return new;
63383 }
63384
63385 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
63386 index 10422ef..662570f 100644
63387 --- a/include/net/ip_fib.h
63388 +++ b/include/net/ip_fib.h
63389 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
63390
63391 #define FIB_RES_SADDR(net, res) \
63392 ((FIB_RES_NH(res).nh_saddr_genid == \
63393 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
63394 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
63395 FIB_RES_NH(res).nh_saddr : \
63396 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
63397 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
63398 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
63399 index 72522f0..6f03a2b 100644
63400 --- a/include/net/ip_vs.h
63401 +++ b/include/net/ip_vs.h
63402 @@ -510,7 +510,7 @@ struct ip_vs_conn {
63403 struct ip_vs_conn *control; /* Master control connection */
63404 atomic_t n_control; /* Number of controlled ones */
63405 struct ip_vs_dest *dest; /* real server */
63406 - atomic_t in_pkts; /* incoming packet counter */
63407 + atomic_unchecked_t in_pkts; /* incoming packet counter */
63408
63409 /* packet transmitter for different forwarding methods. If it
63410 mangles the packet, it must return NF_DROP or better NF_STOLEN,
63411 @@ -648,7 +648,7 @@ struct ip_vs_dest {
63412 __be16 port; /* port number of the server */
63413 union nf_inet_addr addr; /* IP address of the server */
63414 volatile unsigned flags; /* dest status flags */
63415 - atomic_t conn_flags; /* flags to copy to conn */
63416 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
63417 atomic_t weight; /* server weight */
63418
63419 atomic_t refcnt; /* reference counter */
63420 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
63421 index 69b610a..fe3962c 100644
63422 --- a/include/net/irda/ircomm_core.h
63423 +++ b/include/net/irda/ircomm_core.h
63424 @@ -51,7 +51,7 @@ typedef struct {
63425 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
63426 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
63427 struct ircomm_info *);
63428 -} call_t;
63429 +} __no_const call_t;
63430
63431 struct ircomm_cb {
63432 irda_queue_t queue;
63433 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
63434 index 59ba38bc..d515662 100644
63435 --- a/include/net/irda/ircomm_tty.h
63436 +++ b/include/net/irda/ircomm_tty.h
63437 @@ -35,6 +35,7 @@
63438 #include <linux/termios.h>
63439 #include <linux/timer.h>
63440 #include <linux/tty.h> /* struct tty_struct */
63441 +#include <asm/local.h>
63442
63443 #include <net/irda/irias_object.h>
63444 #include <net/irda/ircomm_core.h>
63445 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
63446 unsigned short close_delay;
63447 unsigned short closing_wait; /* time to wait before closing */
63448
63449 - int open_count;
63450 - int blocked_open; /* # of blocked opens */
63451 + local_t open_count;
63452 + local_t blocked_open; /* # of blocked opens */
63453
63454 /* Protect concurent access to :
63455 * o self->open_count
63456 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
63457 index cc7c197..9f2da2a 100644
63458 --- a/include/net/iucv/af_iucv.h
63459 +++ b/include/net/iucv/af_iucv.h
63460 @@ -141,7 +141,7 @@ struct iucv_sock {
63461 struct iucv_sock_list {
63462 struct hlist_head head;
63463 rwlock_t lock;
63464 - atomic_t autobind_name;
63465 + atomic_unchecked_t autobind_name;
63466 };
63467
63468 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
63469 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
63470 index 34c996f..bb3b4d4 100644
63471 --- a/include/net/neighbour.h
63472 +++ b/include/net/neighbour.h
63473 @@ -123,7 +123,7 @@ struct neigh_ops {
63474 void (*error_report)(struct neighbour *, struct sk_buff *);
63475 int (*output)(struct neighbour *, struct sk_buff *);
63476 int (*connected_output)(struct neighbour *, struct sk_buff *);
63477 -};
63478 +} __do_const;
63479
63480 struct pneigh_entry {
63481 struct pneigh_entry *next;
63482 diff --git a/include/net/netlink.h b/include/net/netlink.h
63483 index f394fe5..fd073f9 100644
63484 --- a/include/net/netlink.h
63485 +++ b/include/net/netlink.h
63486 @@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
63487 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
63488 {
63489 if (mark)
63490 - skb_trim(skb, (unsigned char *) mark - skb->data);
63491 + skb_trim(skb, (const unsigned char *) mark - skb->data);
63492 }
63493
63494 /**
63495 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
63496 index bbd023a..97c6d0d 100644
63497 --- a/include/net/netns/ipv4.h
63498 +++ b/include/net/netns/ipv4.h
63499 @@ -57,8 +57,8 @@ struct netns_ipv4 {
63500 unsigned int sysctl_ping_group_range[2];
63501 long sysctl_tcp_mem[3];
63502
63503 - atomic_t rt_genid;
63504 - atomic_t dev_addr_genid;
63505 + atomic_unchecked_t rt_genid;
63506 + atomic_unchecked_t dev_addr_genid;
63507
63508 #ifdef CONFIG_IP_MROUTE
63509 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
63510 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
63511 index a2ef814..31a8e3f 100644
63512 --- a/include/net/sctp/sctp.h
63513 +++ b/include/net/sctp/sctp.h
63514 @@ -318,9 +318,9 @@ do { \
63515
63516 #else /* SCTP_DEBUG */
63517
63518 -#define SCTP_DEBUG_PRINTK(whatever...)
63519 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
63520 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
63521 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
63522 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
63523 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
63524 #define SCTP_ENABLE_DEBUG
63525 #define SCTP_DISABLE_DEBUG
63526 #define SCTP_ASSERT(expr, str, func)
63527 diff --git a/include/net/sock.h b/include/net/sock.h
63528 index 5a0a58a..2e3d4d0 100644
63529 --- a/include/net/sock.h
63530 +++ b/include/net/sock.h
63531 @@ -302,7 +302,7 @@ struct sock {
63532 #ifdef CONFIG_RPS
63533 __u32 sk_rxhash;
63534 #endif
63535 - atomic_t sk_drops;
63536 + atomic_unchecked_t sk_drops;
63537 int sk_rcvbuf;
63538
63539 struct sk_filter __rcu *sk_filter;
63540 @@ -1691,7 +1691,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
63541 }
63542
63543 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
63544 - char __user *from, char *to,
63545 + char __user *from, unsigned char *to,
63546 int copy, int offset)
63547 {
63548 if (skb->ip_summed == CHECKSUM_NONE) {
63549 diff --git a/include/net/tcp.h b/include/net/tcp.h
63550 index f75a04d..702cf06 100644
63551 --- a/include/net/tcp.h
63552 +++ b/include/net/tcp.h
63553 @@ -1425,7 +1425,7 @@ struct tcp_seq_afinfo {
63554 char *name;
63555 sa_family_t family;
63556 const struct file_operations *seq_fops;
63557 - struct seq_operations seq_ops;
63558 + seq_operations_no_const seq_ops;
63559 };
63560
63561 struct tcp_iter_state {
63562 diff --git a/include/net/udp.h b/include/net/udp.h
63563 index 5d606d9..e879f7b 100644
63564 --- a/include/net/udp.h
63565 +++ b/include/net/udp.h
63566 @@ -244,7 +244,7 @@ struct udp_seq_afinfo {
63567 sa_family_t family;
63568 struct udp_table *udp_table;
63569 const struct file_operations *seq_fops;
63570 - struct seq_operations seq_ops;
63571 + seq_operations_no_const seq_ops;
63572 };
63573
63574 struct udp_iter_state {
63575 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
63576 index 96239e7..c85b032 100644
63577 --- a/include/net/xfrm.h
63578 +++ b/include/net/xfrm.h
63579 @@ -505,7 +505,7 @@ struct xfrm_policy {
63580 struct timer_list timer;
63581
63582 struct flow_cache_object flo;
63583 - atomic_t genid;
63584 + atomic_unchecked_t genid;
63585 u32 priority;
63586 u32 index;
63587 struct xfrm_mark mark;
63588 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
63589 index 1a046b1..ee0bef0 100644
63590 --- a/include/rdma/iw_cm.h
63591 +++ b/include/rdma/iw_cm.h
63592 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
63593 int backlog);
63594
63595 int (*destroy_listen)(struct iw_cm_id *cm_id);
63596 -};
63597 +} __no_const;
63598
63599 /**
63600 * iw_create_cm_id - Create an IW CM identifier.
63601 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
63602 index 8f9dfba..610ab6c 100644
63603 --- a/include/scsi/libfc.h
63604 +++ b/include/scsi/libfc.h
63605 @@ -756,6 +756,7 @@ struct libfc_function_template {
63606 */
63607 void (*disc_stop_final) (struct fc_lport *);
63608 };
63609 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
63610
63611 /**
63612 * struct fc_disc - Discovery context
63613 @@ -861,7 +862,7 @@ struct fc_lport {
63614 struct fc_vport *vport;
63615
63616 /* Operational Information */
63617 - struct libfc_function_template tt;
63618 + libfc_function_template_no_const tt;
63619 u8 link_up;
63620 u8 qfull;
63621 enum fc_lport_state state;
63622 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
63623 index 6efb2e1..cdad57f 100644
63624 --- a/include/scsi/scsi_device.h
63625 +++ b/include/scsi/scsi_device.h
63626 @@ -162,9 +162,9 @@ struct scsi_device {
63627 unsigned int max_device_blocked; /* what device_blocked counts down from */
63628 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
63629
63630 - atomic_t iorequest_cnt;
63631 - atomic_t iodone_cnt;
63632 - atomic_t ioerr_cnt;
63633 + atomic_unchecked_t iorequest_cnt;
63634 + atomic_unchecked_t iodone_cnt;
63635 + atomic_unchecked_t ioerr_cnt;
63636
63637 struct device sdev_gendev,
63638 sdev_dev;
63639 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
63640 index 719faf1..d1154d4 100644
63641 --- a/include/scsi/scsi_transport_fc.h
63642 +++ b/include/scsi/scsi_transport_fc.h
63643 @@ -739,7 +739,7 @@ struct fc_function_template {
63644 unsigned long show_host_system_hostname:1;
63645
63646 unsigned long disable_target_scan:1;
63647 -};
63648 +} __do_const;
63649
63650
63651 /**
63652 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
63653 index 030b87c..98a6954 100644
63654 --- a/include/sound/ak4xxx-adda.h
63655 +++ b/include/sound/ak4xxx-adda.h
63656 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
63657 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
63658 unsigned char val);
63659 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
63660 -};
63661 +} __no_const;
63662
63663 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
63664
63665 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
63666 index 8c05e47..2b5df97 100644
63667 --- a/include/sound/hwdep.h
63668 +++ b/include/sound/hwdep.h
63669 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
63670 struct snd_hwdep_dsp_status *status);
63671 int (*dsp_load)(struct snd_hwdep *hw,
63672 struct snd_hwdep_dsp_image *image);
63673 -};
63674 +} __no_const;
63675
63676 struct snd_hwdep {
63677 struct snd_card *card;
63678 diff --git a/include/sound/info.h b/include/sound/info.h
63679 index 9ca1a49..aba1728 100644
63680 --- a/include/sound/info.h
63681 +++ b/include/sound/info.h
63682 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
63683 struct snd_info_buffer *buffer);
63684 void (*write)(struct snd_info_entry *entry,
63685 struct snd_info_buffer *buffer);
63686 -};
63687 +} __no_const;
63688
63689 struct snd_info_entry_ops {
63690 int (*open)(struct snd_info_entry *entry,
63691 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
63692 index 0d11128..814178e 100644
63693 --- a/include/sound/pcm.h
63694 +++ b/include/sound/pcm.h
63695 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
63696 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
63697 int (*ack)(struct snd_pcm_substream *substream);
63698 };
63699 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
63700
63701 /*
63702 *
63703 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
63704 index af1b49e..a5d55a5 100644
63705 --- a/include/sound/sb16_csp.h
63706 +++ b/include/sound/sb16_csp.h
63707 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
63708 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
63709 int (*csp_stop) (struct snd_sb_csp * p);
63710 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
63711 -};
63712 +} __no_const;
63713
63714 /*
63715 * CSP private data
63716 diff --git a/include/sound/soc.h b/include/sound/soc.h
63717 index 2ebf787..0276839 100644
63718 --- a/include/sound/soc.h
63719 +++ b/include/sound/soc.h
63720 @@ -711,7 +711,7 @@ struct snd_soc_platform_driver {
63721 /* platform IO - used for platform DAPM */
63722 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
63723 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
63724 -};
63725 +} __do_const;
63726
63727 struct snd_soc_platform {
63728 const char *name;
63729 @@ -887,7 +887,7 @@ struct snd_soc_pcm_runtime {
63730 struct snd_soc_dai_link *dai_link;
63731 struct mutex pcm_mutex;
63732 enum snd_soc_pcm_subclass pcm_subclass;
63733 - struct snd_pcm_ops ops;
63734 + snd_pcm_ops_no_const ops;
63735
63736 unsigned int complete:1;
63737 unsigned int dev_registered:1;
63738 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
63739 index 4119966..1a4671c 100644
63740 --- a/include/sound/ymfpci.h
63741 +++ b/include/sound/ymfpci.h
63742 @@ -358,7 +358,7 @@ struct snd_ymfpci {
63743 spinlock_t reg_lock;
63744 spinlock_t voice_lock;
63745 wait_queue_head_t interrupt_sleep;
63746 - atomic_t interrupt_sleep_count;
63747 + atomic_unchecked_t interrupt_sleep_count;
63748 struct snd_info_entry *proc_entry;
63749 const struct firmware *dsp_microcode;
63750 const struct firmware *controller_microcode;
63751 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
63752 index aaccc5f..092d568 100644
63753 --- a/include/target/target_core_base.h
63754 +++ b/include/target/target_core_base.h
63755 @@ -447,7 +447,7 @@ struct t10_reservation_ops {
63756 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
63757 int (*t10_pr_register)(struct se_cmd *);
63758 int (*t10_pr_clear)(struct se_cmd *);
63759 -};
63760 +} __no_const;
63761
63762 struct t10_reservation {
63763 /* Reservation effects all target ports */
63764 @@ -576,7 +576,7 @@ struct se_cmd {
63765 atomic_t t_se_count;
63766 atomic_t t_task_cdbs_left;
63767 atomic_t t_task_cdbs_ex_left;
63768 - atomic_t t_task_cdbs_sent;
63769 + atomic_unchecked_t t_task_cdbs_sent;
63770 unsigned int transport_state;
63771 #define CMD_T_ABORTED (1 << 0)
63772 #define CMD_T_ACTIVE (1 << 1)
63773 @@ -802,7 +802,7 @@ struct se_device {
63774 spinlock_t stats_lock;
63775 /* Active commands on this virtual SE device */
63776 atomic_t simple_cmds;
63777 - atomic_t dev_ordered_id;
63778 + atomic_unchecked_t dev_ordered_id;
63779 atomic_t execute_tasks;
63780 atomic_t dev_ordered_sync;
63781 atomic_t dev_qf_count;
63782 diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
63783 new file mode 100644
63784 index 0000000..2efe49d
63785 --- /dev/null
63786 +++ b/include/trace/events/fs.h
63787 @@ -0,0 +1,53 @@
63788 +#undef TRACE_SYSTEM
63789 +#define TRACE_SYSTEM fs
63790 +
63791 +#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
63792 +#define _TRACE_FS_H
63793 +
63794 +#include <linux/fs.h>
63795 +#include <linux/tracepoint.h>
63796 +
63797 +TRACE_EVENT(do_sys_open,
63798 +
63799 + TP_PROTO(char *filename, int flags, int mode),
63800 +
63801 + TP_ARGS(filename, flags, mode),
63802 +
63803 + TP_STRUCT__entry(
63804 + __string( filename, filename )
63805 + __field( int, flags )
63806 + __field( int, mode )
63807 + ),
63808 +
63809 + TP_fast_assign(
63810 + __assign_str(filename, filename);
63811 + __entry->flags = flags;
63812 + __entry->mode = mode;
63813 + ),
63814 +
63815 + TP_printk("\"%s\" %x %o",
63816 + __get_str(filename), __entry->flags, __entry->mode)
63817 +);
63818 +
63819 +TRACE_EVENT(open_exec,
63820 +
63821 + TP_PROTO(const char *filename),
63822 +
63823 + TP_ARGS(filename),
63824 +
63825 + TP_STRUCT__entry(
63826 + __string( filename, filename )
63827 + ),
63828 +
63829 + TP_fast_assign(
63830 + __assign_str(filename, filename);
63831 + ),
63832 +
63833 + TP_printk("\"%s\"",
63834 + __get_str(filename))
63835 +);
63836 +
63837 +#endif /* _TRACE_FS_H */
63838 +
63839 +/* This part must be outside protection */
63840 +#include <trace/define_trace.h>
63841 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
63842 index 1c09820..7f5ec79 100644
63843 --- a/include/trace/events/irq.h
63844 +++ b/include/trace/events/irq.h
63845 @@ -36,7 +36,7 @@ struct softirq_action;
63846 */
63847 TRACE_EVENT(irq_handler_entry,
63848
63849 - TP_PROTO(int irq, struct irqaction *action),
63850 + TP_PROTO(int irq, const struct irqaction *action),
63851
63852 TP_ARGS(irq, action),
63853
63854 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
63855 */
63856 TRACE_EVENT(irq_handler_exit,
63857
63858 - TP_PROTO(int irq, struct irqaction *action, int ret),
63859 + TP_PROTO(int irq, const struct irqaction *action, int ret),
63860
63861 TP_ARGS(irq, action, ret),
63862
63863 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
63864 index f9466fa..f4e2b81 100644
63865 --- a/include/video/udlfb.h
63866 +++ b/include/video/udlfb.h
63867 @@ -53,10 +53,10 @@ struct dlfb_data {
63868 u32 pseudo_palette[256];
63869 int blank_mode; /*one of FB_BLANK_ */
63870 /* blit-only rendering path metrics, exposed through sysfs */
63871 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63872 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
63873 - atomic_t bytes_sent; /* to usb, after compression including overhead */
63874 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
63875 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63876 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
63877 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
63878 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
63879 };
63880
63881 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
63882 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
63883 index 0993a22..32ba2fe 100644
63884 --- a/include/video/uvesafb.h
63885 +++ b/include/video/uvesafb.h
63886 @@ -177,6 +177,7 @@ struct uvesafb_par {
63887 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
63888 u8 pmi_setpal; /* PMI for palette changes */
63889 u16 *pmi_base; /* protected mode interface location */
63890 + u8 *pmi_code; /* protected mode code location */
63891 void *pmi_start;
63892 void *pmi_pal;
63893 u8 *vbe_state_orig; /*
63894 diff --git a/init/Kconfig b/init/Kconfig
63895 index 6cfd71d..73cb68d 100644
63896 --- a/init/Kconfig
63897 +++ b/init/Kconfig
63898 @@ -790,6 +790,7 @@ endif # CGROUPS
63899
63900 config CHECKPOINT_RESTORE
63901 bool "Checkpoint/restore support" if EXPERT
63902 + depends on !GRKERNSEC
63903 default n
63904 help
63905 Enables additional kernel features in a sake of checkpoint/restore.
63906 @@ -1240,7 +1241,7 @@ config SLUB_DEBUG
63907
63908 config COMPAT_BRK
63909 bool "Disable heap randomization"
63910 - default y
63911 + default n
63912 help
63913 Randomizing heap placement makes heap exploits harder, but it
63914 also breaks ancient binaries (including anything libc5 based).
63915 diff --git a/init/do_mounts.c b/init/do_mounts.c
63916 index 42b0707..c06eef4 100644
63917 --- a/init/do_mounts.c
63918 +++ b/init/do_mounts.c
63919 @@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
63920 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
63921 {
63922 struct super_block *s;
63923 - int err = sys_mount(name, "/root", fs, flags, data);
63924 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
63925 if (err)
63926 return err;
63927
63928 - sys_chdir((const char __user __force *)"/root");
63929 + sys_chdir((const char __force_user *)"/root");
63930 s = current->fs->pwd.dentry->d_sb;
63931 ROOT_DEV = s->s_dev;
63932 printk(KERN_INFO
63933 @@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
63934 va_start(args, fmt);
63935 vsprintf(buf, fmt, args);
63936 va_end(args);
63937 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
63938 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
63939 if (fd >= 0) {
63940 sys_ioctl(fd, FDEJECT, 0);
63941 sys_close(fd);
63942 }
63943 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
63944 - fd = sys_open("/dev/console", O_RDWR, 0);
63945 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
63946 if (fd >= 0) {
63947 sys_ioctl(fd, TCGETS, (long)&termios);
63948 termios.c_lflag &= ~ICANON;
63949 sys_ioctl(fd, TCSETSF, (long)&termios);
63950 - sys_read(fd, &c, 1);
63951 + sys_read(fd, (char __user *)&c, 1);
63952 termios.c_lflag |= ICANON;
63953 sys_ioctl(fd, TCSETSF, (long)&termios);
63954 sys_close(fd);
63955 @@ -555,6 +555,6 @@ void __init prepare_namespace(void)
63956 mount_root();
63957 out:
63958 devtmpfs_mount("dev");
63959 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
63960 - sys_chroot((const char __user __force *)".");
63961 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
63962 + sys_chroot((const char __force_user *)".");
63963 }
63964 diff --git a/init/do_mounts.h b/init/do_mounts.h
63965 index f5b978a..69dbfe8 100644
63966 --- a/init/do_mounts.h
63967 +++ b/init/do_mounts.h
63968 @@ -15,15 +15,15 @@ extern int root_mountflags;
63969
63970 static inline int create_dev(char *name, dev_t dev)
63971 {
63972 - sys_unlink(name);
63973 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
63974 + sys_unlink((char __force_user *)name);
63975 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
63976 }
63977
63978 #if BITS_PER_LONG == 32
63979 static inline u32 bstat(char *name)
63980 {
63981 struct stat64 stat;
63982 - if (sys_stat64(name, &stat) != 0)
63983 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
63984 return 0;
63985 if (!S_ISBLK(stat.st_mode))
63986 return 0;
63987 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
63988 static inline u32 bstat(char *name)
63989 {
63990 struct stat stat;
63991 - if (sys_newstat(name, &stat) != 0)
63992 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
63993 return 0;
63994 if (!S_ISBLK(stat.st_mode))
63995 return 0;
63996 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
63997 index 9047330..de0d1fb 100644
63998 --- a/init/do_mounts_initrd.c
63999 +++ b/init/do_mounts_initrd.c
64000 @@ -43,13 +43,13 @@ static void __init handle_initrd(void)
64001 create_dev("/dev/root.old", Root_RAM0);
64002 /* mount initrd on rootfs' /root */
64003 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
64004 - sys_mkdir("/old", 0700);
64005 - root_fd = sys_open("/", 0, 0);
64006 - old_fd = sys_open("/old", 0, 0);
64007 + sys_mkdir((const char __force_user *)"/old", 0700);
64008 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
64009 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
64010 /* move initrd over / and chdir/chroot in initrd root */
64011 - sys_chdir("/root");
64012 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
64013 - sys_chroot(".");
64014 + sys_chdir((const char __force_user *)"/root");
64015 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64016 + sys_chroot((const char __force_user *)".");
64017
64018 /*
64019 * In case that a resume from disk is carried out by linuxrc or one of
64020 @@ -66,15 +66,15 @@ static void __init handle_initrd(void)
64021
64022 /* move initrd to rootfs' /old */
64023 sys_fchdir(old_fd);
64024 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
64025 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
64026 /* switch root and cwd back to / of rootfs */
64027 sys_fchdir(root_fd);
64028 - sys_chroot(".");
64029 + sys_chroot((const char __force_user *)".");
64030 sys_close(old_fd);
64031 sys_close(root_fd);
64032
64033 if (new_decode_dev(real_root_dev) == Root_RAM0) {
64034 - sys_chdir("/old");
64035 + sys_chdir((const char __force_user *)"/old");
64036 return;
64037 }
64038
64039 @@ -82,17 +82,17 @@ static void __init handle_initrd(void)
64040 mount_root();
64041
64042 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
64043 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
64044 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
64045 if (!error)
64046 printk("okay\n");
64047 else {
64048 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
64049 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
64050 if (error == -ENOENT)
64051 printk("/initrd does not exist. Ignored.\n");
64052 else
64053 printk("failed\n");
64054 printk(KERN_NOTICE "Unmounting old root\n");
64055 - sys_umount("/old", MNT_DETACH);
64056 + sys_umount((char __force_user *)"/old", MNT_DETACH);
64057 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
64058 if (fd < 0) {
64059 error = fd;
64060 @@ -115,11 +115,11 @@ int __init initrd_load(void)
64061 * mounted in the normal path.
64062 */
64063 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
64064 - sys_unlink("/initrd.image");
64065 + sys_unlink((const char __force_user *)"/initrd.image");
64066 handle_initrd();
64067 return 1;
64068 }
64069 }
64070 - sys_unlink("/initrd.image");
64071 + sys_unlink((const char __force_user *)"/initrd.image");
64072 return 0;
64073 }
64074 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
64075 index 32c4799..c27ee74 100644
64076 --- a/init/do_mounts_md.c
64077 +++ b/init/do_mounts_md.c
64078 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
64079 partitioned ? "_d" : "", minor,
64080 md_setup_args[ent].device_names);
64081
64082 - fd = sys_open(name, 0, 0);
64083 + fd = sys_open((char __force_user *)name, 0, 0);
64084 if (fd < 0) {
64085 printk(KERN_ERR "md: open failed - cannot start "
64086 "array %s\n", name);
64087 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
64088 * array without it
64089 */
64090 sys_close(fd);
64091 - fd = sys_open(name, 0, 0);
64092 + fd = sys_open((char __force_user *)name, 0, 0);
64093 sys_ioctl(fd, BLKRRPART, 0);
64094 }
64095 sys_close(fd);
64096 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
64097
64098 wait_for_device_probe();
64099
64100 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
64101 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
64102 if (fd >= 0) {
64103 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
64104 sys_close(fd);
64105 diff --git a/init/initramfs.c b/init/initramfs.c
64106 index 8216c30..25e8e32 100644
64107 --- a/init/initramfs.c
64108 +++ b/init/initramfs.c
64109 @@ -74,7 +74,7 @@ static void __init free_hash(void)
64110 }
64111 }
64112
64113 -static long __init do_utime(char __user *filename, time_t mtime)
64114 +static long __init do_utime(__force char __user *filename, time_t mtime)
64115 {
64116 struct timespec t[2];
64117
64118 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
64119 struct dir_entry *de, *tmp;
64120 list_for_each_entry_safe(de, tmp, &dir_list, list) {
64121 list_del(&de->list);
64122 - do_utime(de->name, de->mtime);
64123 + do_utime((char __force_user *)de->name, de->mtime);
64124 kfree(de->name);
64125 kfree(de);
64126 }
64127 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
64128 if (nlink >= 2) {
64129 char *old = find_link(major, minor, ino, mode, collected);
64130 if (old)
64131 - return (sys_link(old, collected) < 0) ? -1 : 1;
64132 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
64133 }
64134 return 0;
64135 }
64136 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
64137 {
64138 struct stat st;
64139
64140 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
64141 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
64142 if (S_ISDIR(st.st_mode))
64143 - sys_rmdir(path);
64144 + sys_rmdir((char __force_user *)path);
64145 else
64146 - sys_unlink(path);
64147 + sys_unlink((char __force_user *)path);
64148 }
64149 }
64150
64151 @@ -305,7 +305,7 @@ static int __init do_name(void)
64152 int openflags = O_WRONLY|O_CREAT;
64153 if (ml != 1)
64154 openflags |= O_TRUNC;
64155 - wfd = sys_open(collected, openflags, mode);
64156 + wfd = sys_open((char __force_user *)collected, openflags, mode);
64157
64158 if (wfd >= 0) {
64159 sys_fchown(wfd, uid, gid);
64160 @@ -317,17 +317,17 @@ static int __init do_name(void)
64161 }
64162 }
64163 } else if (S_ISDIR(mode)) {
64164 - sys_mkdir(collected, mode);
64165 - sys_chown(collected, uid, gid);
64166 - sys_chmod(collected, mode);
64167 + sys_mkdir((char __force_user *)collected, mode);
64168 + sys_chown((char __force_user *)collected, uid, gid);
64169 + sys_chmod((char __force_user *)collected, mode);
64170 dir_add(collected, mtime);
64171 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
64172 S_ISFIFO(mode) || S_ISSOCK(mode)) {
64173 if (maybe_link() == 0) {
64174 - sys_mknod(collected, mode, rdev);
64175 - sys_chown(collected, uid, gid);
64176 - sys_chmod(collected, mode);
64177 - do_utime(collected, mtime);
64178 + sys_mknod((char __force_user *)collected, mode, rdev);
64179 + sys_chown((char __force_user *)collected, uid, gid);
64180 + sys_chmod((char __force_user *)collected, mode);
64181 + do_utime((char __force_user *)collected, mtime);
64182 }
64183 }
64184 return 0;
64185 @@ -336,15 +336,15 @@ static int __init do_name(void)
64186 static int __init do_copy(void)
64187 {
64188 if (count >= body_len) {
64189 - sys_write(wfd, victim, body_len);
64190 + sys_write(wfd, (char __force_user *)victim, body_len);
64191 sys_close(wfd);
64192 - do_utime(vcollected, mtime);
64193 + do_utime((char __force_user *)vcollected, mtime);
64194 kfree(vcollected);
64195 eat(body_len);
64196 state = SkipIt;
64197 return 0;
64198 } else {
64199 - sys_write(wfd, victim, count);
64200 + sys_write(wfd, (char __force_user *)victim, count);
64201 body_len -= count;
64202 eat(count);
64203 return 1;
64204 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
64205 {
64206 collected[N_ALIGN(name_len) + body_len] = '\0';
64207 clean_path(collected, 0);
64208 - sys_symlink(collected + N_ALIGN(name_len), collected);
64209 - sys_lchown(collected, uid, gid);
64210 - do_utime(collected, mtime);
64211 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
64212 + sys_lchown((char __force_user *)collected, uid, gid);
64213 + do_utime((char __force_user *)collected, mtime);
64214 state = SkipIt;
64215 next_state = Reset;
64216 return 0;
64217 diff --git a/init/main.c b/init/main.c
64218 index cb54cd3..8773e3c 100644
64219 --- a/init/main.c
64220 +++ b/init/main.c
64221 @@ -95,6 +95,8 @@ static inline void mark_rodata_ro(void) { }
64222 extern void tc_init(void);
64223 #endif
64224
64225 +extern void grsecurity_init(void);
64226 +
64227 /*
64228 * Debug helper: via this flag we know that we are in 'early bootup code'
64229 * where only the boot processor is running with IRQ disabled. This means
64230 @@ -148,6 +150,49 @@ static int __init set_reset_devices(char *str)
64231
64232 __setup("reset_devices", set_reset_devices);
64233
64234 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
64235 +extern char pax_enter_kernel_user[];
64236 +extern char pax_exit_kernel_user[];
64237 +extern pgdval_t clone_pgd_mask;
64238 +#endif
64239 +
64240 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
64241 +static int __init setup_pax_nouderef(char *str)
64242 +{
64243 +#ifdef CONFIG_X86_32
64244 + unsigned int cpu;
64245 + struct desc_struct *gdt;
64246 +
64247 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
64248 + gdt = get_cpu_gdt_table(cpu);
64249 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
64250 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
64251 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
64252 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
64253 + }
64254 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
64255 +#else
64256 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
64257 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
64258 + clone_pgd_mask = ~(pgdval_t)0UL;
64259 +#endif
64260 +
64261 + return 0;
64262 +}
64263 +early_param("pax_nouderef", setup_pax_nouderef);
64264 +#endif
64265 +
64266 +#ifdef CONFIG_PAX_SOFTMODE
64267 +int pax_softmode;
64268 +
64269 +static int __init setup_pax_softmode(char *str)
64270 +{
64271 + get_option(&str, &pax_softmode);
64272 + return 1;
64273 +}
64274 +__setup("pax_softmode=", setup_pax_softmode);
64275 +#endif
64276 +
64277 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
64278 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
64279 static const char *panic_later, *panic_param;
64280 @@ -674,6 +719,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
64281 {
64282 int count = preempt_count();
64283 int ret;
64284 + const char *msg1 = "", *msg2 = "";
64285
64286 if (initcall_debug)
64287 ret = do_one_initcall_debug(fn);
64288 @@ -686,15 +732,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
64289 sprintf(msgbuf, "error code %d ", ret);
64290
64291 if (preempt_count() != count) {
64292 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
64293 + msg1 = " preemption imbalance";
64294 preempt_count() = count;
64295 }
64296 if (irqs_disabled()) {
64297 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
64298 + msg2 = " disabled interrupts";
64299 local_irq_enable();
64300 }
64301 - if (msgbuf[0]) {
64302 - printk("initcall %pF returned with %s\n", fn, msgbuf);
64303 + if (msgbuf[0] || *msg1 || *msg2) {
64304 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
64305 }
64306
64307 return ret;
64308 @@ -865,7 +911,7 @@ static int __init kernel_init(void * unused)
64309 do_basic_setup();
64310
64311 /* Open the /dev/console on the rootfs, this should never fail */
64312 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
64313 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
64314 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
64315
64316 (void) sys_dup(0);
64317 @@ -878,11 +924,13 @@ static int __init kernel_init(void * unused)
64318 if (!ramdisk_execute_command)
64319 ramdisk_execute_command = "/init";
64320
64321 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
64322 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
64323 ramdisk_execute_command = NULL;
64324 prepare_namespace();
64325 }
64326
64327 + grsecurity_init();
64328 +
64329 /*
64330 * Ok, we have completed the initial bootup, and
64331 * we're essentially up and running. Get rid of the
64332 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
64333 index 28bd64d..c66b72a 100644
64334 --- a/ipc/mqueue.c
64335 +++ b/ipc/mqueue.c
64336 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
64337 mq_bytes = (mq_msg_tblsz +
64338 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
64339
64340 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
64341 spin_lock(&mq_lock);
64342 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
64343 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
64344 diff --git a/ipc/msg.c b/ipc/msg.c
64345 index 7385de2..a8180e08 100644
64346 --- a/ipc/msg.c
64347 +++ b/ipc/msg.c
64348 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
64349 return security_msg_queue_associate(msq, msgflg);
64350 }
64351
64352 +static struct ipc_ops msg_ops = {
64353 + .getnew = newque,
64354 + .associate = msg_security,
64355 + .more_checks = NULL
64356 +};
64357 +
64358 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
64359 {
64360 struct ipc_namespace *ns;
64361 - struct ipc_ops msg_ops;
64362 struct ipc_params msg_params;
64363
64364 ns = current->nsproxy->ipc_ns;
64365
64366 - msg_ops.getnew = newque;
64367 - msg_ops.associate = msg_security;
64368 - msg_ops.more_checks = NULL;
64369 -
64370 msg_params.key = key;
64371 msg_params.flg = msgflg;
64372
64373 diff --git a/ipc/sem.c b/ipc/sem.c
64374 index 5215a81..cfc0cac 100644
64375 --- a/ipc/sem.c
64376 +++ b/ipc/sem.c
64377 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
64378 return 0;
64379 }
64380
64381 +static struct ipc_ops sem_ops = {
64382 + .getnew = newary,
64383 + .associate = sem_security,
64384 + .more_checks = sem_more_checks
64385 +};
64386 +
64387 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64388 {
64389 struct ipc_namespace *ns;
64390 - struct ipc_ops sem_ops;
64391 struct ipc_params sem_params;
64392
64393 ns = current->nsproxy->ipc_ns;
64394 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64395 if (nsems < 0 || nsems > ns->sc_semmsl)
64396 return -EINVAL;
64397
64398 - sem_ops.getnew = newary;
64399 - sem_ops.associate = sem_security;
64400 - sem_ops.more_checks = sem_more_checks;
64401 -
64402 sem_params.key = key;
64403 sem_params.flg = semflg;
64404 sem_params.u.nsems = nsems;
64405 diff --git a/ipc/shm.c b/ipc/shm.c
64406 index 406c5b2..bc66d67 100644
64407 --- a/ipc/shm.c
64408 +++ b/ipc/shm.c
64409 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
64410 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
64411 #endif
64412
64413 +#ifdef CONFIG_GRKERNSEC
64414 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64415 + const time_t shm_createtime, const uid_t cuid,
64416 + const int shmid);
64417 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64418 + const time_t shm_createtime);
64419 +#endif
64420 +
64421 void shm_init_ns(struct ipc_namespace *ns)
64422 {
64423 ns->shm_ctlmax = SHMMAX;
64424 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
64425 shp->shm_lprid = 0;
64426 shp->shm_atim = shp->shm_dtim = 0;
64427 shp->shm_ctim = get_seconds();
64428 +#ifdef CONFIG_GRKERNSEC
64429 + {
64430 + struct timespec timeval;
64431 + do_posix_clock_monotonic_gettime(&timeval);
64432 +
64433 + shp->shm_createtime = timeval.tv_sec;
64434 + }
64435 +#endif
64436 shp->shm_segsz = size;
64437 shp->shm_nattch = 0;
64438 shp->shm_file = file;
64439 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
64440 return 0;
64441 }
64442
64443 +static struct ipc_ops shm_ops = {
64444 + .getnew = newseg,
64445 + .associate = shm_security,
64446 + .more_checks = shm_more_checks
64447 +};
64448 +
64449 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
64450 {
64451 struct ipc_namespace *ns;
64452 - struct ipc_ops shm_ops;
64453 struct ipc_params shm_params;
64454
64455 ns = current->nsproxy->ipc_ns;
64456
64457 - shm_ops.getnew = newseg;
64458 - shm_ops.associate = shm_security;
64459 - shm_ops.more_checks = shm_more_checks;
64460 -
64461 shm_params.key = key;
64462 shm_params.flg = shmflg;
64463 shm_params.u.size = size;
64464 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64465 f_mode = FMODE_READ | FMODE_WRITE;
64466 }
64467 if (shmflg & SHM_EXEC) {
64468 +
64469 +#ifdef CONFIG_PAX_MPROTECT
64470 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
64471 + goto out;
64472 +#endif
64473 +
64474 prot |= PROT_EXEC;
64475 acc_mode |= S_IXUGO;
64476 }
64477 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64478 if (err)
64479 goto out_unlock;
64480
64481 +#ifdef CONFIG_GRKERNSEC
64482 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
64483 + shp->shm_perm.cuid, shmid) ||
64484 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
64485 + err = -EACCES;
64486 + goto out_unlock;
64487 + }
64488 +#endif
64489 +
64490 path = shp->shm_file->f_path;
64491 path_get(&path);
64492 shp->shm_nattch++;
64493 +#ifdef CONFIG_GRKERNSEC
64494 + shp->shm_lapid = current->pid;
64495 +#endif
64496 size = i_size_read(path.dentry->d_inode);
64497 shm_unlock(shp);
64498
64499 diff --git a/kernel/acct.c b/kernel/acct.c
64500 index 02e6167..54824f7 100644
64501 --- a/kernel/acct.c
64502 +++ b/kernel/acct.c
64503 @@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
64504 */
64505 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64506 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
64507 - file->f_op->write(file, (char *)&ac,
64508 + file->f_op->write(file, (char __force_user *)&ac,
64509 sizeof(acct_t), &file->f_pos);
64510 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
64511 set_fs(fs);
64512 diff --git a/kernel/audit.c b/kernel/audit.c
64513 index 1c7f2c6..9ba5359 100644
64514 --- a/kernel/audit.c
64515 +++ b/kernel/audit.c
64516 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
64517 3) suppressed due to audit_rate_limit
64518 4) suppressed due to audit_backlog_limit
64519 */
64520 -static atomic_t audit_lost = ATOMIC_INIT(0);
64521 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
64522
64523 /* The netlink socket. */
64524 static struct sock *audit_sock;
64525 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
64526 unsigned long now;
64527 int print;
64528
64529 - atomic_inc(&audit_lost);
64530 + atomic_inc_unchecked(&audit_lost);
64531
64532 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
64533
64534 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
64535 printk(KERN_WARNING
64536 "audit: audit_lost=%d audit_rate_limit=%d "
64537 "audit_backlog_limit=%d\n",
64538 - atomic_read(&audit_lost),
64539 + atomic_read_unchecked(&audit_lost),
64540 audit_rate_limit,
64541 audit_backlog_limit);
64542 audit_panic(message);
64543 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
64544 status_set.pid = audit_pid;
64545 status_set.rate_limit = audit_rate_limit;
64546 status_set.backlog_limit = audit_backlog_limit;
64547 - status_set.lost = atomic_read(&audit_lost);
64548 + status_set.lost = atomic_read_unchecked(&audit_lost);
64549 status_set.backlog = skb_queue_len(&audit_skb_queue);
64550 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
64551 &status_set, sizeof(status_set));
64552 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
64553 index af1de0f..06dfe57 100644
64554 --- a/kernel/auditsc.c
64555 +++ b/kernel/auditsc.c
64556 @@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
64557 }
64558
64559 /* global counter which is incremented every time something logs in */
64560 -static atomic_t session_id = ATOMIC_INIT(0);
64561 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
64562
64563 /**
64564 * audit_set_loginuid - set current task's audit_context loginuid
64565 @@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
64566 return -EPERM;
64567 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
64568
64569 - sessionid = atomic_inc_return(&session_id);
64570 + sessionid = atomic_inc_return_unchecked(&session_id);
64571 if (context && context->in_syscall) {
64572 struct audit_buffer *ab;
64573
64574 diff --git a/kernel/capability.c b/kernel/capability.c
64575 index 3f1adb6..c564db0 100644
64576 --- a/kernel/capability.c
64577 +++ b/kernel/capability.c
64578 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
64579 * before modification is attempted and the application
64580 * fails.
64581 */
64582 + if (tocopy > ARRAY_SIZE(kdata))
64583 + return -EFAULT;
64584 +
64585 if (copy_to_user(dataptr, kdata, tocopy
64586 * sizeof(struct __user_cap_data_struct))) {
64587 return -EFAULT;
64588 @@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
64589 int ret;
64590
64591 rcu_read_lock();
64592 - ret = security_capable(__task_cred(t), ns, cap);
64593 + ret = security_capable(__task_cred(t), ns, cap) == 0 &&
64594 + gr_task_is_capable(t, __task_cred(t), cap);
64595 rcu_read_unlock();
64596
64597 - return (ret == 0);
64598 + return ret;
64599 }
64600
64601 /**
64602 @@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
64603 int ret;
64604
64605 rcu_read_lock();
64606 - ret = security_capable_noaudit(__task_cred(t), ns, cap);
64607 + ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
64608 rcu_read_unlock();
64609
64610 - return (ret == 0);
64611 + return ret;
64612 }
64613
64614 /**
64615 @@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
64616 BUG();
64617 }
64618
64619 - if (security_capable(current_cred(), ns, cap) == 0) {
64620 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
64621 current->flags |= PF_SUPERPRIV;
64622 return true;
64623 }
64624 @@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
64625 }
64626 EXPORT_SYMBOL(ns_capable);
64627
64628 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
64629 +{
64630 + if (unlikely(!cap_valid(cap))) {
64631 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
64632 + BUG();
64633 + }
64634 +
64635 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
64636 + current->flags |= PF_SUPERPRIV;
64637 + return true;
64638 + }
64639 + return false;
64640 +}
64641 +EXPORT_SYMBOL(ns_capable_nolog);
64642 +
64643 /**
64644 * capable - Determine if the current task has a superior capability in effect
64645 * @cap: The capability to be tested for
64646 @@ -408,6 +427,12 @@ bool capable(int cap)
64647 }
64648 EXPORT_SYMBOL(capable);
64649
64650 +bool capable_nolog(int cap)
64651 +{
64652 + return ns_capable_nolog(&init_user_ns, cap);
64653 +}
64654 +EXPORT_SYMBOL(capable_nolog);
64655 +
64656 /**
64657 * nsown_capable - Check superior capability to one's own user_ns
64658 * @cap: The capability in question
64659 diff --git a/kernel/compat.c b/kernel/compat.c
64660 index d2c67aa..a629b2e 100644
64661 --- a/kernel/compat.c
64662 +++ b/kernel/compat.c
64663 @@ -13,6 +13,7 @@
64664
64665 #include <linux/linkage.h>
64666 #include <linux/compat.h>
64667 +#include <linux/module.h>
64668 #include <linux/errno.h>
64669 #include <linux/time.h>
64670 #include <linux/signal.h>
64671 @@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
64672 mm_segment_t oldfs;
64673 long ret;
64674
64675 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
64676 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
64677 oldfs = get_fs();
64678 set_fs(KERNEL_DS);
64679 ret = hrtimer_nanosleep_restart(restart);
64680 @@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
64681 oldfs = get_fs();
64682 set_fs(KERNEL_DS);
64683 ret = hrtimer_nanosleep(&tu,
64684 - rmtp ? (struct timespec __user *)&rmt : NULL,
64685 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
64686 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
64687 set_fs(oldfs);
64688
64689 @@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
64690 mm_segment_t old_fs = get_fs();
64691
64692 set_fs(KERNEL_DS);
64693 - ret = sys_sigpending((old_sigset_t __user *) &s);
64694 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
64695 set_fs(old_fs);
64696 if (ret == 0)
64697 ret = put_user(s, set);
64698 @@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
64699 mm_segment_t old_fs = get_fs();
64700
64701 set_fs(KERNEL_DS);
64702 - ret = sys_old_getrlimit(resource, &r);
64703 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
64704 set_fs(old_fs);
64705
64706 if (!ret) {
64707 @@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
64708 mm_segment_t old_fs = get_fs();
64709
64710 set_fs(KERNEL_DS);
64711 - ret = sys_getrusage(who, (struct rusage __user *) &r);
64712 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
64713 set_fs(old_fs);
64714
64715 if (ret)
64716 @@ -550,8 +551,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
64717 set_fs (KERNEL_DS);
64718 ret = sys_wait4(pid,
64719 (stat_addr ?
64720 - (unsigned int __user *) &status : NULL),
64721 - options, (struct rusage __user *) &r);
64722 + (unsigned int __force_user *) &status : NULL),
64723 + options, (struct rusage __force_user *) &r);
64724 set_fs (old_fs);
64725
64726 if (ret > 0) {
64727 @@ -576,8 +577,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
64728 memset(&info, 0, sizeof(info));
64729
64730 set_fs(KERNEL_DS);
64731 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
64732 - uru ? (struct rusage __user *)&ru : NULL);
64733 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
64734 + uru ? (struct rusage __force_user *)&ru : NULL);
64735 set_fs(old_fs);
64736
64737 if ((ret < 0) || (info.si_signo == 0))
64738 @@ -707,8 +708,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
64739 oldfs = get_fs();
64740 set_fs(KERNEL_DS);
64741 err = sys_timer_settime(timer_id, flags,
64742 - (struct itimerspec __user *) &newts,
64743 - (struct itimerspec __user *) &oldts);
64744 + (struct itimerspec __force_user *) &newts,
64745 + (struct itimerspec __force_user *) &oldts);
64746 set_fs(oldfs);
64747 if (!err && old && put_compat_itimerspec(old, &oldts))
64748 return -EFAULT;
64749 @@ -725,7 +726,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
64750 oldfs = get_fs();
64751 set_fs(KERNEL_DS);
64752 err = sys_timer_gettime(timer_id,
64753 - (struct itimerspec __user *) &ts);
64754 + (struct itimerspec __force_user *) &ts);
64755 set_fs(oldfs);
64756 if (!err && put_compat_itimerspec(setting, &ts))
64757 return -EFAULT;
64758 @@ -744,7 +745,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
64759 oldfs = get_fs();
64760 set_fs(KERNEL_DS);
64761 err = sys_clock_settime(which_clock,
64762 - (struct timespec __user *) &ts);
64763 + (struct timespec __force_user *) &ts);
64764 set_fs(oldfs);
64765 return err;
64766 }
64767 @@ -759,7 +760,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
64768 oldfs = get_fs();
64769 set_fs(KERNEL_DS);
64770 err = sys_clock_gettime(which_clock,
64771 - (struct timespec __user *) &ts);
64772 + (struct timespec __force_user *) &ts);
64773 set_fs(oldfs);
64774 if (!err && put_compat_timespec(&ts, tp))
64775 return -EFAULT;
64776 @@ -779,7 +780,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
64777
64778 oldfs = get_fs();
64779 set_fs(KERNEL_DS);
64780 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
64781 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
64782 set_fs(oldfs);
64783
64784 err = compat_put_timex(utp, &txc);
64785 @@ -799,7 +800,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
64786 oldfs = get_fs();
64787 set_fs(KERNEL_DS);
64788 err = sys_clock_getres(which_clock,
64789 - (struct timespec __user *) &ts);
64790 + (struct timespec __force_user *) &ts);
64791 set_fs(oldfs);
64792 if (!err && tp && put_compat_timespec(&ts, tp))
64793 return -EFAULT;
64794 @@ -811,9 +812,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
64795 long err;
64796 mm_segment_t oldfs;
64797 struct timespec tu;
64798 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
64799 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
64800
64801 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
64802 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
64803 oldfs = get_fs();
64804 set_fs(KERNEL_DS);
64805 err = clock_nanosleep_restart(restart);
64806 @@ -845,8 +846,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
64807 oldfs = get_fs();
64808 set_fs(KERNEL_DS);
64809 err = sys_clock_nanosleep(which_clock, flags,
64810 - (struct timespec __user *) &in,
64811 - (struct timespec __user *) &out);
64812 + (struct timespec __force_user *) &in,
64813 + (struct timespec __force_user *) &out);
64814 set_fs(oldfs);
64815
64816 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
64817 diff --git a/kernel/configs.c b/kernel/configs.c
64818 index 42e8fa0..9e7406b 100644
64819 --- a/kernel/configs.c
64820 +++ b/kernel/configs.c
64821 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
64822 struct proc_dir_entry *entry;
64823
64824 /* create the current config file */
64825 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
64826 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
64827 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
64828 + &ikconfig_file_ops);
64829 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64830 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
64831 + &ikconfig_file_ops);
64832 +#endif
64833 +#else
64834 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
64835 &ikconfig_file_ops);
64836 +#endif
64837 +
64838 if (!entry)
64839 return -ENOMEM;
64840
64841 diff --git a/kernel/cred.c b/kernel/cred.c
64842 index e70683d..27761b6 100644
64843 --- a/kernel/cred.c
64844 +++ b/kernel/cred.c
64845 @@ -205,6 +205,15 @@ void exit_creds(struct task_struct *tsk)
64846 validate_creds(cred);
64847 put_cred(cred);
64848 }
64849 +
64850 +#ifdef CONFIG_GRKERNSEC_SETXID
64851 + cred = (struct cred *) tsk->delayed_cred;
64852 + if (cred) {
64853 + tsk->delayed_cred = NULL;
64854 + validate_creds(cred);
64855 + put_cred(cred);
64856 + }
64857 +#endif
64858 }
64859
64860 /**
64861 @@ -473,7 +482,7 @@ error_put:
64862 * Always returns 0 thus allowing this function to be tail-called at the end
64863 * of, say, sys_setgid().
64864 */
64865 -int commit_creds(struct cred *new)
64866 +static int __commit_creds(struct cred *new)
64867 {
64868 struct task_struct *task = current;
64869 const struct cred *old = task->real_cred;
64870 @@ -492,6 +501,8 @@ int commit_creds(struct cred *new)
64871
64872 get_cred(new); /* we will require a ref for the subj creds too */
64873
64874 + gr_set_role_label(task, new->uid, new->gid);
64875 +
64876 /* dumpability changes */
64877 if (old->euid != new->euid ||
64878 old->egid != new->egid ||
64879 @@ -541,6 +552,101 @@ int commit_creds(struct cred *new)
64880 put_cred(old);
64881 return 0;
64882 }
64883 +#ifdef CONFIG_GRKERNSEC_SETXID
64884 +extern int set_user(struct cred *new);
64885 +
64886 +void gr_delayed_cred_worker(void)
64887 +{
64888 + const struct cred *new = current->delayed_cred;
64889 + struct cred *ncred;
64890 +
64891 + current->delayed_cred = NULL;
64892 +
64893 + if (current_uid() && new != NULL) {
64894 + // from doing get_cred on it when queueing this
64895 + put_cred(new);
64896 + return;
64897 + } else if (new == NULL)
64898 + return;
64899 +
64900 + ncred = prepare_creds();
64901 + if (!ncred)
64902 + goto die;
64903 + // uids
64904 + ncred->uid = new->uid;
64905 + ncred->euid = new->euid;
64906 + ncred->suid = new->suid;
64907 + ncred->fsuid = new->fsuid;
64908 + // gids
64909 + ncred->gid = new->gid;
64910 + ncred->egid = new->egid;
64911 + ncred->sgid = new->sgid;
64912 + ncred->fsgid = new->fsgid;
64913 + // groups
64914 + if (set_groups(ncred, new->group_info) < 0) {
64915 + abort_creds(ncred);
64916 + goto die;
64917 + }
64918 + // caps
64919 + ncred->securebits = new->securebits;
64920 + ncred->cap_inheritable = new->cap_inheritable;
64921 + ncred->cap_permitted = new->cap_permitted;
64922 + ncred->cap_effective = new->cap_effective;
64923 + ncred->cap_bset = new->cap_bset;
64924 +
64925 + if (set_user(ncred)) {
64926 + abort_creds(ncred);
64927 + goto die;
64928 + }
64929 +
64930 + // from doing get_cred on it when queueing this
64931 + put_cred(new);
64932 +
64933 + __commit_creds(ncred);
64934 + return;
64935 +die:
64936 + // from doing get_cred on it when queueing this
64937 + put_cred(new);
64938 + do_group_exit(SIGKILL);
64939 +}
64940 +#endif
64941 +
64942 +int commit_creds(struct cred *new)
64943 +{
64944 +#ifdef CONFIG_GRKERNSEC_SETXID
64945 + int ret;
64946 + int schedule_it = 0;
64947 + struct task_struct *t;
64948 +
64949 + /* we won't get called with tasklist_lock held for writing
64950 + and interrupts disabled as the cred struct in that case is
64951 + init_cred
64952 + */
64953 + if (grsec_enable_setxid && !current_is_single_threaded() &&
64954 + !current_uid() && new->uid) {
64955 + schedule_it = 1;
64956 + }
64957 + ret = __commit_creds(new);
64958 + if (schedule_it) {
64959 + rcu_read_lock();
64960 + read_lock(&tasklist_lock);
64961 + for (t = next_thread(current); t != current;
64962 + t = next_thread(t)) {
64963 + if (t->delayed_cred == NULL) {
64964 + t->delayed_cred = get_cred(new);
64965 + set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
64966 + set_tsk_need_resched(t);
64967 + }
64968 + }
64969 + read_unlock(&tasklist_lock);
64970 + rcu_read_unlock();
64971 + }
64972 + return ret;
64973 +#else
64974 + return __commit_creds(new);
64975 +#endif
64976 +}
64977 +
64978 EXPORT_SYMBOL(commit_creds);
64979
64980 /**
64981 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
64982 index 0557f24..1a00d9a 100644
64983 --- a/kernel/debug/debug_core.c
64984 +++ b/kernel/debug/debug_core.c
64985 @@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
64986 */
64987 static atomic_t masters_in_kgdb;
64988 static atomic_t slaves_in_kgdb;
64989 -static atomic_t kgdb_break_tasklet_var;
64990 +static atomic_unchecked_t kgdb_break_tasklet_var;
64991 atomic_t kgdb_setting_breakpoint;
64992
64993 struct task_struct *kgdb_usethread;
64994 @@ -132,7 +132,7 @@ int kgdb_single_step;
64995 static pid_t kgdb_sstep_pid;
64996
64997 /* to keep track of the CPU which is doing the single stepping*/
64998 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
64999 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65000
65001 /*
65002 * If you are debugging a problem where roundup (the collection of
65003 @@ -540,7 +540,7 @@ return_normal:
65004 * kernel will only try for the value of sstep_tries before
65005 * giving up and continuing on.
65006 */
65007 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
65008 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
65009 (kgdb_info[cpu].task &&
65010 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
65011 atomic_set(&kgdb_active, -1);
65012 @@ -634,8 +634,8 @@ cpu_master_loop:
65013 }
65014
65015 kgdb_restore:
65016 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
65017 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
65018 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
65019 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
65020 if (kgdb_info[sstep_cpu].task)
65021 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
65022 else
65023 @@ -861,18 +861,18 @@ static void kgdb_unregister_callbacks(void)
65024 static void kgdb_tasklet_bpt(unsigned long ing)
65025 {
65026 kgdb_breakpoint();
65027 - atomic_set(&kgdb_break_tasklet_var, 0);
65028 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
65029 }
65030
65031 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
65032
65033 void kgdb_schedule_breakpoint(void)
65034 {
65035 - if (atomic_read(&kgdb_break_tasklet_var) ||
65036 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
65037 atomic_read(&kgdb_active) != -1 ||
65038 atomic_read(&kgdb_setting_breakpoint))
65039 return;
65040 - atomic_inc(&kgdb_break_tasklet_var);
65041 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
65042 tasklet_schedule(&kgdb_tasklet_breakpoint);
65043 }
65044 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
65045 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
65046 index 67b847d..93834dd 100644
65047 --- a/kernel/debug/kdb/kdb_main.c
65048 +++ b/kernel/debug/kdb/kdb_main.c
65049 @@ -1983,7 +1983,7 @@ static int kdb_lsmod(int argc, const char **argv)
65050 list_for_each_entry(mod, kdb_modules, list) {
65051
65052 kdb_printf("%-20s%8u 0x%p ", mod->name,
65053 - mod->core_size, (void *)mod);
65054 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
65055 #ifdef CONFIG_MODULE_UNLOAD
65056 kdb_printf("%4ld ", module_refcount(mod));
65057 #endif
65058 @@ -1993,7 +1993,7 @@ static int kdb_lsmod(int argc, const char **argv)
65059 kdb_printf(" (Loading)");
65060 else
65061 kdb_printf(" (Live)");
65062 - kdb_printf(" 0x%p", mod->module_core);
65063 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
65064
65065 #ifdef CONFIG_MODULE_UNLOAD
65066 {
65067 diff --git a/kernel/events/core.c b/kernel/events/core.c
65068 index fd126f8..70b755b 100644
65069 --- a/kernel/events/core.c
65070 +++ b/kernel/events/core.c
65071 @@ -181,7 +181,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
65072 return 0;
65073 }
65074
65075 -static atomic64_t perf_event_id;
65076 +static atomic64_unchecked_t perf_event_id;
65077
65078 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
65079 enum event_type_t event_type);
65080 @@ -2659,7 +2659,7 @@ static void __perf_event_read(void *info)
65081
65082 static inline u64 perf_event_count(struct perf_event *event)
65083 {
65084 - return local64_read(&event->count) + atomic64_read(&event->child_count);
65085 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
65086 }
65087
65088 static u64 perf_event_read(struct perf_event *event)
65089 @@ -2983,9 +2983,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
65090 mutex_lock(&event->child_mutex);
65091 total += perf_event_read(event);
65092 *enabled += event->total_time_enabled +
65093 - atomic64_read(&event->child_total_time_enabled);
65094 + atomic64_read_unchecked(&event->child_total_time_enabled);
65095 *running += event->total_time_running +
65096 - atomic64_read(&event->child_total_time_running);
65097 + atomic64_read_unchecked(&event->child_total_time_running);
65098
65099 list_for_each_entry(child, &event->child_list, child_list) {
65100 total += perf_event_read(child);
65101 @@ -3393,10 +3393,10 @@ void perf_event_update_userpage(struct perf_event *event)
65102 userpg->offset -= local64_read(&event->hw.prev_count);
65103
65104 userpg->time_enabled = enabled +
65105 - atomic64_read(&event->child_total_time_enabled);
65106 + atomic64_read_unchecked(&event->child_total_time_enabled);
65107
65108 userpg->time_running = running +
65109 - atomic64_read(&event->child_total_time_running);
65110 + atomic64_read_unchecked(&event->child_total_time_running);
65111
65112 arch_perf_update_userpage(userpg, now);
65113
65114 @@ -3829,11 +3829,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
65115 values[n++] = perf_event_count(event);
65116 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
65117 values[n++] = enabled +
65118 - atomic64_read(&event->child_total_time_enabled);
65119 + atomic64_read_unchecked(&event->child_total_time_enabled);
65120 }
65121 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
65122 values[n++] = running +
65123 - atomic64_read(&event->child_total_time_running);
65124 + atomic64_read_unchecked(&event->child_total_time_running);
65125 }
65126 if (read_format & PERF_FORMAT_ID)
65127 values[n++] = primary_event_id(event);
65128 @@ -4511,12 +4511,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
65129 * need to add enough zero bytes after the string to handle
65130 * the 64bit alignment we do later.
65131 */
65132 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
65133 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
65134 if (!buf) {
65135 name = strncpy(tmp, "//enomem", sizeof(tmp));
65136 goto got_name;
65137 }
65138 - name = d_path(&file->f_path, buf, PATH_MAX);
65139 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
65140 if (IS_ERR(name)) {
65141 name = strncpy(tmp, "//toolong", sizeof(tmp));
65142 goto got_name;
65143 @@ -5929,7 +5929,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
65144 event->parent = parent_event;
65145
65146 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65147 - event->id = atomic64_inc_return(&perf_event_id);
65148 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
65149
65150 event->state = PERF_EVENT_STATE_INACTIVE;
65151
65152 @@ -6491,10 +6491,10 @@ static void sync_child_event(struct perf_event *child_event,
65153 /*
65154 * Add back the child's count to the parent's count:
65155 */
65156 - atomic64_add(child_val, &parent_event->child_count);
65157 - atomic64_add(child_event->total_time_enabled,
65158 + atomic64_add_unchecked(child_val, &parent_event->child_count);
65159 + atomic64_add_unchecked(child_event->total_time_enabled,
65160 &parent_event->child_total_time_enabled);
65161 - atomic64_add(child_event->total_time_running,
65162 + atomic64_add_unchecked(child_event->total_time_running,
65163 &parent_event->child_total_time_running);
65164
65165 /*
65166 diff --git a/kernel/exit.c b/kernel/exit.c
65167 index d8bd3b42..26bd8dc 100644
65168 --- a/kernel/exit.c
65169 +++ b/kernel/exit.c
65170 @@ -59,6 +59,10 @@
65171 #include <asm/pgtable.h>
65172 #include <asm/mmu_context.h>
65173
65174 +#ifdef CONFIG_GRKERNSEC
65175 +extern rwlock_t grsec_exec_file_lock;
65176 +#endif
65177 +
65178 static void exit_mm(struct task_struct * tsk);
65179
65180 static void __unhash_process(struct task_struct *p, bool group_dead)
65181 @@ -170,6 +174,10 @@ void release_task(struct task_struct * p)
65182 struct task_struct *leader;
65183 int zap_leader;
65184 repeat:
65185 +#ifdef CONFIG_NET
65186 + gr_del_task_from_ip_table(p);
65187 +#endif
65188 +
65189 /* don't need to get the RCU readlock here - the process is dead and
65190 * can't be modifying its own credentials. But shut RCU-lockdep up */
65191 rcu_read_lock();
65192 @@ -382,7 +390,7 @@ int allow_signal(int sig)
65193 * know it'll be handled, so that they don't get converted to
65194 * SIGKILL or just silently dropped.
65195 */
65196 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
65197 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
65198 recalc_sigpending();
65199 spin_unlock_irq(&current->sighand->siglock);
65200 return 0;
65201 @@ -418,6 +426,17 @@ void daemonize(const char *name, ...)
65202 vsnprintf(current->comm, sizeof(current->comm), name, args);
65203 va_end(args);
65204
65205 +#ifdef CONFIG_GRKERNSEC
65206 + write_lock(&grsec_exec_file_lock);
65207 + if (current->exec_file) {
65208 + fput(current->exec_file);
65209 + current->exec_file = NULL;
65210 + }
65211 + write_unlock(&grsec_exec_file_lock);
65212 +#endif
65213 +
65214 + gr_set_kernel_label(current);
65215 +
65216 /*
65217 * If we were started as result of loading a module, close all of the
65218 * user space pages. We don't need them, and if we didn't close them
65219 @@ -900,6 +919,8 @@ void do_exit(long code)
65220 struct task_struct *tsk = current;
65221 int group_dead;
65222
65223 + set_fs(USER_DS);
65224 +
65225 profile_task_exit(tsk);
65226
65227 WARN_ON(blk_needs_flush_plug(tsk));
65228 @@ -916,7 +937,6 @@ void do_exit(long code)
65229 * mm_release()->clear_child_tid() from writing to a user-controlled
65230 * kernel address.
65231 */
65232 - set_fs(USER_DS);
65233
65234 ptrace_event(PTRACE_EVENT_EXIT, code);
65235
65236 @@ -977,6 +997,9 @@ void do_exit(long code)
65237 tsk->exit_code = code;
65238 taskstats_exit(tsk, group_dead);
65239
65240 + gr_acl_handle_psacct(tsk, code);
65241 + gr_acl_handle_exit();
65242 +
65243 exit_mm(tsk);
65244
65245 if (group_dead)
65246 @@ -1093,7 +1116,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
65247 * Take down every thread in the group. This is called by fatal signals
65248 * as well as by sys_exit_group (below).
65249 */
65250 -void
65251 +__noreturn void
65252 do_group_exit(int exit_code)
65253 {
65254 struct signal_struct *sig = current->signal;
65255 diff --git a/kernel/fork.c b/kernel/fork.c
65256 index 8163333..efb4692 100644
65257 --- a/kernel/fork.c
65258 +++ b/kernel/fork.c
65259 @@ -286,7 +286,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
65260 *stackend = STACK_END_MAGIC; /* for overflow detection */
65261
65262 #ifdef CONFIG_CC_STACKPROTECTOR
65263 - tsk->stack_canary = get_random_int();
65264 + tsk->stack_canary = pax_get_random_long();
65265 #endif
65266
65267 /*
65268 @@ -310,13 +310,78 @@ out:
65269 }
65270
65271 #ifdef CONFIG_MMU
65272 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
65273 +{
65274 + struct vm_area_struct *tmp;
65275 + unsigned long charge;
65276 + struct mempolicy *pol;
65277 + struct file *file;
65278 +
65279 + charge = 0;
65280 + if (mpnt->vm_flags & VM_ACCOUNT) {
65281 + unsigned long len;
65282 + len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65283 + if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
65284 + goto fail_nomem;
65285 + charge = len;
65286 + }
65287 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65288 + if (!tmp)
65289 + goto fail_nomem;
65290 + *tmp = *mpnt;
65291 + tmp->vm_mm = mm;
65292 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
65293 + pol = mpol_dup(vma_policy(mpnt));
65294 + if (IS_ERR(pol))
65295 + goto fail_nomem_policy;
65296 + vma_set_policy(tmp, pol);
65297 + if (anon_vma_fork(tmp, mpnt))
65298 + goto fail_nomem_anon_vma_fork;
65299 + tmp->vm_flags &= ~VM_LOCKED;
65300 + tmp->vm_next = tmp->vm_prev = NULL;
65301 + tmp->vm_mirror = NULL;
65302 + file = tmp->vm_file;
65303 + if (file) {
65304 + struct inode *inode = file->f_path.dentry->d_inode;
65305 + struct address_space *mapping = file->f_mapping;
65306 +
65307 + get_file(file);
65308 + if (tmp->vm_flags & VM_DENYWRITE)
65309 + atomic_dec(&inode->i_writecount);
65310 + mutex_lock(&mapping->i_mmap_mutex);
65311 + if (tmp->vm_flags & VM_SHARED)
65312 + mapping->i_mmap_writable++;
65313 + flush_dcache_mmap_lock(mapping);
65314 + /* insert tmp into the share list, just after mpnt */
65315 + vma_prio_tree_add(tmp, mpnt);
65316 + flush_dcache_mmap_unlock(mapping);
65317 + mutex_unlock(&mapping->i_mmap_mutex);
65318 + }
65319 +
65320 + /*
65321 + * Clear hugetlb-related page reserves for children. This only
65322 + * affects MAP_PRIVATE mappings. Faults generated by the child
65323 + * are not guaranteed to succeed, even if read-only
65324 + */
65325 + if (is_vm_hugetlb_page(tmp))
65326 + reset_vma_resv_huge_pages(tmp);
65327 +
65328 + return tmp;
65329 +
65330 +fail_nomem_anon_vma_fork:
65331 + mpol_put(pol);
65332 +fail_nomem_policy:
65333 + kmem_cache_free(vm_area_cachep, tmp);
65334 +fail_nomem:
65335 + vm_unacct_memory(charge);
65336 + return NULL;
65337 +}
65338 +
65339 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65340 {
65341 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
65342 struct rb_node **rb_link, *rb_parent;
65343 int retval;
65344 - unsigned long charge;
65345 - struct mempolicy *pol;
65346
65347 down_write(&oldmm->mmap_sem);
65348 flush_cache_dup_mm(oldmm);
65349 @@ -328,8 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65350 mm->locked_vm = 0;
65351 mm->mmap = NULL;
65352 mm->mmap_cache = NULL;
65353 - mm->free_area_cache = oldmm->mmap_base;
65354 - mm->cached_hole_size = ~0UL;
65355 + mm->free_area_cache = oldmm->free_area_cache;
65356 + mm->cached_hole_size = oldmm->cached_hole_size;
65357 mm->map_count = 0;
65358 cpumask_clear(mm_cpumask(mm));
65359 mm->mm_rb = RB_ROOT;
65360 @@ -345,8 +410,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65361
65362 prev = NULL;
65363 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
65364 - struct file *file;
65365 -
65366 if (mpnt->vm_flags & VM_DONTCOPY) {
65367 long pages = vma_pages(mpnt);
65368 mm->total_vm -= pages;
65369 @@ -354,54 +417,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65370 -pages);
65371 continue;
65372 }
65373 - charge = 0;
65374 - if (mpnt->vm_flags & VM_ACCOUNT) {
65375 - unsigned long len;
65376 - len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65377 - if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
65378 - goto fail_nomem;
65379 - charge = len;
65380 + tmp = dup_vma(mm, oldmm, mpnt);
65381 + if (!tmp) {
65382 + retval = -ENOMEM;
65383 + goto out;
65384 }
65385 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65386 - if (!tmp)
65387 - goto fail_nomem;
65388 - *tmp = *mpnt;
65389 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
65390 - pol = mpol_dup(vma_policy(mpnt));
65391 - retval = PTR_ERR(pol);
65392 - if (IS_ERR(pol))
65393 - goto fail_nomem_policy;
65394 - vma_set_policy(tmp, pol);
65395 - tmp->vm_mm = mm;
65396 - if (anon_vma_fork(tmp, mpnt))
65397 - goto fail_nomem_anon_vma_fork;
65398 - tmp->vm_flags &= ~VM_LOCKED;
65399 - tmp->vm_next = tmp->vm_prev = NULL;
65400 - file = tmp->vm_file;
65401 - if (file) {
65402 - struct inode *inode = file->f_path.dentry->d_inode;
65403 - struct address_space *mapping = file->f_mapping;
65404 -
65405 - get_file(file);
65406 - if (tmp->vm_flags & VM_DENYWRITE)
65407 - atomic_dec(&inode->i_writecount);
65408 - mutex_lock(&mapping->i_mmap_mutex);
65409 - if (tmp->vm_flags & VM_SHARED)
65410 - mapping->i_mmap_writable++;
65411 - flush_dcache_mmap_lock(mapping);
65412 - /* insert tmp into the share list, just after mpnt */
65413 - vma_prio_tree_add(tmp, mpnt);
65414 - flush_dcache_mmap_unlock(mapping);
65415 - mutex_unlock(&mapping->i_mmap_mutex);
65416 - }
65417 -
65418 - /*
65419 - * Clear hugetlb-related page reserves for children. This only
65420 - * affects MAP_PRIVATE mappings. Faults generated by the child
65421 - * are not guaranteed to succeed, even if read-only
65422 - */
65423 - if (is_vm_hugetlb_page(tmp))
65424 - reset_vma_resv_huge_pages(tmp);
65425
65426 /*
65427 * Link in the new vma and copy the page table entries.
65428 @@ -424,6 +444,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65429 if (retval)
65430 goto out;
65431 }
65432 +
65433 +#ifdef CONFIG_PAX_SEGMEXEC
65434 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
65435 + struct vm_area_struct *mpnt_m;
65436 +
65437 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
65438 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
65439 +
65440 + if (!mpnt->vm_mirror)
65441 + continue;
65442 +
65443 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
65444 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
65445 + mpnt->vm_mirror = mpnt_m;
65446 + } else {
65447 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
65448 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
65449 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
65450 + mpnt->vm_mirror->vm_mirror = mpnt;
65451 + }
65452 + }
65453 + BUG_ON(mpnt_m);
65454 + }
65455 +#endif
65456 +
65457 /* a new mm has just been created */
65458 arch_dup_mmap(oldmm, mm);
65459 retval = 0;
65460 @@ -432,14 +477,6 @@ out:
65461 flush_tlb_mm(oldmm);
65462 up_write(&oldmm->mmap_sem);
65463 return retval;
65464 -fail_nomem_anon_vma_fork:
65465 - mpol_put(pol);
65466 -fail_nomem_policy:
65467 - kmem_cache_free(vm_area_cachep, tmp);
65468 -fail_nomem:
65469 - retval = -ENOMEM;
65470 - vm_unacct_memory(charge);
65471 - goto out;
65472 }
65473
65474 static inline int mm_alloc_pgd(struct mm_struct *mm)
65475 @@ -676,8 +713,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
65476 return ERR_PTR(err);
65477
65478 mm = get_task_mm(task);
65479 - if (mm && mm != current->mm &&
65480 - !ptrace_may_access(task, mode)) {
65481 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
65482 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
65483 mmput(mm);
65484 mm = ERR_PTR(-EACCES);
65485 }
65486 @@ -899,13 +936,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
65487 spin_unlock(&fs->lock);
65488 return -EAGAIN;
65489 }
65490 - fs->users++;
65491 + atomic_inc(&fs->users);
65492 spin_unlock(&fs->lock);
65493 return 0;
65494 }
65495 tsk->fs = copy_fs_struct(fs);
65496 if (!tsk->fs)
65497 return -ENOMEM;
65498 + gr_set_chroot_entries(tsk, &tsk->fs->root);
65499 return 0;
65500 }
65501
65502 @@ -1172,6 +1210,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65503 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
65504 #endif
65505 retval = -EAGAIN;
65506 +
65507 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
65508 +
65509 if (atomic_read(&p->real_cred->user->processes) >=
65510 task_rlimit(p, RLIMIT_NPROC)) {
65511 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
65512 @@ -1328,6 +1369,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65513 if (clone_flags & CLONE_THREAD)
65514 p->tgid = current->tgid;
65515
65516 + gr_copy_label(p);
65517 +
65518 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
65519 /*
65520 * Clear TID on mm_release()?
65521 @@ -1502,6 +1545,8 @@ bad_fork_cleanup_count:
65522 bad_fork_free:
65523 free_task(p);
65524 fork_out:
65525 + gr_log_forkfail(retval);
65526 +
65527 return ERR_PTR(retval);
65528 }
65529
65530 @@ -1602,6 +1647,8 @@ long do_fork(unsigned long clone_flags,
65531 if (clone_flags & CLONE_PARENT_SETTID)
65532 put_user(nr, parent_tidptr);
65533
65534 + gr_handle_brute_check();
65535 +
65536 if (clone_flags & CLONE_VFORK) {
65537 p->vfork_done = &vfork;
65538 init_completion(&vfork);
65539 @@ -1700,7 +1747,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
65540 return 0;
65541
65542 /* don't need lock here; in the worst case we'll do useless copy */
65543 - if (fs->users == 1)
65544 + if (atomic_read(&fs->users) == 1)
65545 return 0;
65546
65547 *new_fsp = copy_fs_struct(fs);
65548 @@ -1789,7 +1836,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
65549 fs = current->fs;
65550 spin_lock(&fs->lock);
65551 current->fs = new_fs;
65552 - if (--fs->users)
65553 + gr_set_chroot_entries(current, &current->fs->root);
65554 + if (atomic_dec_return(&fs->users))
65555 new_fs = NULL;
65556 else
65557 new_fs = fs;
65558 diff --git a/kernel/futex.c b/kernel/futex.c
65559 index e2b0fb9..db818ac 100644
65560 --- a/kernel/futex.c
65561 +++ b/kernel/futex.c
65562 @@ -54,6 +54,7 @@
65563 #include <linux/mount.h>
65564 #include <linux/pagemap.h>
65565 #include <linux/syscalls.h>
65566 +#include <linux/ptrace.h>
65567 #include <linux/signal.h>
65568 #include <linux/export.h>
65569 #include <linux/magic.h>
65570 @@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
65571 struct page *page, *page_head;
65572 int err, ro = 0;
65573
65574 +#ifdef CONFIG_PAX_SEGMEXEC
65575 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
65576 + return -EFAULT;
65577 +#endif
65578 +
65579 /*
65580 * The futex address must be "naturally" aligned.
65581 */
65582 @@ -2711,6 +2717,7 @@ static int __init futex_init(void)
65583 {
65584 u32 curval;
65585 int i;
65586 + mm_segment_t oldfs;
65587
65588 /*
65589 * This will fail and we want it. Some arch implementations do
65590 @@ -2722,8 +2729,11 @@ static int __init futex_init(void)
65591 * implementation, the non-functional ones will return
65592 * -ENOSYS.
65593 */
65594 + oldfs = get_fs();
65595 + set_fs(USER_DS);
65596 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
65597 futex_cmpxchg_enabled = 1;
65598 + set_fs(oldfs);
65599
65600 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
65601 plist_head_init(&futex_queues[i].chain);
65602 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
65603 index 9b22d03..6295b62 100644
65604 --- a/kernel/gcov/base.c
65605 +++ b/kernel/gcov/base.c
65606 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
65607 }
65608
65609 #ifdef CONFIG_MODULES
65610 -static inline int within(void *addr, void *start, unsigned long size)
65611 -{
65612 - return ((addr >= start) && (addr < start + size));
65613 -}
65614 -
65615 /* Update list and generate events when modules are unloaded. */
65616 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65617 void *data)
65618 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65619 prev = NULL;
65620 /* Remove entries located in module from linked list. */
65621 for (info = gcov_info_head; info; info = info->next) {
65622 - if (within(info, mod->module_core, mod->core_size)) {
65623 + if (within_module_core_rw((unsigned long)info, mod)) {
65624 if (prev)
65625 prev->next = info->next;
65626 else
65627 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
65628 index ae34bf5..4e2f3d0 100644
65629 --- a/kernel/hrtimer.c
65630 +++ b/kernel/hrtimer.c
65631 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
65632 local_irq_restore(flags);
65633 }
65634
65635 -static void run_hrtimer_softirq(struct softirq_action *h)
65636 +static void run_hrtimer_softirq(void)
65637 {
65638 hrtimer_peek_ahead_timers();
65639 }
65640 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
65641 index 4304919..bbc53fa 100644
65642 --- a/kernel/jump_label.c
65643 +++ b/kernel/jump_label.c
65644 @@ -50,7 +50,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
65645
65646 size = (((unsigned long)stop - (unsigned long)start)
65647 / sizeof(struct jump_entry));
65648 + pax_open_kernel();
65649 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
65650 + pax_close_kernel();
65651 }
65652
65653 static void jump_label_update(struct static_key *key, int enable);
65654 @@ -356,10 +358,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
65655 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
65656 struct jump_entry *iter;
65657
65658 + pax_open_kernel();
65659 for (iter = iter_start; iter < iter_stop; iter++) {
65660 if (within_module_init(iter->code, mod))
65661 iter->code = 0;
65662 }
65663 + pax_close_kernel();
65664 }
65665
65666 static int
65667 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
65668 index 079f1d3..a407562 100644
65669 --- a/kernel/kallsyms.c
65670 +++ b/kernel/kallsyms.c
65671 @@ -11,6 +11,9 @@
65672 * Changed the compression method from stem compression to "table lookup"
65673 * compression (see scripts/kallsyms.c for a more complete description)
65674 */
65675 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65676 +#define __INCLUDED_BY_HIDESYM 1
65677 +#endif
65678 #include <linux/kallsyms.h>
65679 #include <linux/module.h>
65680 #include <linux/init.h>
65681 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
65682
65683 static inline int is_kernel_inittext(unsigned long addr)
65684 {
65685 + if (system_state != SYSTEM_BOOTING)
65686 + return 0;
65687 +
65688 if (addr >= (unsigned long)_sinittext
65689 && addr <= (unsigned long)_einittext)
65690 return 1;
65691 return 0;
65692 }
65693
65694 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65695 +#ifdef CONFIG_MODULES
65696 +static inline int is_module_text(unsigned long addr)
65697 +{
65698 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
65699 + return 1;
65700 +
65701 + addr = ktla_ktva(addr);
65702 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
65703 +}
65704 +#else
65705 +static inline int is_module_text(unsigned long addr)
65706 +{
65707 + return 0;
65708 +}
65709 +#endif
65710 +#endif
65711 +
65712 static inline int is_kernel_text(unsigned long addr)
65713 {
65714 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
65715 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
65716
65717 static inline int is_kernel(unsigned long addr)
65718 {
65719 +
65720 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65721 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
65722 + return 1;
65723 +
65724 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
65725 +#else
65726 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
65727 +#endif
65728 +
65729 return 1;
65730 return in_gate_area_no_mm(addr);
65731 }
65732
65733 static int is_ksym_addr(unsigned long addr)
65734 {
65735 +
65736 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65737 + if (is_module_text(addr))
65738 + return 0;
65739 +#endif
65740 +
65741 if (all_var)
65742 return is_kernel(addr);
65743
65744 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
65745
65746 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
65747 {
65748 - iter->name[0] = '\0';
65749 iter->nameoff = get_symbol_offset(new_pos);
65750 iter->pos = new_pos;
65751 }
65752 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
65753 {
65754 struct kallsym_iter *iter = m->private;
65755
65756 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65757 + if (current_uid())
65758 + return 0;
65759 +#endif
65760 +
65761 /* Some debugging symbols have no name. Ignore them. */
65762 if (!iter->name[0])
65763 return 0;
65764 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
65765 struct kallsym_iter *iter;
65766 int ret;
65767
65768 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
65769 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
65770 if (!iter)
65771 return -ENOMEM;
65772 reset_iter(iter, 0);
65773 diff --git a/kernel/kexec.c b/kernel/kexec.c
65774 index 4e2e472..cd0c7ae 100644
65775 --- a/kernel/kexec.c
65776 +++ b/kernel/kexec.c
65777 @@ -1046,7 +1046,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
65778 unsigned long flags)
65779 {
65780 struct compat_kexec_segment in;
65781 - struct kexec_segment out, __user *ksegments;
65782 + struct kexec_segment out;
65783 + struct kexec_segment __user *ksegments;
65784 unsigned long i, result;
65785
65786 /* Don't allow clients that don't understand the native
65787 diff --git a/kernel/kmod.c b/kernel/kmod.c
65788 index 05698a7..a4c1e3a 100644
65789 --- a/kernel/kmod.c
65790 +++ b/kernel/kmod.c
65791 @@ -66,7 +66,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
65792 kfree(info->argv);
65793 }
65794
65795 -static int call_modprobe(char *module_name, int wait)
65796 +static int call_modprobe(char *module_name, char *module_param, int wait)
65797 {
65798 static char *envp[] = {
65799 "HOME=/",
65800 @@ -75,7 +75,7 @@ static int call_modprobe(char *module_name, int wait)
65801 NULL
65802 };
65803
65804 - char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
65805 + char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
65806 if (!argv)
65807 goto out;
65808
65809 @@ -87,7 +87,8 @@ static int call_modprobe(char *module_name, int wait)
65810 argv[1] = "-q";
65811 argv[2] = "--";
65812 argv[3] = module_name; /* check free_modprobe_argv() */
65813 - argv[4] = NULL;
65814 + argv[4] = module_param;
65815 + argv[5] = NULL;
65816
65817 return call_usermodehelper_fns(modprobe_path, argv, envp,
65818 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
65819 @@ -112,9 +113,8 @@ out:
65820 * If module auto-loading support is disabled then this function
65821 * becomes a no-operation.
65822 */
65823 -int __request_module(bool wait, const char *fmt, ...)
65824 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
65825 {
65826 - va_list args;
65827 char module_name[MODULE_NAME_LEN];
65828 unsigned int max_modprobes;
65829 int ret;
65830 @@ -122,9 +122,7 @@ int __request_module(bool wait, const char *fmt, ...)
65831 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
65832 static int kmod_loop_msg;
65833
65834 - va_start(args, fmt);
65835 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
65836 - va_end(args);
65837 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
65838 if (ret >= MODULE_NAME_LEN)
65839 return -ENAMETOOLONG;
65840
65841 @@ -132,6 +130,20 @@ int __request_module(bool wait, const char *fmt, ...)
65842 if (ret)
65843 return ret;
65844
65845 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65846 + if (!current_uid()) {
65847 + /* hack to workaround consolekit/udisks stupidity */
65848 + read_lock(&tasklist_lock);
65849 + if (!strcmp(current->comm, "mount") &&
65850 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
65851 + read_unlock(&tasklist_lock);
65852 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
65853 + return -EPERM;
65854 + }
65855 + read_unlock(&tasklist_lock);
65856 + }
65857 +#endif
65858 +
65859 /* If modprobe needs a service that is in a module, we get a recursive
65860 * loop. Limit the number of running kmod threads to max_threads/2 or
65861 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
65862 @@ -160,11 +172,52 @@ int __request_module(bool wait, const char *fmt, ...)
65863
65864 trace_module_request(module_name, wait, _RET_IP_);
65865
65866 - ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
65867 + ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
65868
65869 atomic_dec(&kmod_concurrent);
65870 return ret;
65871 }
65872 +
65873 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
65874 +{
65875 + va_list args;
65876 + int ret;
65877 +
65878 + va_start(args, fmt);
65879 + ret = ____request_module(wait, module_param, fmt, args);
65880 + va_end(args);
65881 +
65882 + return ret;
65883 +}
65884 +
65885 +int __request_module(bool wait, const char *fmt, ...)
65886 +{
65887 + va_list args;
65888 + int ret;
65889 +
65890 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65891 + if (current_uid()) {
65892 + char module_param[MODULE_NAME_LEN];
65893 +
65894 + memset(module_param, 0, sizeof(module_param));
65895 +
65896 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
65897 +
65898 + va_start(args, fmt);
65899 + ret = ____request_module(wait, module_param, fmt, args);
65900 + va_end(args);
65901 +
65902 + return ret;
65903 + }
65904 +#endif
65905 +
65906 + va_start(args, fmt);
65907 + ret = ____request_module(wait, NULL, fmt, args);
65908 + va_end(args);
65909 +
65910 + return ret;
65911 +}
65912 +
65913 EXPORT_SYMBOL(__request_module);
65914 #endif /* CONFIG_MODULES */
65915
65916 @@ -267,7 +320,7 @@ static int wait_for_helper(void *data)
65917 *
65918 * Thus the __user pointer cast is valid here.
65919 */
65920 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
65921 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
65922
65923 /*
65924 * If ret is 0, either ____call_usermodehelper failed and the
65925 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
65926 index c62b854..cb67968 100644
65927 --- a/kernel/kprobes.c
65928 +++ b/kernel/kprobes.c
65929 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
65930 * kernel image and loaded module images reside. This is required
65931 * so x86_64 can correctly handle the %rip-relative fixups.
65932 */
65933 - kip->insns = module_alloc(PAGE_SIZE);
65934 + kip->insns = module_alloc_exec(PAGE_SIZE);
65935 if (!kip->insns) {
65936 kfree(kip);
65937 return NULL;
65938 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
65939 */
65940 if (!list_is_singular(&kip->list)) {
65941 list_del(&kip->list);
65942 - module_free(NULL, kip->insns);
65943 + module_free_exec(NULL, kip->insns);
65944 kfree(kip);
65945 }
65946 return 1;
65947 @@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
65948 {
65949 int i, err = 0;
65950 unsigned long offset = 0, size = 0;
65951 - char *modname, namebuf[128];
65952 + char *modname, namebuf[KSYM_NAME_LEN];
65953 const char *symbol_name;
65954 void *addr;
65955 struct kprobe_blackpoint *kb;
65956 @@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
65957 const char *sym = NULL;
65958 unsigned int i = *(loff_t *) v;
65959 unsigned long offset = 0;
65960 - char *modname, namebuf[128];
65961 + char *modname, namebuf[KSYM_NAME_LEN];
65962
65963 head = &kprobe_table[i];
65964 preempt_disable();
65965 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
65966 index 4e316e1..5501eef 100644
65967 --- a/kernel/ksysfs.c
65968 +++ b/kernel/ksysfs.c
65969 @@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
65970 {
65971 if (count+1 > UEVENT_HELPER_PATH_LEN)
65972 return -ENOENT;
65973 + if (!capable(CAP_SYS_ADMIN))
65974 + return -EPERM;
65975 memcpy(uevent_helper, buf, count);
65976 uevent_helper[count] = '\0';
65977 if (count && uevent_helper[count-1] == '\n')
65978 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
65979 index ea9ee45..67ebc8f 100644
65980 --- a/kernel/lockdep.c
65981 +++ b/kernel/lockdep.c
65982 @@ -590,6 +590,10 @@ static int static_obj(void *obj)
65983 end = (unsigned long) &_end,
65984 addr = (unsigned long) obj;
65985
65986 +#ifdef CONFIG_PAX_KERNEXEC
65987 + start = ktla_ktva(start);
65988 +#endif
65989 +
65990 /*
65991 * static variable?
65992 */
65993 @@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
65994 if (!static_obj(lock->key)) {
65995 debug_locks_off();
65996 printk("INFO: trying to register non-static key.\n");
65997 + printk("lock:%pS key:%pS.\n", lock, lock->key);
65998 printk("the code is fine but needs lockdep annotation.\n");
65999 printk("turning off the locking correctness validator.\n");
66000 dump_stack();
66001 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
66002 if (!class)
66003 return 0;
66004 }
66005 - atomic_inc((atomic_t *)&class->ops);
66006 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
66007 if (very_verbose(class)) {
66008 printk("\nacquire class [%p] %s", class->key, class->name);
66009 if (class->name_version > 1)
66010 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
66011 index 91c32a0..b2c71c5 100644
66012 --- a/kernel/lockdep_proc.c
66013 +++ b/kernel/lockdep_proc.c
66014 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
66015
66016 static void print_name(struct seq_file *m, struct lock_class *class)
66017 {
66018 - char str[128];
66019 + char str[KSYM_NAME_LEN];
66020 const char *name = class->name;
66021
66022 if (!name) {
66023 diff --git a/kernel/module.c b/kernel/module.c
66024 index 78ac6ec..e87db0e 100644
66025 --- a/kernel/module.c
66026 +++ b/kernel/module.c
66027 @@ -58,6 +58,7 @@
66028 #include <linux/jump_label.h>
66029 #include <linux/pfn.h>
66030 #include <linux/bsearch.h>
66031 +#include <linux/grsecurity.h>
66032
66033 #define CREATE_TRACE_POINTS
66034 #include <trace/events/module.h>
66035 @@ -114,7 +115,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
66036
66037 /* Bounds of module allocation, for speeding __module_address.
66038 * Protected by module_mutex. */
66039 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
66040 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
66041 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
66042
66043 int register_module_notifier(struct notifier_block * nb)
66044 {
66045 @@ -278,7 +280,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66046 return true;
66047
66048 list_for_each_entry_rcu(mod, &modules, list) {
66049 - struct symsearch arr[] = {
66050 + struct symsearch modarr[] = {
66051 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
66052 NOT_GPL_ONLY, false },
66053 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
66054 @@ -300,7 +302,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66055 #endif
66056 };
66057
66058 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
66059 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
66060 return true;
66061 }
66062 return false;
66063 @@ -432,7 +434,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
66064 static int percpu_modalloc(struct module *mod,
66065 unsigned long size, unsigned long align)
66066 {
66067 - if (align > PAGE_SIZE) {
66068 + if (align-1 >= PAGE_SIZE) {
66069 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
66070 mod->name, align, PAGE_SIZE);
66071 align = PAGE_SIZE;
66072 @@ -1032,7 +1034,7 @@ struct module_attribute module_uevent =
66073 static ssize_t show_coresize(struct module_attribute *mattr,
66074 struct module_kobject *mk, char *buffer)
66075 {
66076 - return sprintf(buffer, "%u\n", mk->mod->core_size);
66077 + return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
66078 }
66079
66080 static struct module_attribute modinfo_coresize =
66081 @@ -1041,7 +1043,7 @@ static struct module_attribute modinfo_coresize =
66082 static ssize_t show_initsize(struct module_attribute *mattr,
66083 struct module_kobject *mk, char *buffer)
66084 {
66085 - return sprintf(buffer, "%u\n", mk->mod->init_size);
66086 + return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
66087 }
66088
66089 static struct module_attribute modinfo_initsize =
66090 @@ -1255,7 +1257,7 @@ resolve_symbol_wait(struct module *mod,
66091 */
66092 #ifdef CONFIG_SYSFS
66093
66094 -#ifdef CONFIG_KALLSYMS
66095 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66096 static inline bool sect_empty(const Elf_Shdr *sect)
66097 {
66098 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
66099 @@ -1721,21 +1723,21 @@ static void set_section_ro_nx(void *base,
66100
66101 static void unset_module_core_ro_nx(struct module *mod)
66102 {
66103 - set_page_attributes(mod->module_core + mod->core_text_size,
66104 - mod->module_core + mod->core_size,
66105 + set_page_attributes(mod->module_core_rw,
66106 + mod->module_core_rw + mod->core_size_rw,
66107 set_memory_x);
66108 - set_page_attributes(mod->module_core,
66109 - mod->module_core + mod->core_ro_size,
66110 + set_page_attributes(mod->module_core_rx,
66111 + mod->module_core_rx + mod->core_size_rx,
66112 set_memory_rw);
66113 }
66114
66115 static void unset_module_init_ro_nx(struct module *mod)
66116 {
66117 - set_page_attributes(mod->module_init + mod->init_text_size,
66118 - mod->module_init + mod->init_size,
66119 + set_page_attributes(mod->module_init_rw,
66120 + mod->module_init_rw + mod->init_size_rw,
66121 set_memory_x);
66122 - set_page_attributes(mod->module_init,
66123 - mod->module_init + mod->init_ro_size,
66124 + set_page_attributes(mod->module_init_rx,
66125 + mod->module_init_rx + mod->init_size_rx,
66126 set_memory_rw);
66127 }
66128
66129 @@ -1746,14 +1748,14 @@ void set_all_modules_text_rw(void)
66130
66131 mutex_lock(&module_mutex);
66132 list_for_each_entry_rcu(mod, &modules, list) {
66133 - if ((mod->module_core) && (mod->core_text_size)) {
66134 - set_page_attributes(mod->module_core,
66135 - mod->module_core + mod->core_text_size,
66136 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66137 + set_page_attributes(mod->module_core_rx,
66138 + mod->module_core_rx + mod->core_size_rx,
66139 set_memory_rw);
66140 }
66141 - if ((mod->module_init) && (mod->init_text_size)) {
66142 - set_page_attributes(mod->module_init,
66143 - mod->module_init + mod->init_text_size,
66144 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66145 + set_page_attributes(mod->module_init_rx,
66146 + mod->module_init_rx + mod->init_size_rx,
66147 set_memory_rw);
66148 }
66149 }
66150 @@ -1767,14 +1769,14 @@ void set_all_modules_text_ro(void)
66151
66152 mutex_lock(&module_mutex);
66153 list_for_each_entry_rcu(mod, &modules, list) {
66154 - if ((mod->module_core) && (mod->core_text_size)) {
66155 - set_page_attributes(mod->module_core,
66156 - mod->module_core + mod->core_text_size,
66157 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66158 + set_page_attributes(mod->module_core_rx,
66159 + mod->module_core_rx + mod->core_size_rx,
66160 set_memory_ro);
66161 }
66162 - if ((mod->module_init) && (mod->init_text_size)) {
66163 - set_page_attributes(mod->module_init,
66164 - mod->module_init + mod->init_text_size,
66165 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66166 + set_page_attributes(mod->module_init_rx,
66167 + mod->module_init_rx + mod->init_size_rx,
66168 set_memory_ro);
66169 }
66170 }
66171 @@ -1820,16 +1822,19 @@ static void free_module(struct module *mod)
66172
66173 /* This may be NULL, but that's OK */
66174 unset_module_init_ro_nx(mod);
66175 - module_free(mod, mod->module_init);
66176 + module_free(mod, mod->module_init_rw);
66177 + module_free_exec(mod, mod->module_init_rx);
66178 kfree(mod->args);
66179 percpu_modfree(mod);
66180
66181 /* Free lock-classes: */
66182 - lockdep_free_key_range(mod->module_core, mod->core_size);
66183 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
66184 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
66185
66186 /* Finally, free the core (containing the module structure) */
66187 unset_module_core_ro_nx(mod);
66188 - module_free(mod, mod->module_core);
66189 + module_free_exec(mod, mod->module_core_rx);
66190 + module_free(mod, mod->module_core_rw);
66191
66192 #ifdef CONFIG_MPU
66193 update_protections(current->mm);
66194 @@ -1899,9 +1904,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66195 int ret = 0;
66196 const struct kernel_symbol *ksym;
66197
66198 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66199 + int is_fs_load = 0;
66200 + int register_filesystem_found = 0;
66201 + char *p;
66202 +
66203 + p = strstr(mod->args, "grsec_modharden_fs");
66204 + if (p) {
66205 + char *endptr = p + strlen("grsec_modharden_fs");
66206 + /* copy \0 as well */
66207 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
66208 + is_fs_load = 1;
66209 + }
66210 +#endif
66211 +
66212 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
66213 const char *name = info->strtab + sym[i].st_name;
66214
66215 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66216 + /* it's a real shame this will never get ripped and copied
66217 + upstream! ;(
66218 + */
66219 + if (is_fs_load && !strcmp(name, "register_filesystem"))
66220 + register_filesystem_found = 1;
66221 +#endif
66222 +
66223 switch (sym[i].st_shndx) {
66224 case SHN_COMMON:
66225 /* We compiled with -fno-common. These are not
66226 @@ -1922,7 +1949,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66227 ksym = resolve_symbol_wait(mod, info, name);
66228 /* Ok if resolved. */
66229 if (ksym && !IS_ERR(ksym)) {
66230 + pax_open_kernel();
66231 sym[i].st_value = ksym->value;
66232 + pax_close_kernel();
66233 break;
66234 }
66235
66236 @@ -1941,11 +1970,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66237 secbase = (unsigned long)mod_percpu(mod);
66238 else
66239 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
66240 + pax_open_kernel();
66241 sym[i].st_value += secbase;
66242 + pax_close_kernel();
66243 break;
66244 }
66245 }
66246
66247 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66248 + if (is_fs_load && !register_filesystem_found) {
66249 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
66250 + ret = -EPERM;
66251 + }
66252 +#endif
66253 +
66254 return ret;
66255 }
66256
66257 @@ -2049,22 +2087,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
66258 || s->sh_entsize != ~0UL
66259 || strstarts(sname, ".init"))
66260 continue;
66261 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
66262 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66263 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
66264 + else
66265 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
66266 pr_debug("\t%s\n", sname);
66267 }
66268 - switch (m) {
66269 - case 0: /* executable */
66270 - mod->core_size = debug_align(mod->core_size);
66271 - mod->core_text_size = mod->core_size;
66272 - break;
66273 - case 1: /* RO: text and ro-data */
66274 - mod->core_size = debug_align(mod->core_size);
66275 - mod->core_ro_size = mod->core_size;
66276 - break;
66277 - case 3: /* whole core */
66278 - mod->core_size = debug_align(mod->core_size);
66279 - break;
66280 - }
66281 }
66282
66283 pr_debug("Init section allocation order:\n");
66284 @@ -2078,23 +2106,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
66285 || s->sh_entsize != ~0UL
66286 || !strstarts(sname, ".init"))
66287 continue;
66288 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
66289 - | INIT_OFFSET_MASK);
66290 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66291 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
66292 + else
66293 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
66294 + s->sh_entsize |= INIT_OFFSET_MASK;
66295 pr_debug("\t%s\n", sname);
66296 }
66297 - switch (m) {
66298 - case 0: /* executable */
66299 - mod->init_size = debug_align(mod->init_size);
66300 - mod->init_text_size = mod->init_size;
66301 - break;
66302 - case 1: /* RO: text and ro-data */
66303 - mod->init_size = debug_align(mod->init_size);
66304 - mod->init_ro_size = mod->init_size;
66305 - break;
66306 - case 3: /* whole init */
66307 - mod->init_size = debug_align(mod->init_size);
66308 - break;
66309 - }
66310 }
66311 }
66312
66313 @@ -2266,7 +2284,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66314
66315 /* Put symbol section at end of init part of module. */
66316 symsect->sh_flags |= SHF_ALLOC;
66317 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
66318 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
66319 info->index.sym) | INIT_OFFSET_MASK;
66320 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
66321
66322 @@ -2281,13 +2299,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66323 }
66324
66325 /* Append room for core symbols at end of core part. */
66326 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
66327 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
66328 - mod->core_size += strtab_size;
66329 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
66330 + info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
66331 + mod->core_size_rx += strtab_size;
66332
66333 /* Put string table section at end of init part of module. */
66334 strsect->sh_flags |= SHF_ALLOC;
66335 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
66336 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
66337 info->index.str) | INIT_OFFSET_MASK;
66338 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
66339 }
66340 @@ -2305,12 +2323,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66341 /* Make sure we get permanent strtab: don't use info->strtab. */
66342 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
66343
66344 + pax_open_kernel();
66345 +
66346 /* Set types up while we still have access to sections. */
66347 for (i = 0; i < mod->num_symtab; i++)
66348 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
66349
66350 - mod->core_symtab = dst = mod->module_core + info->symoffs;
66351 - mod->core_strtab = s = mod->module_core + info->stroffs;
66352 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
66353 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
66354 src = mod->symtab;
66355 *dst = *src;
66356 *s++ = 0;
66357 @@ -2323,6 +2343,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66358 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
66359 }
66360 mod->core_num_syms = ndst;
66361 +
66362 + pax_close_kernel();
66363 }
66364 #else
66365 static inline void layout_symtab(struct module *mod, struct load_info *info)
66366 @@ -2356,17 +2378,33 @@ void * __weak module_alloc(unsigned long size)
66367 return size == 0 ? NULL : vmalloc_exec(size);
66368 }
66369
66370 -static void *module_alloc_update_bounds(unsigned long size)
66371 +static void *module_alloc_update_bounds_rw(unsigned long size)
66372 {
66373 void *ret = module_alloc(size);
66374
66375 if (ret) {
66376 mutex_lock(&module_mutex);
66377 /* Update module bounds. */
66378 - if ((unsigned long)ret < module_addr_min)
66379 - module_addr_min = (unsigned long)ret;
66380 - if ((unsigned long)ret + size > module_addr_max)
66381 - module_addr_max = (unsigned long)ret + size;
66382 + if ((unsigned long)ret < module_addr_min_rw)
66383 + module_addr_min_rw = (unsigned long)ret;
66384 + if ((unsigned long)ret + size > module_addr_max_rw)
66385 + module_addr_max_rw = (unsigned long)ret + size;
66386 + mutex_unlock(&module_mutex);
66387 + }
66388 + return ret;
66389 +}
66390 +
66391 +static void *module_alloc_update_bounds_rx(unsigned long size)
66392 +{
66393 + void *ret = module_alloc_exec(size);
66394 +
66395 + if (ret) {
66396 + mutex_lock(&module_mutex);
66397 + /* Update module bounds. */
66398 + if ((unsigned long)ret < module_addr_min_rx)
66399 + module_addr_min_rx = (unsigned long)ret;
66400 + if ((unsigned long)ret + size > module_addr_max_rx)
66401 + module_addr_max_rx = (unsigned long)ret + size;
66402 mutex_unlock(&module_mutex);
66403 }
66404 return ret;
66405 @@ -2543,8 +2581,14 @@ static struct module *setup_load_info(struct load_info *info)
66406 static int check_modinfo(struct module *mod, struct load_info *info)
66407 {
66408 const char *modmagic = get_modinfo(info, "vermagic");
66409 + const char *license = get_modinfo(info, "license");
66410 int err;
66411
66412 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
66413 + if (!license || !license_is_gpl_compatible(license))
66414 + return -ENOEXEC;
66415 +#endif
66416 +
66417 /* This is allowed: modprobe --force will invalidate it. */
66418 if (!modmagic) {
66419 err = try_to_force_load(mod, "bad vermagic");
66420 @@ -2567,7 +2611,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
66421 }
66422
66423 /* Set up license info based on the info section */
66424 - set_license(mod, get_modinfo(info, "license"));
66425 + set_license(mod, license);
66426
66427 return 0;
66428 }
66429 @@ -2661,7 +2705,7 @@ static int move_module(struct module *mod, struct load_info *info)
66430 void *ptr;
66431
66432 /* Do the allocs. */
66433 - ptr = module_alloc_update_bounds(mod->core_size);
66434 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
66435 /*
66436 * The pointer to this block is stored in the module structure
66437 * which is inside the block. Just mark it as not being a
66438 @@ -2671,23 +2715,50 @@ static int move_module(struct module *mod, struct load_info *info)
66439 if (!ptr)
66440 return -ENOMEM;
66441
66442 - memset(ptr, 0, mod->core_size);
66443 - mod->module_core = ptr;
66444 + memset(ptr, 0, mod->core_size_rw);
66445 + mod->module_core_rw = ptr;
66446
66447 - ptr = module_alloc_update_bounds(mod->init_size);
66448 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
66449 /*
66450 * The pointer to this block is stored in the module structure
66451 * which is inside the block. This block doesn't need to be
66452 * scanned as it contains data and code that will be freed
66453 * after the module is initialized.
66454 */
66455 - kmemleak_ignore(ptr);
66456 - if (!ptr && mod->init_size) {
66457 - module_free(mod, mod->module_core);
66458 + kmemleak_not_leak(ptr);
66459 + if (!ptr && mod->init_size_rw) {
66460 + module_free(mod, mod->module_core_rw);
66461 return -ENOMEM;
66462 }
66463 - memset(ptr, 0, mod->init_size);
66464 - mod->module_init = ptr;
66465 + memset(ptr, 0, mod->init_size_rw);
66466 + mod->module_init_rw = ptr;
66467 +
66468 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
66469 + kmemleak_not_leak(ptr);
66470 + if (!ptr) {
66471 + module_free(mod, mod->module_init_rw);
66472 + module_free(mod, mod->module_core_rw);
66473 + return -ENOMEM;
66474 + }
66475 +
66476 + pax_open_kernel();
66477 + memset(ptr, 0, mod->core_size_rx);
66478 + pax_close_kernel();
66479 + mod->module_core_rx = ptr;
66480 +
66481 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
66482 + kmemleak_not_leak(ptr);
66483 + if (!ptr && mod->init_size_rx) {
66484 + module_free_exec(mod, mod->module_core_rx);
66485 + module_free(mod, mod->module_init_rw);
66486 + module_free(mod, mod->module_core_rw);
66487 + return -ENOMEM;
66488 + }
66489 +
66490 + pax_open_kernel();
66491 + memset(ptr, 0, mod->init_size_rx);
66492 + pax_close_kernel();
66493 + mod->module_init_rx = ptr;
66494
66495 /* Transfer each section which specifies SHF_ALLOC */
66496 pr_debug("final section addresses:\n");
66497 @@ -2698,16 +2769,45 @@ static int move_module(struct module *mod, struct load_info *info)
66498 if (!(shdr->sh_flags & SHF_ALLOC))
66499 continue;
66500
66501 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
66502 - dest = mod->module_init
66503 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66504 - else
66505 - dest = mod->module_core + shdr->sh_entsize;
66506 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
66507 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66508 + dest = mod->module_init_rw
66509 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66510 + else
66511 + dest = mod->module_init_rx
66512 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66513 + } else {
66514 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66515 + dest = mod->module_core_rw + shdr->sh_entsize;
66516 + else
66517 + dest = mod->module_core_rx + shdr->sh_entsize;
66518 + }
66519 +
66520 + if (shdr->sh_type != SHT_NOBITS) {
66521 +
66522 +#ifdef CONFIG_PAX_KERNEXEC
66523 +#ifdef CONFIG_X86_64
66524 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
66525 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
66526 +#endif
66527 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
66528 + pax_open_kernel();
66529 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66530 + pax_close_kernel();
66531 + } else
66532 +#endif
66533
66534 - if (shdr->sh_type != SHT_NOBITS)
66535 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66536 + }
66537 /* Update sh_addr to point to copy in image. */
66538 - shdr->sh_addr = (unsigned long)dest;
66539 +
66540 +#ifdef CONFIG_PAX_KERNEXEC
66541 + if (shdr->sh_flags & SHF_EXECINSTR)
66542 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
66543 + else
66544 +#endif
66545 +
66546 + shdr->sh_addr = (unsigned long)dest;
66547 pr_debug("\t0x%lx %s\n",
66548 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
66549 }
66550 @@ -2758,12 +2858,12 @@ static void flush_module_icache(const struct module *mod)
66551 * Do it before processing of module parameters, so the module
66552 * can provide parameter accessor functions of its own.
66553 */
66554 - if (mod->module_init)
66555 - flush_icache_range((unsigned long)mod->module_init,
66556 - (unsigned long)mod->module_init
66557 - + mod->init_size);
66558 - flush_icache_range((unsigned long)mod->module_core,
66559 - (unsigned long)mod->module_core + mod->core_size);
66560 + if (mod->module_init_rx)
66561 + flush_icache_range((unsigned long)mod->module_init_rx,
66562 + (unsigned long)mod->module_init_rx
66563 + + mod->init_size_rx);
66564 + flush_icache_range((unsigned long)mod->module_core_rx,
66565 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
66566
66567 set_fs(old_fs);
66568 }
66569 @@ -2833,8 +2933,10 @@ out:
66570 static void module_deallocate(struct module *mod, struct load_info *info)
66571 {
66572 percpu_modfree(mod);
66573 - module_free(mod, mod->module_init);
66574 - module_free(mod, mod->module_core);
66575 + module_free_exec(mod, mod->module_init_rx);
66576 + module_free_exec(mod, mod->module_core_rx);
66577 + module_free(mod, mod->module_init_rw);
66578 + module_free(mod, mod->module_core_rw);
66579 }
66580
66581 int __weak module_finalize(const Elf_Ehdr *hdr,
66582 @@ -2898,9 +3000,38 @@ static struct module *load_module(void __user *umod,
66583 if (err)
66584 goto free_unload;
66585
66586 + /* Now copy in args */
66587 + mod->args = strndup_user(uargs, ~0UL >> 1);
66588 + if (IS_ERR(mod->args)) {
66589 + err = PTR_ERR(mod->args);
66590 + goto free_unload;
66591 + }
66592 +
66593 /* Set up MODINFO_ATTR fields */
66594 setup_modinfo(mod, &info);
66595
66596 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66597 + {
66598 + char *p, *p2;
66599 +
66600 + if (strstr(mod->args, "grsec_modharden_netdev")) {
66601 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
66602 + err = -EPERM;
66603 + goto free_modinfo;
66604 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
66605 + p += strlen("grsec_modharden_normal");
66606 + p2 = strstr(p, "_");
66607 + if (p2) {
66608 + *p2 = '\0';
66609 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
66610 + *p2 = '_';
66611 + }
66612 + err = -EPERM;
66613 + goto free_modinfo;
66614 + }
66615 + }
66616 +#endif
66617 +
66618 /* Fix up syms, so that st_value is a pointer to location. */
66619 err = simplify_symbols(mod, &info);
66620 if (err < 0)
66621 @@ -2916,13 +3047,6 @@ static struct module *load_module(void __user *umod,
66622
66623 flush_module_icache(mod);
66624
66625 - /* Now copy in args */
66626 - mod->args = strndup_user(uargs, ~0UL >> 1);
66627 - if (IS_ERR(mod->args)) {
66628 - err = PTR_ERR(mod->args);
66629 - goto free_arch_cleanup;
66630 - }
66631 -
66632 /* Mark state as coming so strong_try_module_get() ignores us. */
66633 mod->state = MODULE_STATE_COMING;
66634
66635 @@ -2980,11 +3104,10 @@ static struct module *load_module(void __user *umod,
66636 unlock:
66637 mutex_unlock(&module_mutex);
66638 synchronize_sched();
66639 - kfree(mod->args);
66640 - free_arch_cleanup:
66641 module_arch_cleanup(mod);
66642 free_modinfo:
66643 free_modinfo(mod);
66644 + kfree(mod->args);
66645 free_unload:
66646 module_unload_free(mod);
66647 free_module:
66648 @@ -3025,16 +3148,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66649 MODULE_STATE_COMING, mod);
66650
66651 /* Set RO and NX regions for core */
66652 - set_section_ro_nx(mod->module_core,
66653 - mod->core_text_size,
66654 - mod->core_ro_size,
66655 - mod->core_size);
66656 + set_section_ro_nx(mod->module_core_rx,
66657 + mod->core_size_rx,
66658 + mod->core_size_rx,
66659 + mod->core_size_rx);
66660
66661 /* Set RO and NX regions for init */
66662 - set_section_ro_nx(mod->module_init,
66663 - mod->init_text_size,
66664 - mod->init_ro_size,
66665 - mod->init_size);
66666 + set_section_ro_nx(mod->module_init_rx,
66667 + mod->init_size_rx,
66668 + mod->init_size_rx,
66669 + mod->init_size_rx);
66670
66671 do_mod_ctors(mod);
66672 /* Start the module */
66673 @@ -3080,11 +3203,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66674 mod->strtab = mod->core_strtab;
66675 #endif
66676 unset_module_init_ro_nx(mod);
66677 - module_free(mod, mod->module_init);
66678 - mod->module_init = NULL;
66679 - mod->init_size = 0;
66680 - mod->init_ro_size = 0;
66681 - mod->init_text_size = 0;
66682 + module_free(mod, mod->module_init_rw);
66683 + module_free_exec(mod, mod->module_init_rx);
66684 + mod->module_init_rw = NULL;
66685 + mod->module_init_rx = NULL;
66686 + mod->init_size_rw = 0;
66687 + mod->init_size_rx = 0;
66688 mutex_unlock(&module_mutex);
66689
66690 return 0;
66691 @@ -3115,10 +3239,16 @@ static const char *get_ksymbol(struct module *mod,
66692 unsigned long nextval;
66693
66694 /* At worse, next value is at end of module */
66695 - if (within_module_init(addr, mod))
66696 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
66697 + if (within_module_init_rx(addr, mod))
66698 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
66699 + else if (within_module_init_rw(addr, mod))
66700 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
66701 + else if (within_module_core_rx(addr, mod))
66702 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
66703 + else if (within_module_core_rw(addr, mod))
66704 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
66705 else
66706 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
66707 + return NULL;
66708
66709 /* Scan for closest preceding symbol, and next symbol. (ELF
66710 starts real symbols at 1). */
66711 @@ -3353,7 +3483,7 @@ static int m_show(struct seq_file *m, void *p)
66712 char buf[8];
66713
66714 seq_printf(m, "%s %u",
66715 - mod->name, mod->init_size + mod->core_size);
66716 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
66717 print_unload_info(m, mod);
66718
66719 /* Informative for users. */
66720 @@ -3362,7 +3492,7 @@ static int m_show(struct seq_file *m, void *p)
66721 mod->state == MODULE_STATE_COMING ? "Loading":
66722 "Live");
66723 /* Used by oprofile and other similar tools. */
66724 - seq_printf(m, " 0x%pK", mod->module_core);
66725 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
66726
66727 /* Taints info */
66728 if (mod->taints)
66729 @@ -3398,7 +3528,17 @@ static const struct file_operations proc_modules_operations = {
66730
66731 static int __init proc_modules_init(void)
66732 {
66733 +#ifndef CONFIG_GRKERNSEC_HIDESYM
66734 +#ifdef CONFIG_GRKERNSEC_PROC_USER
66735 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66736 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66737 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
66738 +#else
66739 proc_create("modules", 0, NULL, &proc_modules_operations);
66740 +#endif
66741 +#else
66742 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66743 +#endif
66744 return 0;
66745 }
66746 module_init(proc_modules_init);
66747 @@ -3457,12 +3597,12 @@ struct module *__module_address(unsigned long addr)
66748 {
66749 struct module *mod;
66750
66751 - if (addr < module_addr_min || addr > module_addr_max)
66752 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
66753 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
66754 return NULL;
66755
66756 list_for_each_entry_rcu(mod, &modules, list)
66757 - if (within_module_core(addr, mod)
66758 - || within_module_init(addr, mod))
66759 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
66760 return mod;
66761 return NULL;
66762 }
66763 @@ -3496,11 +3636,20 @@ bool is_module_text_address(unsigned long addr)
66764 */
66765 struct module *__module_text_address(unsigned long addr)
66766 {
66767 - struct module *mod = __module_address(addr);
66768 + struct module *mod;
66769 +
66770 +#ifdef CONFIG_X86_32
66771 + addr = ktla_ktva(addr);
66772 +#endif
66773 +
66774 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
66775 + return NULL;
66776 +
66777 + mod = __module_address(addr);
66778 +
66779 if (mod) {
66780 /* Make sure it's within the text section. */
66781 - if (!within(addr, mod->module_init, mod->init_text_size)
66782 - && !within(addr, mod->module_core, mod->core_text_size))
66783 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
66784 mod = NULL;
66785 }
66786 return mod;
66787 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
66788 index 7e3443f..b2a1e6b 100644
66789 --- a/kernel/mutex-debug.c
66790 +++ b/kernel/mutex-debug.c
66791 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
66792 }
66793
66794 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66795 - struct thread_info *ti)
66796 + struct task_struct *task)
66797 {
66798 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
66799
66800 /* Mark the current thread as blocked on the lock: */
66801 - ti->task->blocked_on = waiter;
66802 + task->blocked_on = waiter;
66803 }
66804
66805 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66806 - struct thread_info *ti)
66807 + struct task_struct *task)
66808 {
66809 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
66810 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
66811 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
66812 - ti->task->blocked_on = NULL;
66813 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
66814 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
66815 + task->blocked_on = NULL;
66816
66817 list_del_init(&waiter->list);
66818 waiter->task = NULL;
66819 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
66820 index 0799fd3..d06ae3b 100644
66821 --- a/kernel/mutex-debug.h
66822 +++ b/kernel/mutex-debug.h
66823 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
66824 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
66825 extern void debug_mutex_add_waiter(struct mutex *lock,
66826 struct mutex_waiter *waiter,
66827 - struct thread_info *ti);
66828 + struct task_struct *task);
66829 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66830 - struct thread_info *ti);
66831 + struct task_struct *task);
66832 extern void debug_mutex_unlock(struct mutex *lock);
66833 extern void debug_mutex_init(struct mutex *lock, const char *name,
66834 struct lock_class_key *key);
66835 diff --git a/kernel/mutex.c b/kernel/mutex.c
66836 index a307cc9..27fd2e9 100644
66837 --- a/kernel/mutex.c
66838 +++ b/kernel/mutex.c
66839 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66840 spin_lock_mutex(&lock->wait_lock, flags);
66841
66842 debug_mutex_lock_common(lock, &waiter);
66843 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
66844 + debug_mutex_add_waiter(lock, &waiter, task);
66845
66846 /* add waiting tasks to the end of the waitqueue (FIFO): */
66847 list_add_tail(&waiter.list, &lock->wait_list);
66848 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66849 * TASK_UNINTERRUPTIBLE case.)
66850 */
66851 if (unlikely(signal_pending_state(state, task))) {
66852 - mutex_remove_waiter(lock, &waiter,
66853 - task_thread_info(task));
66854 + mutex_remove_waiter(lock, &waiter, task);
66855 mutex_release(&lock->dep_map, 1, ip);
66856 spin_unlock_mutex(&lock->wait_lock, flags);
66857
66858 @@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66859 done:
66860 lock_acquired(&lock->dep_map, ip);
66861 /* got the lock - rejoice! */
66862 - mutex_remove_waiter(lock, &waiter, current_thread_info());
66863 + mutex_remove_waiter(lock, &waiter, task);
66864 mutex_set_owner(lock);
66865
66866 /* set it to 0 if there are no waiters left: */
66867 diff --git a/kernel/panic.c b/kernel/panic.c
66868 index 8ed89a1..e83856a 100644
66869 --- a/kernel/panic.c
66870 +++ b/kernel/panic.c
66871 @@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
66872 const char *board;
66873
66874 printk(KERN_WARNING "------------[ cut here ]------------\n");
66875 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
66876 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
66877 board = dmi_get_system_info(DMI_PRODUCT_NAME);
66878 if (board)
66879 printk(KERN_WARNING "Hardware name: %s\n", board);
66880 @@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
66881 */
66882 void __stack_chk_fail(void)
66883 {
66884 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
66885 + dump_stack();
66886 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
66887 __builtin_return_address(0));
66888 }
66889 EXPORT_SYMBOL(__stack_chk_fail);
66890 diff --git a/kernel/pid.c b/kernel/pid.c
66891 index 9f08dfa..6765c40 100644
66892 --- a/kernel/pid.c
66893 +++ b/kernel/pid.c
66894 @@ -33,6 +33,7 @@
66895 #include <linux/rculist.h>
66896 #include <linux/bootmem.h>
66897 #include <linux/hash.h>
66898 +#include <linux/security.h>
66899 #include <linux/pid_namespace.h>
66900 #include <linux/init_task.h>
66901 #include <linux/syscalls.h>
66902 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
66903
66904 int pid_max = PID_MAX_DEFAULT;
66905
66906 -#define RESERVED_PIDS 300
66907 +#define RESERVED_PIDS 500
66908
66909 int pid_max_min = RESERVED_PIDS + 1;
66910 int pid_max_max = PID_MAX_LIMIT;
66911 @@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
66912 */
66913 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
66914 {
66915 + struct task_struct *task;
66916 +
66917 rcu_lockdep_assert(rcu_read_lock_held(),
66918 "find_task_by_pid_ns() needs rcu_read_lock()"
66919 " protection");
66920 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66921 +
66922 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66923 +
66924 + if (gr_pid_is_chrooted(task))
66925 + return NULL;
66926 +
66927 + return task;
66928 }
66929
66930 struct task_struct *find_task_by_vpid(pid_t vnr)
66931 @@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
66932 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
66933 }
66934
66935 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
66936 +{
66937 + rcu_lockdep_assert(rcu_read_lock_held(),
66938 + "find_task_by_pid_ns() needs rcu_read_lock()"
66939 + " protection");
66940 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
66941 +}
66942 +
66943 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
66944 {
66945 struct pid *pid;
66946 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
66947 index 125cb67..a4d1c30 100644
66948 --- a/kernel/posix-cpu-timers.c
66949 +++ b/kernel/posix-cpu-timers.c
66950 @@ -6,6 +6,7 @@
66951 #include <linux/posix-timers.h>
66952 #include <linux/errno.h>
66953 #include <linux/math64.h>
66954 +#include <linux/security.h>
66955 #include <asm/uaccess.h>
66956 #include <linux/kernel_stat.h>
66957 #include <trace/events/timer.h>
66958 @@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
66959
66960 static __init int init_posix_cpu_timers(void)
66961 {
66962 - struct k_clock process = {
66963 + static struct k_clock process = {
66964 .clock_getres = process_cpu_clock_getres,
66965 .clock_get = process_cpu_clock_get,
66966 .timer_create = process_cpu_timer_create,
66967 .nsleep = process_cpu_nsleep,
66968 .nsleep_restart = process_cpu_nsleep_restart,
66969 };
66970 - struct k_clock thread = {
66971 + static struct k_clock thread = {
66972 .clock_getres = thread_cpu_clock_getres,
66973 .clock_get = thread_cpu_clock_get,
66974 .timer_create = thread_cpu_timer_create,
66975 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
66976 index 69185ae..cc2847a 100644
66977 --- a/kernel/posix-timers.c
66978 +++ b/kernel/posix-timers.c
66979 @@ -43,6 +43,7 @@
66980 #include <linux/idr.h>
66981 #include <linux/posix-clock.h>
66982 #include <linux/posix-timers.h>
66983 +#include <linux/grsecurity.h>
66984 #include <linux/syscalls.h>
66985 #include <linux/wait.h>
66986 #include <linux/workqueue.h>
66987 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
66988 * which we beg off on and pass to do_sys_settimeofday().
66989 */
66990
66991 -static struct k_clock posix_clocks[MAX_CLOCKS];
66992 +static struct k_clock *posix_clocks[MAX_CLOCKS];
66993
66994 /*
66995 * These ones are defined below.
66996 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
66997 */
66998 static __init int init_posix_timers(void)
66999 {
67000 - struct k_clock clock_realtime = {
67001 + static struct k_clock clock_realtime = {
67002 .clock_getres = hrtimer_get_res,
67003 .clock_get = posix_clock_realtime_get,
67004 .clock_set = posix_clock_realtime_set,
67005 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
67006 .timer_get = common_timer_get,
67007 .timer_del = common_timer_del,
67008 };
67009 - struct k_clock clock_monotonic = {
67010 + static struct k_clock clock_monotonic = {
67011 .clock_getres = hrtimer_get_res,
67012 .clock_get = posix_ktime_get_ts,
67013 .nsleep = common_nsleep,
67014 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
67015 .timer_get = common_timer_get,
67016 .timer_del = common_timer_del,
67017 };
67018 - struct k_clock clock_monotonic_raw = {
67019 + static struct k_clock clock_monotonic_raw = {
67020 .clock_getres = hrtimer_get_res,
67021 .clock_get = posix_get_monotonic_raw,
67022 };
67023 - struct k_clock clock_realtime_coarse = {
67024 + static struct k_clock clock_realtime_coarse = {
67025 .clock_getres = posix_get_coarse_res,
67026 .clock_get = posix_get_realtime_coarse,
67027 };
67028 - struct k_clock clock_monotonic_coarse = {
67029 + static struct k_clock clock_monotonic_coarse = {
67030 .clock_getres = posix_get_coarse_res,
67031 .clock_get = posix_get_monotonic_coarse,
67032 };
67033 - struct k_clock clock_boottime = {
67034 + static struct k_clock clock_boottime = {
67035 .clock_getres = hrtimer_get_res,
67036 .clock_get = posix_get_boottime,
67037 .nsleep = common_nsleep,
67038 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
67039 return;
67040 }
67041
67042 - posix_clocks[clock_id] = *new_clock;
67043 + posix_clocks[clock_id] = new_clock;
67044 }
67045 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
67046
67047 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
67048 return (id & CLOCKFD_MASK) == CLOCKFD ?
67049 &clock_posix_dynamic : &clock_posix_cpu;
67050
67051 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
67052 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
67053 return NULL;
67054 - return &posix_clocks[id];
67055 + return posix_clocks[id];
67056 }
67057
67058 static int common_timer_create(struct k_itimer *new_timer)
67059 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
67060 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
67061 return -EFAULT;
67062
67063 + /* only the CLOCK_REALTIME clock can be set, all other clocks
67064 + have their clock_set fptr set to a nosettime dummy function
67065 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
67066 + call common_clock_set, which calls do_sys_settimeofday, which
67067 + we hook
67068 + */
67069 +
67070 return kc->clock_set(which_clock, &new_tp);
67071 }
67072
67073 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
67074 index d523593..68197a4 100644
67075 --- a/kernel/power/poweroff.c
67076 +++ b/kernel/power/poweroff.c
67077 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
67078 .enable_mask = SYSRQ_ENABLE_BOOT,
67079 };
67080
67081 -static int pm_sysrq_init(void)
67082 +static int __init pm_sysrq_init(void)
67083 {
67084 register_sysrq_key('o', &sysrq_poweroff_op);
67085 return 0;
67086 diff --git a/kernel/power/process.c b/kernel/power/process.c
67087 index 19db29f..33b52b6 100644
67088 --- a/kernel/power/process.c
67089 +++ b/kernel/power/process.c
67090 @@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
67091 u64 elapsed_csecs64;
67092 unsigned int elapsed_csecs;
67093 bool wakeup = false;
67094 + bool timedout = false;
67095
67096 do_gettimeofday(&start);
67097
67098 @@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
67099
67100 while (true) {
67101 todo = 0;
67102 + if (time_after(jiffies, end_time))
67103 + timedout = true;
67104 read_lock(&tasklist_lock);
67105 do_each_thread(g, p) {
67106 if (p == current || !freeze_task(p))
67107 @@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user_only)
67108 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
67109 * transition can't race with task state testing here.
67110 */
67111 - if (!task_is_stopped_or_traced(p) &&
67112 - !freezer_should_skip(p))
67113 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
67114 todo++;
67115 + if (timedout) {
67116 + printk(KERN_ERR "Task refusing to freeze:\n");
67117 + sched_show_task(p);
67118 + }
67119 + }
67120 } while_each_thread(g, p);
67121 read_unlock(&tasklist_lock);
67122
67123 @@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user_only)
67124 todo += wq_busy;
67125 }
67126
67127 - if (!todo || time_after(jiffies, end_time))
67128 + if (!todo || timedout)
67129 break;
67130
67131 if (pm_wakeup_pending()) {
67132 diff --git a/kernel/printk.c b/kernel/printk.c
67133 index b663c2c..1d6ba7a 100644
67134 --- a/kernel/printk.c
67135 +++ b/kernel/printk.c
67136 @@ -316,6 +316,11 @@ static int check_syslog_permissions(int type, bool from_file)
67137 if (from_file && type != SYSLOG_ACTION_OPEN)
67138 return 0;
67139
67140 +#ifdef CONFIG_GRKERNSEC_DMESG
67141 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
67142 + return -EPERM;
67143 +#endif
67144 +
67145 if (syslog_action_restricted(type)) {
67146 if (capable(CAP_SYSLOG))
67147 return 0;
67148 diff --git a/kernel/profile.c b/kernel/profile.c
67149 index 76b8e77..a2930e8 100644
67150 --- a/kernel/profile.c
67151 +++ b/kernel/profile.c
67152 @@ -39,7 +39,7 @@ struct profile_hit {
67153 /* Oprofile timer tick hook */
67154 static int (*timer_hook)(struct pt_regs *) __read_mostly;
67155
67156 -static atomic_t *prof_buffer;
67157 +static atomic_unchecked_t *prof_buffer;
67158 static unsigned long prof_len, prof_shift;
67159
67160 int prof_on __read_mostly;
67161 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
67162 hits[i].pc = 0;
67163 continue;
67164 }
67165 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67166 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67167 hits[i].hits = hits[i].pc = 0;
67168 }
67169 }
67170 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67171 * Add the current hit(s) and flush the write-queue out
67172 * to the global buffer:
67173 */
67174 - atomic_add(nr_hits, &prof_buffer[pc]);
67175 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
67176 for (i = 0; i < NR_PROFILE_HIT; ++i) {
67177 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67178 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67179 hits[i].pc = hits[i].hits = 0;
67180 }
67181 out:
67182 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67183 {
67184 unsigned long pc;
67185 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
67186 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67187 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67188 }
67189 #endif /* !CONFIG_SMP */
67190
67191 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
67192 return -EFAULT;
67193 buf++; p++; count--; read++;
67194 }
67195 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
67196 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
67197 if (copy_to_user(buf, (void *)pnt, count))
67198 return -EFAULT;
67199 read += count;
67200 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
67201 }
67202 #endif
67203 profile_discard_flip_buffers();
67204 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
67205 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
67206 return count;
67207 }
67208
67209 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
67210 index ee8d49b..bd3d790 100644
67211 --- a/kernel/ptrace.c
67212 +++ b/kernel/ptrace.c
67213 @@ -280,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
67214
67215 if (seize)
67216 flags |= PT_SEIZED;
67217 - if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
67218 + if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
67219 flags |= PT_PTRACE_CAP;
67220 task->ptrace = flags;
67221
67222 @@ -487,7 +487,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
67223 break;
67224 return -EIO;
67225 }
67226 - if (copy_to_user(dst, buf, retval))
67227 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
67228 return -EFAULT;
67229 copied += retval;
67230 src += retval;
67231 @@ -672,7 +672,7 @@ int ptrace_request(struct task_struct *child, long request,
67232 bool seized = child->ptrace & PT_SEIZED;
67233 int ret = -EIO;
67234 siginfo_t siginfo, *si;
67235 - void __user *datavp = (void __user *) data;
67236 + void __user *datavp = (__force void __user *) data;
67237 unsigned long __user *datalp = datavp;
67238 unsigned long flags;
67239
67240 @@ -874,14 +874,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
67241 goto out;
67242 }
67243
67244 + if (gr_handle_ptrace(child, request)) {
67245 + ret = -EPERM;
67246 + goto out_put_task_struct;
67247 + }
67248 +
67249 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67250 ret = ptrace_attach(child, request, addr, data);
67251 /*
67252 * Some architectures need to do book-keeping after
67253 * a ptrace attach.
67254 */
67255 - if (!ret)
67256 + if (!ret) {
67257 arch_ptrace_attach(child);
67258 + gr_audit_ptrace(child);
67259 + }
67260 goto out_put_task_struct;
67261 }
67262
67263 @@ -907,7 +914,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
67264 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
67265 if (copied != sizeof(tmp))
67266 return -EIO;
67267 - return put_user(tmp, (unsigned long __user *)data);
67268 + return put_user(tmp, (__force unsigned long __user *)data);
67269 }
67270
67271 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
67272 @@ -1017,14 +1024,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
67273 goto out;
67274 }
67275
67276 + if (gr_handle_ptrace(child, request)) {
67277 + ret = -EPERM;
67278 + goto out_put_task_struct;
67279 + }
67280 +
67281 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67282 ret = ptrace_attach(child, request, addr, data);
67283 /*
67284 * Some architectures need to do book-keeping after
67285 * a ptrace attach.
67286 */
67287 - if (!ret)
67288 + if (!ret) {
67289 arch_ptrace_attach(child);
67290 + gr_audit_ptrace(child);
67291 + }
67292 goto out_put_task_struct;
67293 }
67294
67295 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
67296 index 37a5444..eec170a 100644
67297 --- a/kernel/rcutiny.c
67298 +++ b/kernel/rcutiny.c
67299 @@ -46,7 +46,7 @@
67300 struct rcu_ctrlblk;
67301 static void invoke_rcu_callbacks(void);
67302 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
67303 -static void rcu_process_callbacks(struct softirq_action *unused);
67304 +static void rcu_process_callbacks(void);
67305 static void __call_rcu(struct rcu_head *head,
67306 void (*func)(struct rcu_head *rcu),
67307 struct rcu_ctrlblk *rcp);
67308 @@ -307,7 +307,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
67309 rcu_is_callbacks_kthread()));
67310 }
67311
67312 -static void rcu_process_callbacks(struct softirq_action *unused)
67313 +static void rcu_process_callbacks(void)
67314 {
67315 __rcu_process_callbacks(&rcu_sched_ctrlblk);
67316 __rcu_process_callbacks(&rcu_bh_ctrlblk);
67317 diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
67318 index 22ecea0..3789898 100644
67319 --- a/kernel/rcutiny_plugin.h
67320 +++ b/kernel/rcutiny_plugin.h
67321 @@ -955,7 +955,7 @@ static int rcu_kthread(void *arg)
67322 have_rcu_kthread_work = morework;
67323 local_irq_restore(flags);
67324 if (work)
67325 - rcu_process_callbacks(NULL);
67326 + rcu_process_callbacks();
67327 schedule_timeout_interruptible(1); /* Leave CPU for others. */
67328 }
67329
67330 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
67331 index a89b381..efdcad8 100644
67332 --- a/kernel/rcutorture.c
67333 +++ b/kernel/rcutorture.c
67334 @@ -158,12 +158,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
67335 { 0 };
67336 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
67337 { 0 };
67338 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67339 -static atomic_t n_rcu_torture_alloc;
67340 -static atomic_t n_rcu_torture_alloc_fail;
67341 -static atomic_t n_rcu_torture_free;
67342 -static atomic_t n_rcu_torture_mberror;
67343 -static atomic_t n_rcu_torture_error;
67344 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67345 +static atomic_unchecked_t n_rcu_torture_alloc;
67346 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
67347 +static atomic_unchecked_t n_rcu_torture_free;
67348 +static atomic_unchecked_t n_rcu_torture_mberror;
67349 +static atomic_unchecked_t n_rcu_torture_error;
67350 static long n_rcu_torture_boost_ktrerror;
67351 static long n_rcu_torture_boost_rterror;
67352 static long n_rcu_torture_boost_failure;
67353 @@ -253,11 +253,11 @@ rcu_torture_alloc(void)
67354
67355 spin_lock_bh(&rcu_torture_lock);
67356 if (list_empty(&rcu_torture_freelist)) {
67357 - atomic_inc(&n_rcu_torture_alloc_fail);
67358 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
67359 spin_unlock_bh(&rcu_torture_lock);
67360 return NULL;
67361 }
67362 - atomic_inc(&n_rcu_torture_alloc);
67363 + atomic_inc_unchecked(&n_rcu_torture_alloc);
67364 p = rcu_torture_freelist.next;
67365 list_del_init(p);
67366 spin_unlock_bh(&rcu_torture_lock);
67367 @@ -270,7 +270,7 @@ rcu_torture_alloc(void)
67368 static void
67369 rcu_torture_free(struct rcu_torture *p)
67370 {
67371 - atomic_inc(&n_rcu_torture_free);
67372 + atomic_inc_unchecked(&n_rcu_torture_free);
67373 spin_lock_bh(&rcu_torture_lock);
67374 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
67375 spin_unlock_bh(&rcu_torture_lock);
67376 @@ -390,7 +390,7 @@ rcu_torture_cb(struct rcu_head *p)
67377 i = rp->rtort_pipe_count;
67378 if (i > RCU_TORTURE_PIPE_LEN)
67379 i = RCU_TORTURE_PIPE_LEN;
67380 - atomic_inc(&rcu_torture_wcount[i]);
67381 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67382 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67383 rp->rtort_mbtest = 0;
67384 rcu_torture_free(rp);
67385 @@ -437,7 +437,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
67386 i = rp->rtort_pipe_count;
67387 if (i > RCU_TORTURE_PIPE_LEN)
67388 i = RCU_TORTURE_PIPE_LEN;
67389 - atomic_inc(&rcu_torture_wcount[i]);
67390 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67391 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67392 rp->rtort_mbtest = 0;
67393 list_del(&rp->rtort_free);
67394 @@ -926,7 +926,7 @@ rcu_torture_writer(void *arg)
67395 i = old_rp->rtort_pipe_count;
67396 if (i > RCU_TORTURE_PIPE_LEN)
67397 i = RCU_TORTURE_PIPE_LEN;
67398 - atomic_inc(&rcu_torture_wcount[i]);
67399 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67400 old_rp->rtort_pipe_count++;
67401 cur_ops->deferred_free(old_rp);
67402 }
67403 @@ -1007,7 +1007,7 @@ static void rcu_torture_timer(unsigned long unused)
67404 }
67405 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
67406 if (p->rtort_mbtest == 0)
67407 - atomic_inc(&n_rcu_torture_mberror);
67408 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67409 spin_lock(&rand_lock);
67410 cur_ops->read_delay(&rand);
67411 n_rcu_torture_timers++;
67412 @@ -1071,7 +1071,7 @@ rcu_torture_reader(void *arg)
67413 }
67414 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
67415 if (p->rtort_mbtest == 0)
67416 - atomic_inc(&n_rcu_torture_mberror);
67417 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67418 cur_ops->read_delay(&rand);
67419 preempt_disable();
67420 pipe_count = p->rtort_pipe_count;
67421 @@ -1133,10 +1133,10 @@ rcu_torture_printk(char *page)
67422 rcu_torture_current,
67423 rcu_torture_current_version,
67424 list_empty(&rcu_torture_freelist),
67425 - atomic_read(&n_rcu_torture_alloc),
67426 - atomic_read(&n_rcu_torture_alloc_fail),
67427 - atomic_read(&n_rcu_torture_free),
67428 - atomic_read(&n_rcu_torture_mberror),
67429 + atomic_read_unchecked(&n_rcu_torture_alloc),
67430 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
67431 + atomic_read_unchecked(&n_rcu_torture_free),
67432 + atomic_read_unchecked(&n_rcu_torture_mberror),
67433 n_rcu_torture_boost_ktrerror,
67434 n_rcu_torture_boost_rterror,
67435 n_rcu_torture_boost_failure,
67436 @@ -1146,7 +1146,7 @@ rcu_torture_printk(char *page)
67437 n_online_attempts,
67438 n_offline_successes,
67439 n_offline_attempts);
67440 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
67441 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
67442 n_rcu_torture_boost_ktrerror != 0 ||
67443 n_rcu_torture_boost_rterror != 0 ||
67444 n_rcu_torture_boost_failure != 0)
67445 @@ -1154,7 +1154,7 @@ rcu_torture_printk(char *page)
67446 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
67447 if (i > 1) {
67448 cnt += sprintf(&page[cnt], "!!! ");
67449 - atomic_inc(&n_rcu_torture_error);
67450 + atomic_inc_unchecked(&n_rcu_torture_error);
67451 WARN_ON_ONCE(1);
67452 }
67453 cnt += sprintf(&page[cnt], "Reader Pipe: ");
67454 @@ -1168,7 +1168,7 @@ rcu_torture_printk(char *page)
67455 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
67456 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67457 cnt += sprintf(&page[cnt], " %d",
67458 - atomic_read(&rcu_torture_wcount[i]));
67459 + atomic_read_unchecked(&rcu_torture_wcount[i]));
67460 }
67461 cnt += sprintf(&page[cnt], "\n");
67462 if (cur_ops->stats)
67463 @@ -1676,7 +1676,7 @@ rcu_torture_cleanup(void)
67464
67465 if (cur_ops->cleanup)
67466 cur_ops->cleanup();
67467 - if (atomic_read(&n_rcu_torture_error))
67468 + if (atomic_read_unchecked(&n_rcu_torture_error))
67469 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
67470 else if (n_online_successes != n_online_attempts ||
67471 n_offline_successes != n_offline_attempts)
67472 @@ -1744,17 +1744,17 @@ rcu_torture_init(void)
67473
67474 rcu_torture_current = NULL;
67475 rcu_torture_current_version = 0;
67476 - atomic_set(&n_rcu_torture_alloc, 0);
67477 - atomic_set(&n_rcu_torture_alloc_fail, 0);
67478 - atomic_set(&n_rcu_torture_free, 0);
67479 - atomic_set(&n_rcu_torture_mberror, 0);
67480 - atomic_set(&n_rcu_torture_error, 0);
67481 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
67482 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
67483 + atomic_set_unchecked(&n_rcu_torture_free, 0);
67484 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
67485 + atomic_set_unchecked(&n_rcu_torture_error, 0);
67486 n_rcu_torture_boost_ktrerror = 0;
67487 n_rcu_torture_boost_rterror = 0;
67488 n_rcu_torture_boost_failure = 0;
67489 n_rcu_torture_boosts = 0;
67490 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
67491 - atomic_set(&rcu_torture_wcount[i], 0);
67492 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
67493 for_each_possible_cpu(cpu) {
67494 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67495 per_cpu(rcu_torture_count, cpu)[i] = 0;
67496 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
67497 index d0c5baf..109b2e7 100644
67498 --- a/kernel/rcutree.c
67499 +++ b/kernel/rcutree.c
67500 @@ -357,9 +357,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
67501 rcu_prepare_for_idle(smp_processor_id());
67502 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67503 smp_mb__before_atomic_inc(); /* See above. */
67504 - atomic_inc(&rdtp->dynticks);
67505 + atomic_inc_unchecked(&rdtp->dynticks);
67506 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
67507 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67508 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67509
67510 /*
67511 * The idle task is not permitted to enter the idle loop while
67512 @@ -448,10 +448,10 @@ void rcu_irq_exit(void)
67513 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
67514 {
67515 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
67516 - atomic_inc(&rdtp->dynticks);
67517 + atomic_inc_unchecked(&rdtp->dynticks);
67518 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67519 smp_mb__after_atomic_inc(); /* See above. */
67520 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67521 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67522 rcu_cleanup_after_idle(smp_processor_id());
67523 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
67524 if (!is_idle_task(current)) {
67525 @@ -545,14 +545,14 @@ void rcu_nmi_enter(void)
67526 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
67527
67528 if (rdtp->dynticks_nmi_nesting == 0 &&
67529 - (atomic_read(&rdtp->dynticks) & 0x1))
67530 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
67531 return;
67532 rdtp->dynticks_nmi_nesting++;
67533 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
67534 - atomic_inc(&rdtp->dynticks);
67535 + atomic_inc_unchecked(&rdtp->dynticks);
67536 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67537 smp_mb__after_atomic_inc(); /* See above. */
67538 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67539 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67540 }
67541
67542 /**
67543 @@ -571,9 +571,9 @@ void rcu_nmi_exit(void)
67544 return;
67545 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67546 smp_mb__before_atomic_inc(); /* See above. */
67547 - atomic_inc(&rdtp->dynticks);
67548 + atomic_inc_unchecked(&rdtp->dynticks);
67549 smp_mb__after_atomic_inc(); /* Force delay to next write. */
67550 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67551 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67552 }
67553
67554 #ifdef CONFIG_PROVE_RCU
67555 @@ -589,7 +589,7 @@ int rcu_is_cpu_idle(void)
67556 int ret;
67557
67558 preempt_disable();
67559 - ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
67560 + ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
67561 preempt_enable();
67562 return ret;
67563 }
67564 @@ -659,7 +659,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
67565 */
67566 static int dyntick_save_progress_counter(struct rcu_data *rdp)
67567 {
67568 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
67569 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67570 return (rdp->dynticks_snap & 0x1) == 0;
67571 }
67572
67573 @@ -674,7 +674,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
67574 unsigned int curr;
67575 unsigned int snap;
67576
67577 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
67578 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67579 snap = (unsigned int)rdp->dynticks_snap;
67580
67581 /*
67582 @@ -704,10 +704,10 @@ static int jiffies_till_stall_check(void)
67583 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
67584 */
67585 if (till_stall_check < 3) {
67586 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
67587 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
67588 till_stall_check = 3;
67589 } else if (till_stall_check > 300) {
67590 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
67591 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
67592 till_stall_check = 300;
67593 }
67594 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
67595 @@ -1766,7 +1766,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
67596 /*
67597 * Do RCU core processing for the current CPU.
67598 */
67599 -static void rcu_process_callbacks(struct softirq_action *unused)
67600 +static void rcu_process_callbacks(void)
67601 {
67602 trace_rcu_utilization("Start RCU core");
67603 __rcu_process_callbacks(&rcu_sched_state,
67604 @@ -1949,8 +1949,8 @@ void synchronize_rcu_bh(void)
67605 }
67606 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
67607
67608 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
67609 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
67610 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
67611 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
67612
67613 static int synchronize_sched_expedited_cpu_stop(void *data)
67614 {
67615 @@ -2011,7 +2011,7 @@ void synchronize_sched_expedited(void)
67616 int firstsnap, s, snap, trycount = 0;
67617
67618 /* Note that atomic_inc_return() implies full memory barrier. */
67619 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
67620 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
67621 get_online_cpus();
67622 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
67623
67624 @@ -2033,7 +2033,7 @@ void synchronize_sched_expedited(void)
67625 }
67626
67627 /* Check to see if someone else did our work for us. */
67628 - s = atomic_read(&sync_sched_expedited_done);
67629 + s = atomic_read_unchecked(&sync_sched_expedited_done);
67630 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
67631 smp_mb(); /* ensure test happens before caller kfree */
67632 return;
67633 @@ -2048,7 +2048,7 @@ void synchronize_sched_expedited(void)
67634 * grace period works for us.
67635 */
67636 get_online_cpus();
67637 - snap = atomic_read(&sync_sched_expedited_started);
67638 + snap = atomic_read_unchecked(&sync_sched_expedited_started);
67639 smp_mb(); /* ensure read is before try_stop_cpus(). */
67640 }
67641
67642 @@ -2059,12 +2059,12 @@ void synchronize_sched_expedited(void)
67643 * than we did beat us to the punch.
67644 */
67645 do {
67646 - s = atomic_read(&sync_sched_expedited_done);
67647 + s = atomic_read_unchecked(&sync_sched_expedited_done);
67648 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
67649 smp_mb(); /* ensure test happens before caller kfree */
67650 break;
67651 }
67652 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
67653 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
67654
67655 put_online_cpus();
67656 }
67657 @@ -2262,7 +2262,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
67658 rdp->qlen = 0;
67659 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
67660 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
67661 - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
67662 + WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
67663 rdp->cpu = cpu;
67664 rdp->rsp = rsp;
67665 raw_spin_unlock_irqrestore(&rnp->lock, flags);
67666 @@ -2290,8 +2290,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
67667 rdp->n_force_qs_snap = rsp->n_force_qs;
67668 rdp->blimit = blimit;
67669 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
67670 - atomic_set(&rdp->dynticks->dynticks,
67671 - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
67672 + atomic_set_unchecked(&rdp->dynticks->dynticks,
67673 + (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
67674 rcu_prepare_for_idle_init(cpu);
67675 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
67676
67677 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
67678 index cdd1be0..5b2efb4 100644
67679 --- a/kernel/rcutree.h
67680 +++ b/kernel/rcutree.h
67681 @@ -87,7 +87,7 @@ struct rcu_dynticks {
67682 long long dynticks_nesting; /* Track irq/process nesting level. */
67683 /* Process level is worth LLONG_MAX/2. */
67684 int dynticks_nmi_nesting; /* Track NMI nesting level. */
67685 - atomic_t dynticks; /* Even value for idle, else odd. */
67686 + atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
67687 };
67688
67689 /* RCU's kthread states for tracing. */
67690 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
67691 index c023464..7f57225 100644
67692 --- a/kernel/rcutree_plugin.h
67693 +++ b/kernel/rcutree_plugin.h
67694 @@ -909,7 +909,7 @@ void synchronize_rcu_expedited(void)
67695
67696 /* Clean up and exit. */
67697 smp_mb(); /* ensure expedited GP seen before counter increment. */
67698 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
67699 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
67700 unlock_mb_ret:
67701 mutex_unlock(&sync_rcu_preempt_exp_mutex);
67702 mb_ret:
67703 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
67704 index ed459ed..a03c3fa 100644
67705 --- a/kernel/rcutree_trace.c
67706 +++ b/kernel/rcutree_trace.c
67707 @@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
67708 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
67709 rdp->qs_pending);
67710 seq_printf(m, " dt=%d/%llx/%d df=%lu",
67711 - atomic_read(&rdp->dynticks->dynticks),
67712 + atomic_read_unchecked(&rdp->dynticks->dynticks),
67713 rdp->dynticks->dynticks_nesting,
67714 rdp->dynticks->dynticks_nmi_nesting,
67715 rdp->dynticks_fqs);
67716 @@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
67717 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
67718 rdp->qs_pending);
67719 seq_printf(m, ",%d,%llx,%d,%lu",
67720 - atomic_read(&rdp->dynticks->dynticks),
67721 + atomic_read_unchecked(&rdp->dynticks->dynticks),
67722 rdp->dynticks->dynticks_nesting,
67723 rdp->dynticks->dynticks_nmi_nesting,
67724 rdp->dynticks_fqs);
67725 diff --git a/kernel/resource.c b/kernel/resource.c
67726 index 7e8ea66..1efd11f 100644
67727 --- a/kernel/resource.c
67728 +++ b/kernel/resource.c
67729 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
67730
67731 static int __init ioresources_init(void)
67732 {
67733 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67734 +#ifdef CONFIG_GRKERNSEC_PROC_USER
67735 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
67736 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
67737 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67738 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
67739 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
67740 +#endif
67741 +#else
67742 proc_create("ioports", 0, NULL, &proc_ioports_operations);
67743 proc_create("iomem", 0, NULL, &proc_iomem_operations);
67744 +#endif
67745 return 0;
67746 }
67747 __initcall(ioresources_init);
67748 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
67749 index 98ec494..4241d6d 100644
67750 --- a/kernel/rtmutex-tester.c
67751 +++ b/kernel/rtmutex-tester.c
67752 @@ -20,7 +20,7 @@
67753 #define MAX_RT_TEST_MUTEXES 8
67754
67755 static spinlock_t rttest_lock;
67756 -static atomic_t rttest_event;
67757 +static atomic_unchecked_t rttest_event;
67758
67759 struct test_thread_data {
67760 int opcode;
67761 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67762
67763 case RTTEST_LOCKCONT:
67764 td->mutexes[td->opdata] = 1;
67765 - td->event = atomic_add_return(1, &rttest_event);
67766 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67767 return 0;
67768
67769 case RTTEST_RESET:
67770 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67771 return 0;
67772
67773 case RTTEST_RESETEVENT:
67774 - atomic_set(&rttest_event, 0);
67775 + atomic_set_unchecked(&rttest_event, 0);
67776 return 0;
67777
67778 default:
67779 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67780 return ret;
67781
67782 td->mutexes[id] = 1;
67783 - td->event = atomic_add_return(1, &rttest_event);
67784 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67785 rt_mutex_lock(&mutexes[id]);
67786 - td->event = atomic_add_return(1, &rttest_event);
67787 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67788 td->mutexes[id] = 4;
67789 return 0;
67790
67791 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67792 return ret;
67793
67794 td->mutexes[id] = 1;
67795 - td->event = atomic_add_return(1, &rttest_event);
67796 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67797 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
67798 - td->event = atomic_add_return(1, &rttest_event);
67799 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67800 td->mutexes[id] = ret ? 0 : 4;
67801 return ret ? -EINTR : 0;
67802
67803 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67804 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
67805 return ret;
67806
67807 - td->event = atomic_add_return(1, &rttest_event);
67808 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67809 rt_mutex_unlock(&mutexes[id]);
67810 - td->event = atomic_add_return(1, &rttest_event);
67811 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67812 td->mutexes[id] = 0;
67813 return 0;
67814
67815 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67816 break;
67817
67818 td->mutexes[dat] = 2;
67819 - td->event = atomic_add_return(1, &rttest_event);
67820 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67821 break;
67822
67823 default:
67824 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67825 return;
67826
67827 td->mutexes[dat] = 3;
67828 - td->event = atomic_add_return(1, &rttest_event);
67829 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67830 break;
67831
67832 case RTTEST_LOCKNOWAIT:
67833 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67834 return;
67835
67836 td->mutexes[dat] = 1;
67837 - td->event = atomic_add_return(1, &rttest_event);
67838 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67839 return;
67840
67841 default:
67842 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
67843 index 0984a21..939f183 100644
67844 --- a/kernel/sched/auto_group.c
67845 +++ b/kernel/sched/auto_group.c
67846 @@ -11,7 +11,7 @@
67847
67848 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
67849 static struct autogroup autogroup_default;
67850 -static atomic_t autogroup_seq_nr;
67851 +static atomic_unchecked_t autogroup_seq_nr;
67852
67853 void __init autogroup_init(struct task_struct *init_task)
67854 {
67855 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
67856
67857 kref_init(&ag->kref);
67858 init_rwsem(&ag->lock);
67859 - ag->id = atomic_inc_return(&autogroup_seq_nr);
67860 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
67861 ag->tg = tg;
67862 #ifdef CONFIG_RT_GROUP_SCHED
67863 /*
67864 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
67865 index e5212ae..2fcf98d 100644
67866 --- a/kernel/sched/core.c
67867 +++ b/kernel/sched/core.c
67868 @@ -3907,6 +3907,8 @@ int can_nice(const struct task_struct *p, const int nice)
67869 /* convert nice value [19,-20] to rlimit style value [1,40] */
67870 int nice_rlim = 20 - nice;
67871
67872 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
67873 +
67874 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
67875 capable(CAP_SYS_NICE));
67876 }
67877 @@ -3940,7 +3942,8 @@ SYSCALL_DEFINE1(nice, int, increment)
67878 if (nice > 19)
67879 nice = 19;
67880
67881 - if (increment < 0 && !can_nice(current, nice))
67882 + if (increment < 0 && (!can_nice(current, nice) ||
67883 + gr_handle_chroot_nice()))
67884 return -EPERM;
67885
67886 retval = security_task_setnice(current, nice);
67887 @@ -4097,6 +4100,7 @@ recheck:
67888 unsigned long rlim_rtprio =
67889 task_rlimit(p, RLIMIT_RTPRIO);
67890
67891 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
67892 /* can't set/change the rt policy */
67893 if (policy != p->policy && !rlim_rtprio)
67894 return -EPERM;
67895 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
67896 index e955364..eacd2a4 100644
67897 --- a/kernel/sched/fair.c
67898 +++ b/kernel/sched/fair.c
67899 @@ -5107,7 +5107,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
67900 * run_rebalance_domains is triggered when needed from the scheduler tick.
67901 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
67902 */
67903 -static void run_rebalance_domains(struct softirq_action *h)
67904 +static void run_rebalance_domains(void)
67905 {
67906 int this_cpu = smp_processor_id();
67907 struct rq *this_rq = cpu_rq(this_cpu);
67908 diff --git a/kernel/signal.c b/kernel/signal.c
67909 index 17afcaf..4500b05 100644
67910 --- a/kernel/signal.c
67911 +++ b/kernel/signal.c
67912 @@ -47,12 +47,12 @@ static struct kmem_cache *sigqueue_cachep;
67913
67914 int print_fatal_signals __read_mostly;
67915
67916 -static void __user *sig_handler(struct task_struct *t, int sig)
67917 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
67918 {
67919 return t->sighand->action[sig - 1].sa.sa_handler;
67920 }
67921
67922 -static int sig_handler_ignored(void __user *handler, int sig)
67923 +static int sig_handler_ignored(__sighandler_t handler, int sig)
67924 {
67925 /* Is it explicitly or implicitly ignored? */
67926 return handler == SIG_IGN ||
67927 @@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
67928
67929 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
67930 {
67931 - void __user *handler;
67932 + __sighandler_t handler;
67933
67934 handler = sig_handler(t, sig);
67935
67936 @@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
67937 atomic_inc(&user->sigpending);
67938 rcu_read_unlock();
67939
67940 + if (!override_rlimit)
67941 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
67942 +
67943 if (override_rlimit ||
67944 atomic_read(&user->sigpending) <=
67945 task_rlimit(t, RLIMIT_SIGPENDING)) {
67946 @@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
67947
67948 int unhandled_signal(struct task_struct *tsk, int sig)
67949 {
67950 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
67951 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
67952 if (is_global_init(tsk))
67953 return 1;
67954 if (handler != SIG_IGN && handler != SIG_DFL)
67955 @@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
67956 }
67957 }
67958
67959 + /* allow glibc communication via tgkill to other threads in our
67960 + thread group */
67961 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
67962 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
67963 + && gr_handle_signal(t, sig))
67964 + return -EPERM;
67965 +
67966 return security_task_kill(t, info, sig, 0);
67967 }
67968
67969 @@ -1204,7 +1214,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
67970 return send_signal(sig, info, p, 1);
67971 }
67972
67973 -static int
67974 +int
67975 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67976 {
67977 return send_signal(sig, info, t, 0);
67978 @@ -1241,6 +1251,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67979 unsigned long int flags;
67980 int ret, blocked, ignored;
67981 struct k_sigaction *action;
67982 + int is_unhandled = 0;
67983
67984 spin_lock_irqsave(&t->sighand->siglock, flags);
67985 action = &t->sighand->action[sig-1];
67986 @@ -1255,9 +1266,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67987 }
67988 if (action->sa.sa_handler == SIG_DFL)
67989 t->signal->flags &= ~SIGNAL_UNKILLABLE;
67990 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
67991 + is_unhandled = 1;
67992 ret = specific_send_sig_info(sig, info, t);
67993 spin_unlock_irqrestore(&t->sighand->siglock, flags);
67994
67995 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
67996 + normal operation */
67997 + if (is_unhandled) {
67998 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
67999 + gr_handle_crash(t, sig);
68000 + }
68001 +
68002 return ret;
68003 }
68004
68005 @@ -1324,8 +1344,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68006 ret = check_kill_permission(sig, info, p);
68007 rcu_read_unlock();
68008
68009 - if (!ret && sig)
68010 + if (!ret && sig) {
68011 ret = do_send_sig_info(sig, info, p, true);
68012 + if (!ret)
68013 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
68014 + }
68015
68016 return ret;
68017 }
68018 @@ -2840,7 +2863,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
68019 int error = -ESRCH;
68020
68021 rcu_read_lock();
68022 - p = find_task_by_vpid(pid);
68023 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68024 + /* allow glibc communication via tgkill to other threads in our
68025 + thread group */
68026 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
68027 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
68028 + p = find_task_by_vpid_unrestricted(pid);
68029 + else
68030 +#endif
68031 + p = find_task_by_vpid(pid);
68032 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
68033 error = check_kill_permission(sig, info, p);
68034 /*
68035 diff --git a/kernel/smp.c b/kernel/smp.c
68036 index 2f8b10e..a41bc14 100644
68037 --- a/kernel/smp.c
68038 +++ b/kernel/smp.c
68039 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
68040 }
68041 EXPORT_SYMBOL(smp_call_function);
68042
68043 -void ipi_call_lock(void)
68044 +void ipi_call_lock(void) __acquires(call_function.lock)
68045 {
68046 raw_spin_lock(&call_function.lock);
68047 }
68048
68049 -void ipi_call_unlock(void)
68050 +void ipi_call_unlock(void) __releases(call_function.lock)
68051 {
68052 raw_spin_unlock(&call_function.lock);
68053 }
68054
68055 -void ipi_call_lock_irq(void)
68056 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
68057 {
68058 raw_spin_lock_irq(&call_function.lock);
68059 }
68060
68061 -void ipi_call_unlock_irq(void)
68062 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
68063 {
68064 raw_spin_unlock_irq(&call_function.lock);
68065 }
68066 diff --git a/kernel/softirq.c b/kernel/softirq.c
68067 index 671f959..91c51cb 100644
68068 --- a/kernel/softirq.c
68069 +++ b/kernel/softirq.c
68070 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
68071
68072 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
68073
68074 -char *softirq_to_name[NR_SOFTIRQS] = {
68075 +const char * const softirq_to_name[NR_SOFTIRQS] = {
68076 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
68077 "TASKLET", "SCHED", "HRTIMER", "RCU"
68078 };
68079 @@ -235,7 +235,7 @@ restart:
68080 kstat_incr_softirqs_this_cpu(vec_nr);
68081
68082 trace_softirq_entry(vec_nr);
68083 - h->action(h);
68084 + h->action();
68085 trace_softirq_exit(vec_nr);
68086 if (unlikely(prev_count != preempt_count())) {
68087 printk(KERN_ERR "huh, entered softirq %u %s %p"
68088 @@ -381,9 +381,11 @@ void __raise_softirq_irqoff(unsigned int nr)
68089 or_softirq_pending(1UL << nr);
68090 }
68091
68092 -void open_softirq(int nr, void (*action)(struct softirq_action *))
68093 +void open_softirq(int nr, void (*action)(void))
68094 {
68095 - softirq_vec[nr].action = action;
68096 + pax_open_kernel();
68097 + *(void **)&softirq_vec[nr].action = action;
68098 + pax_close_kernel();
68099 }
68100
68101 /*
68102 @@ -437,7 +439,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
68103
68104 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
68105
68106 -static void tasklet_action(struct softirq_action *a)
68107 +static void tasklet_action(void)
68108 {
68109 struct tasklet_struct *list;
68110
68111 @@ -472,7 +474,7 @@ static void tasklet_action(struct softirq_action *a)
68112 }
68113 }
68114
68115 -static void tasklet_hi_action(struct softirq_action *a)
68116 +static void tasklet_hi_action(void)
68117 {
68118 struct tasklet_struct *list;
68119
68120 diff --git a/kernel/sys.c b/kernel/sys.c
68121 index e7006eb..8fb7c51 100644
68122 --- a/kernel/sys.c
68123 +++ b/kernel/sys.c
68124 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
68125 error = -EACCES;
68126 goto out;
68127 }
68128 +
68129 + if (gr_handle_chroot_setpriority(p, niceval)) {
68130 + error = -EACCES;
68131 + goto out;
68132 + }
68133 +
68134 no_nice = security_task_setnice(p, niceval);
68135 if (no_nice) {
68136 error = no_nice;
68137 @@ -581,6 +587,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
68138 goto error;
68139 }
68140
68141 + if (gr_check_group_change(new->gid, new->egid, -1))
68142 + goto error;
68143 +
68144 if (rgid != (gid_t) -1 ||
68145 (egid != (gid_t) -1 && egid != old->gid))
68146 new->sgid = new->egid;
68147 @@ -610,6 +619,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
68148 old = current_cred();
68149
68150 retval = -EPERM;
68151 +
68152 + if (gr_check_group_change(gid, gid, gid))
68153 + goto error;
68154 +
68155 if (nsown_capable(CAP_SETGID))
68156 new->gid = new->egid = new->sgid = new->fsgid = gid;
68157 else if (gid == old->gid || gid == old->sgid)
68158 @@ -627,7 +640,7 @@ error:
68159 /*
68160 * change the user struct in a credentials set to match the new UID
68161 */
68162 -static int set_user(struct cred *new)
68163 +int set_user(struct cred *new)
68164 {
68165 struct user_struct *new_user;
68166
68167 @@ -697,6 +710,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
68168 goto error;
68169 }
68170
68171 + if (gr_check_user_change(new->uid, new->euid, -1))
68172 + goto error;
68173 +
68174 if (new->uid != old->uid) {
68175 retval = set_user(new);
68176 if (retval < 0)
68177 @@ -741,6 +757,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
68178 old = current_cred();
68179
68180 retval = -EPERM;
68181 +
68182 + if (gr_check_crash_uid(uid))
68183 + goto error;
68184 + if (gr_check_user_change(uid, uid, uid))
68185 + goto error;
68186 +
68187 if (nsown_capable(CAP_SETUID)) {
68188 new->suid = new->uid = uid;
68189 if (uid != old->uid) {
68190 @@ -795,6 +817,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
68191 goto error;
68192 }
68193
68194 + if (gr_check_user_change(ruid, euid, -1))
68195 + goto error;
68196 +
68197 if (ruid != (uid_t) -1) {
68198 new->uid = ruid;
68199 if (ruid != old->uid) {
68200 @@ -859,6 +884,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
68201 goto error;
68202 }
68203
68204 + if (gr_check_group_change(rgid, egid, -1))
68205 + goto error;
68206 +
68207 if (rgid != (gid_t) -1)
68208 new->gid = rgid;
68209 if (egid != (gid_t) -1)
68210 @@ -905,6 +933,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68211 old = current_cred();
68212 old_fsuid = old->fsuid;
68213
68214 + if (gr_check_user_change(-1, -1, uid))
68215 + goto error;
68216 +
68217 if (uid == old->uid || uid == old->euid ||
68218 uid == old->suid || uid == old->fsuid ||
68219 nsown_capable(CAP_SETUID)) {
68220 @@ -915,6 +946,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68221 }
68222 }
68223
68224 +error:
68225 abort_creds(new);
68226 return old_fsuid;
68227
68228 @@ -941,12 +973,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
68229 if (gid == old->gid || gid == old->egid ||
68230 gid == old->sgid || gid == old->fsgid ||
68231 nsown_capable(CAP_SETGID)) {
68232 + if (gr_check_group_change(-1, -1, gid))
68233 + goto error;
68234 +
68235 if (gid != old_fsgid) {
68236 new->fsgid = gid;
68237 goto change_okay;
68238 }
68239 }
68240
68241 +error:
68242 abort_creds(new);
68243 return old_fsgid;
68244
68245 @@ -1198,7 +1234,10 @@ static int override_release(char __user *release, int len)
68246 }
68247 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
68248 snprintf(buf, len, "2.6.%u%s", v, rest);
68249 - ret = copy_to_user(release, buf, len);
68250 + if (len > sizeof(buf))
68251 + ret = -EFAULT;
68252 + else
68253 + ret = copy_to_user(release, buf, len);
68254 }
68255 return ret;
68256 }
68257 @@ -1252,19 +1291,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
68258 return -EFAULT;
68259
68260 down_read(&uts_sem);
68261 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
68262 + error = __copy_to_user(name->sysname, &utsname()->sysname,
68263 __OLD_UTS_LEN);
68264 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
68265 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
68266 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
68267 __OLD_UTS_LEN);
68268 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
68269 - error |= __copy_to_user(&name->release, &utsname()->release,
68270 + error |= __copy_to_user(name->release, &utsname()->release,
68271 __OLD_UTS_LEN);
68272 error |= __put_user(0, name->release + __OLD_UTS_LEN);
68273 - error |= __copy_to_user(&name->version, &utsname()->version,
68274 + error |= __copy_to_user(name->version, &utsname()->version,
68275 __OLD_UTS_LEN);
68276 error |= __put_user(0, name->version + __OLD_UTS_LEN);
68277 - error |= __copy_to_user(&name->machine, &utsname()->machine,
68278 + error |= __copy_to_user(name->machine, &utsname()->machine,
68279 __OLD_UTS_LEN);
68280 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
68281 up_read(&uts_sem);
68282 @@ -1847,7 +1886,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
68283 error = get_dumpable(me->mm);
68284 break;
68285 case PR_SET_DUMPABLE:
68286 - if (arg2 < 0 || arg2 > 1) {
68287 + if (arg2 > 1) {
68288 error = -EINVAL;
68289 break;
68290 }
68291 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
68292 index 4ab1187..0b75ced 100644
68293 --- a/kernel/sysctl.c
68294 +++ b/kernel/sysctl.c
68295 @@ -91,7 +91,6 @@
68296
68297
68298 #if defined(CONFIG_SYSCTL)
68299 -
68300 /* External variables not in a header file. */
68301 extern int sysctl_overcommit_memory;
68302 extern int sysctl_overcommit_ratio;
68303 @@ -169,10 +168,8 @@ static int proc_taint(struct ctl_table *table, int write,
68304 void __user *buffer, size_t *lenp, loff_t *ppos);
68305 #endif
68306
68307 -#ifdef CONFIG_PRINTK
68308 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68309 void __user *buffer, size_t *lenp, loff_t *ppos);
68310 -#endif
68311
68312 #ifdef CONFIG_MAGIC_SYSRQ
68313 /* Note: sysrq code uses it's own private copy */
68314 @@ -196,6 +193,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
68315
68316 #endif
68317
68318 +extern struct ctl_table grsecurity_table[];
68319 +
68320 static struct ctl_table kern_table[];
68321 static struct ctl_table vm_table[];
68322 static struct ctl_table fs_table[];
68323 @@ -210,6 +209,20 @@ extern struct ctl_table epoll_table[];
68324 int sysctl_legacy_va_layout;
68325 #endif
68326
68327 +#ifdef CONFIG_PAX_SOFTMODE
68328 +static ctl_table pax_table[] = {
68329 + {
68330 + .procname = "softmode",
68331 + .data = &pax_softmode,
68332 + .maxlen = sizeof(unsigned int),
68333 + .mode = 0600,
68334 + .proc_handler = &proc_dointvec,
68335 + },
68336 +
68337 + { }
68338 +};
68339 +#endif
68340 +
68341 /* The default sysctl tables: */
68342
68343 static struct ctl_table sysctl_base_table[] = {
68344 @@ -256,6 +269,22 @@ static int max_extfrag_threshold = 1000;
68345 #endif
68346
68347 static struct ctl_table kern_table[] = {
68348 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
68349 + {
68350 + .procname = "grsecurity",
68351 + .mode = 0500,
68352 + .child = grsecurity_table,
68353 + },
68354 +#endif
68355 +
68356 +#ifdef CONFIG_PAX_SOFTMODE
68357 + {
68358 + .procname = "pax",
68359 + .mode = 0500,
68360 + .child = pax_table,
68361 + },
68362 +#endif
68363 +
68364 {
68365 .procname = "sched_child_runs_first",
68366 .data = &sysctl_sched_child_runs_first,
68367 @@ -540,7 +569,7 @@ static struct ctl_table kern_table[] = {
68368 .data = &modprobe_path,
68369 .maxlen = KMOD_PATH_LEN,
68370 .mode = 0644,
68371 - .proc_handler = proc_dostring,
68372 + .proc_handler = proc_dostring_modpriv,
68373 },
68374 {
68375 .procname = "modules_disabled",
68376 @@ -707,16 +736,20 @@ static struct ctl_table kern_table[] = {
68377 .extra1 = &zero,
68378 .extra2 = &one,
68379 },
68380 +#endif
68381 {
68382 .procname = "kptr_restrict",
68383 .data = &kptr_restrict,
68384 .maxlen = sizeof(int),
68385 .mode = 0644,
68386 .proc_handler = proc_dointvec_minmax_sysadmin,
68387 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68388 + .extra1 = &two,
68389 +#else
68390 .extra1 = &zero,
68391 +#endif
68392 .extra2 = &two,
68393 },
68394 -#endif
68395 {
68396 .procname = "ngroups_max",
68397 .data = &ngroups_max,
68398 @@ -1215,6 +1248,13 @@ static struct ctl_table vm_table[] = {
68399 .proc_handler = proc_dointvec_minmax,
68400 .extra1 = &zero,
68401 },
68402 + {
68403 + .procname = "heap_stack_gap",
68404 + .data = &sysctl_heap_stack_gap,
68405 + .maxlen = sizeof(sysctl_heap_stack_gap),
68406 + .mode = 0644,
68407 + .proc_handler = proc_doulongvec_minmax,
68408 + },
68409 #else
68410 {
68411 .procname = "nr_trim_pages",
68412 @@ -1645,6 +1685,16 @@ int proc_dostring(struct ctl_table *table, int write,
68413 buffer, lenp, ppos);
68414 }
68415
68416 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68417 + void __user *buffer, size_t *lenp, loff_t *ppos)
68418 +{
68419 + if (write && !capable(CAP_SYS_MODULE))
68420 + return -EPERM;
68421 +
68422 + return _proc_do_string(table->data, table->maxlen, write,
68423 + buffer, lenp, ppos);
68424 +}
68425 +
68426 static size_t proc_skip_spaces(char **buf)
68427 {
68428 size_t ret;
68429 @@ -1750,6 +1800,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
68430 len = strlen(tmp);
68431 if (len > *size)
68432 len = *size;
68433 + if (len > sizeof(tmp))
68434 + len = sizeof(tmp);
68435 if (copy_to_user(*buf, tmp, len))
68436 return -EFAULT;
68437 *size -= len;
68438 @@ -1942,7 +1994,6 @@ static int proc_taint(struct ctl_table *table, int write,
68439 return err;
68440 }
68441
68442 -#ifdef CONFIG_PRINTK
68443 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68444 void __user *buffer, size_t *lenp, loff_t *ppos)
68445 {
68446 @@ -1951,7 +2002,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68447
68448 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
68449 }
68450 -#endif
68451
68452 struct do_proc_dointvec_minmax_conv_param {
68453 int *min;
68454 @@ -2066,8 +2116,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
68455 *i = val;
68456 } else {
68457 val = convdiv * (*i) / convmul;
68458 - if (!first)
68459 + if (!first) {
68460 err = proc_put_char(&buffer, &left, '\t');
68461 + if (err)
68462 + break;
68463 + }
68464 err = proc_put_long(&buffer, &left, val, false);
68465 if (err)
68466 break;
68467 @@ -2459,6 +2512,12 @@ int proc_dostring(struct ctl_table *table, int write,
68468 return -ENOSYS;
68469 }
68470
68471 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68472 + void __user *buffer, size_t *lenp, loff_t *ppos)
68473 +{
68474 + return -ENOSYS;
68475 +}
68476 +
68477 int proc_dointvec(struct ctl_table *table, int write,
68478 void __user *buffer, size_t *lenp, loff_t *ppos)
68479 {
68480 @@ -2515,5 +2574,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
68481 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
68482 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
68483 EXPORT_SYMBOL(proc_dostring);
68484 +EXPORT_SYMBOL(proc_dostring_modpriv);
68485 EXPORT_SYMBOL(proc_doulongvec_minmax);
68486 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
68487 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
68488 index a650694..aaeeb20 100644
68489 --- a/kernel/sysctl_binary.c
68490 +++ b/kernel/sysctl_binary.c
68491 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
68492 int i;
68493
68494 set_fs(KERNEL_DS);
68495 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68496 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68497 set_fs(old_fs);
68498 if (result < 0)
68499 goto out_kfree;
68500 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
68501 }
68502
68503 set_fs(KERNEL_DS);
68504 - result = vfs_write(file, buffer, str - buffer, &pos);
68505 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68506 set_fs(old_fs);
68507 if (result < 0)
68508 goto out_kfree;
68509 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
68510 int i;
68511
68512 set_fs(KERNEL_DS);
68513 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68514 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68515 set_fs(old_fs);
68516 if (result < 0)
68517 goto out_kfree;
68518 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
68519 }
68520
68521 set_fs(KERNEL_DS);
68522 - result = vfs_write(file, buffer, str - buffer, &pos);
68523 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68524 set_fs(old_fs);
68525 if (result < 0)
68526 goto out_kfree;
68527 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
68528 int i;
68529
68530 set_fs(KERNEL_DS);
68531 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68532 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68533 set_fs(old_fs);
68534 if (result < 0)
68535 goto out;
68536 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68537 __le16 dnaddr;
68538
68539 set_fs(KERNEL_DS);
68540 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68541 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68542 set_fs(old_fs);
68543 if (result < 0)
68544 goto out;
68545 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68546 le16_to_cpu(dnaddr) & 0x3ff);
68547
68548 set_fs(KERNEL_DS);
68549 - result = vfs_write(file, buf, len, &pos);
68550 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
68551 set_fs(old_fs);
68552 if (result < 0)
68553 goto out;
68554 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
68555 index e660464..c8b9e67 100644
68556 --- a/kernel/taskstats.c
68557 +++ b/kernel/taskstats.c
68558 @@ -27,9 +27,12 @@
68559 #include <linux/cgroup.h>
68560 #include <linux/fs.h>
68561 #include <linux/file.h>
68562 +#include <linux/grsecurity.h>
68563 #include <net/genetlink.h>
68564 #include <linux/atomic.h>
68565
68566 +extern int gr_is_taskstats_denied(int pid);
68567 +
68568 /*
68569 * Maximum length of a cpumask that can be specified in
68570 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
68571 @@ -556,6 +559,9 @@ err:
68572
68573 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
68574 {
68575 + if (gr_is_taskstats_denied(current->pid))
68576 + return -EACCES;
68577 +
68578 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
68579 return cmd_attr_register_cpumask(info);
68580 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
68581 diff --git a/kernel/time.c b/kernel/time.c
68582 index ba744cf..267b7c5 100644
68583 --- a/kernel/time.c
68584 +++ b/kernel/time.c
68585 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
68586 return error;
68587
68588 if (tz) {
68589 + /* we log in do_settimeofday called below, so don't log twice
68590 + */
68591 + if (!tv)
68592 + gr_log_timechange();
68593 +
68594 sys_tz = *tz;
68595 update_vsyscall_tz();
68596 if (firsttime) {
68597 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
68598 index 8a538c5..def79d4 100644
68599 --- a/kernel/time/alarmtimer.c
68600 +++ b/kernel/time/alarmtimer.c
68601 @@ -779,7 +779,7 @@ static int __init alarmtimer_init(void)
68602 struct platform_device *pdev;
68603 int error = 0;
68604 int i;
68605 - struct k_clock alarm_clock = {
68606 + static struct k_clock alarm_clock = {
68607 .clock_getres = alarm_clock_getres,
68608 .clock_get = alarm_clock_get,
68609 .timer_create = alarm_timer_create,
68610 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
68611 index f113755..ec24223 100644
68612 --- a/kernel/time/tick-broadcast.c
68613 +++ b/kernel/time/tick-broadcast.c
68614 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
68615 * then clear the broadcast bit.
68616 */
68617 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
68618 - int cpu = smp_processor_id();
68619 + cpu = smp_processor_id();
68620
68621 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
68622 tick_broadcast_clear_oneshot(cpu);
68623 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
68624 index d66b213..6947686 100644
68625 --- a/kernel/time/timekeeping.c
68626 +++ b/kernel/time/timekeeping.c
68627 @@ -14,6 +14,7 @@
68628 #include <linux/init.h>
68629 #include <linux/mm.h>
68630 #include <linux/sched.h>
68631 +#include <linux/grsecurity.h>
68632 #include <linux/syscore_ops.h>
68633 #include <linux/clocksource.h>
68634 #include <linux/jiffies.h>
68635 @@ -373,6 +374,8 @@ int do_settimeofday(const struct timespec *tv)
68636 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
68637 return -EINVAL;
68638
68639 + gr_log_timechange();
68640 +
68641 write_seqlock_irqsave(&timekeeper.lock, flags);
68642
68643 timekeeping_forward_now();
68644 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
68645 index 3258455..f35227d 100644
68646 --- a/kernel/time/timer_list.c
68647 +++ b/kernel/time/timer_list.c
68648 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
68649
68650 static void print_name_offset(struct seq_file *m, void *sym)
68651 {
68652 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68653 + SEQ_printf(m, "<%p>", NULL);
68654 +#else
68655 char symname[KSYM_NAME_LEN];
68656
68657 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
68658 SEQ_printf(m, "<%pK>", sym);
68659 else
68660 SEQ_printf(m, "%s", symname);
68661 +#endif
68662 }
68663
68664 static void
68665 @@ -112,7 +116,11 @@ next_one:
68666 static void
68667 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
68668 {
68669 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68670 + SEQ_printf(m, " .base: %p\n", NULL);
68671 +#else
68672 SEQ_printf(m, " .base: %pK\n", base);
68673 +#endif
68674 SEQ_printf(m, " .index: %d\n",
68675 base->index);
68676 SEQ_printf(m, " .resolution: %Lu nsecs\n",
68677 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
68678 {
68679 struct proc_dir_entry *pe;
68680
68681 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68682 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
68683 +#else
68684 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
68685 +#endif
68686 if (!pe)
68687 return -ENOMEM;
68688 return 0;
68689 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
68690 index 0b537f2..9e71eca 100644
68691 --- a/kernel/time/timer_stats.c
68692 +++ b/kernel/time/timer_stats.c
68693 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
68694 static unsigned long nr_entries;
68695 static struct entry entries[MAX_ENTRIES];
68696
68697 -static atomic_t overflow_count;
68698 +static atomic_unchecked_t overflow_count;
68699
68700 /*
68701 * The entries are in a hash-table, for fast lookup:
68702 @@ -140,7 +140,7 @@ static void reset_entries(void)
68703 nr_entries = 0;
68704 memset(entries, 0, sizeof(entries));
68705 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
68706 - atomic_set(&overflow_count, 0);
68707 + atomic_set_unchecked(&overflow_count, 0);
68708 }
68709
68710 static struct entry *alloc_entry(void)
68711 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68712 if (likely(entry))
68713 entry->count++;
68714 else
68715 - atomic_inc(&overflow_count);
68716 + atomic_inc_unchecked(&overflow_count);
68717
68718 out_unlock:
68719 raw_spin_unlock_irqrestore(lock, flags);
68720 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68721
68722 static void print_name_offset(struct seq_file *m, unsigned long addr)
68723 {
68724 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68725 + seq_printf(m, "<%p>", NULL);
68726 +#else
68727 char symname[KSYM_NAME_LEN];
68728
68729 if (lookup_symbol_name(addr, symname) < 0)
68730 seq_printf(m, "<%p>", (void *)addr);
68731 else
68732 seq_printf(m, "%s", symname);
68733 +#endif
68734 }
68735
68736 static int tstats_show(struct seq_file *m, void *v)
68737 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
68738
68739 seq_puts(m, "Timer Stats Version: v0.2\n");
68740 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
68741 - if (atomic_read(&overflow_count))
68742 + if (atomic_read_unchecked(&overflow_count))
68743 seq_printf(m, "Overflow: %d entries\n",
68744 - atomic_read(&overflow_count));
68745 + atomic_read_unchecked(&overflow_count));
68746
68747 for (i = 0; i < nr_entries; i++) {
68748 entry = entries + i;
68749 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
68750 {
68751 struct proc_dir_entry *pe;
68752
68753 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68754 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
68755 +#else
68756 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
68757 +#endif
68758 if (!pe)
68759 return -ENOMEM;
68760 return 0;
68761 diff --git a/kernel/timer.c b/kernel/timer.c
68762 index a297ffc..5e16b0b 100644
68763 --- a/kernel/timer.c
68764 +++ b/kernel/timer.c
68765 @@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
68766 /*
68767 * This function runs timers and the timer-tq in bottom half context.
68768 */
68769 -static void run_timer_softirq(struct softirq_action *h)
68770 +static void run_timer_softirq(void)
68771 {
68772 struct tvec_base *base = __this_cpu_read(tvec_bases);
68773
68774 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
68775 index c0bd030..62a1927 100644
68776 --- a/kernel/trace/blktrace.c
68777 +++ b/kernel/trace/blktrace.c
68778 @@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
68779 struct blk_trace *bt = filp->private_data;
68780 char buf[16];
68781
68782 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
68783 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
68784
68785 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
68786 }
68787 @@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
68788 return 1;
68789
68790 bt = buf->chan->private_data;
68791 - atomic_inc(&bt->dropped);
68792 + atomic_inc_unchecked(&bt->dropped);
68793 return 0;
68794 }
68795
68796 @@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
68797
68798 bt->dir = dir;
68799 bt->dev = dev;
68800 - atomic_set(&bt->dropped, 0);
68801 + atomic_set_unchecked(&bt->dropped, 0);
68802
68803 ret = -EIO;
68804 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
68805 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
68806 index 0fa92f6..89950b2 100644
68807 --- a/kernel/trace/ftrace.c
68808 +++ b/kernel/trace/ftrace.c
68809 @@ -1800,12 +1800,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
68810 if (unlikely(ftrace_disabled))
68811 return 0;
68812
68813 + ret = ftrace_arch_code_modify_prepare();
68814 + FTRACE_WARN_ON(ret);
68815 + if (ret)
68816 + return 0;
68817 +
68818 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
68819 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
68820 if (ret) {
68821 ftrace_bug(ret, ip);
68822 - return 0;
68823 }
68824 - return 1;
68825 + return ret ? 0 : 1;
68826 }
68827
68828 /*
68829 @@ -2917,7 +2922,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
68830
68831 int
68832 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
68833 - void *data)
68834 + void *data)
68835 {
68836 struct ftrace_func_probe *entry;
68837 struct ftrace_page *pg;
68838 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
68839 index 2a22255..cdcdd06 100644
68840 --- a/kernel/trace/trace.c
68841 +++ b/kernel/trace/trace.c
68842 @@ -4312,10 +4312,9 @@ static const struct file_operations tracing_dyn_info_fops = {
68843 };
68844 #endif
68845
68846 -static struct dentry *d_tracer;
68847 -
68848 struct dentry *tracing_init_dentry(void)
68849 {
68850 + static struct dentry *d_tracer;
68851 static int once;
68852
68853 if (d_tracer)
68854 @@ -4335,10 +4334,9 @@ struct dentry *tracing_init_dentry(void)
68855 return d_tracer;
68856 }
68857
68858 -static struct dentry *d_percpu;
68859 -
68860 struct dentry *tracing_dentry_percpu(void)
68861 {
68862 + static struct dentry *d_percpu;
68863 static int once;
68864 struct dentry *d_tracer;
68865
68866 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
68867 index 29111da..d190fe2 100644
68868 --- a/kernel/trace/trace_events.c
68869 +++ b/kernel/trace/trace_events.c
68870 @@ -1308,10 +1308,6 @@ static LIST_HEAD(ftrace_module_file_list);
68871 struct ftrace_module_file_ops {
68872 struct list_head list;
68873 struct module *mod;
68874 - struct file_operations id;
68875 - struct file_operations enable;
68876 - struct file_operations format;
68877 - struct file_operations filter;
68878 };
68879
68880 static struct ftrace_module_file_ops *
68881 @@ -1332,17 +1328,12 @@ trace_create_file_ops(struct module *mod)
68882
68883 file_ops->mod = mod;
68884
68885 - file_ops->id = ftrace_event_id_fops;
68886 - file_ops->id.owner = mod;
68887 -
68888 - file_ops->enable = ftrace_enable_fops;
68889 - file_ops->enable.owner = mod;
68890 -
68891 - file_ops->filter = ftrace_event_filter_fops;
68892 - file_ops->filter.owner = mod;
68893 -
68894 - file_ops->format = ftrace_event_format_fops;
68895 - file_ops->format.owner = mod;
68896 + pax_open_kernel();
68897 + *(void **)&mod->trace_id.owner = mod;
68898 + *(void **)&mod->trace_enable.owner = mod;
68899 + *(void **)&mod->trace_filter.owner = mod;
68900 + *(void **)&mod->trace_format.owner = mod;
68901 + pax_close_kernel();
68902
68903 list_add(&file_ops->list, &ftrace_module_file_list);
68904
68905 @@ -1366,8 +1357,8 @@ static void trace_module_add_events(struct module *mod)
68906
68907 for_each_event(call, start, end) {
68908 __trace_add_event_call(*call, mod,
68909 - &file_ops->id, &file_ops->enable,
68910 - &file_ops->filter, &file_ops->format);
68911 + &mod->trace_id, &mod->trace_enable,
68912 + &mod->trace_filter, &mod->trace_format);
68913 }
68914 }
68915
68916 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
68917 index 580a05e..9b31acb 100644
68918 --- a/kernel/trace/trace_kprobe.c
68919 +++ b/kernel/trace/trace_kprobe.c
68920 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68921 long ret;
68922 int maxlen = get_rloc_len(*(u32 *)dest);
68923 u8 *dst = get_rloc_data(dest);
68924 - u8 *src = addr;
68925 + const u8 __user *src = (const u8 __force_user *)addr;
68926 mm_segment_t old_fs = get_fs();
68927 if (!maxlen)
68928 return;
68929 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68930 pagefault_disable();
68931 do
68932 ret = __copy_from_user_inatomic(dst++, src++, 1);
68933 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
68934 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
68935 dst[-1] = '\0';
68936 pagefault_enable();
68937 set_fs(old_fs);
68938 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68939 ((u8 *)get_rloc_data(dest))[0] = '\0';
68940 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
68941 } else
68942 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
68943 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
68944 get_rloc_offs(*(u32 *)dest));
68945 }
68946 /* Return the length of string -- including null terminal byte */
68947 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
68948 set_fs(KERNEL_DS);
68949 pagefault_disable();
68950 do {
68951 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
68952 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
68953 len++;
68954 } while (c && ret == 0 && len < MAX_STRING_SIZE);
68955 pagefault_enable();
68956 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
68957 index fd3c8aa..5f324a6 100644
68958 --- a/kernel/trace/trace_mmiotrace.c
68959 +++ b/kernel/trace/trace_mmiotrace.c
68960 @@ -24,7 +24,7 @@ struct header_iter {
68961 static struct trace_array *mmio_trace_array;
68962 static bool overrun_detected;
68963 static unsigned long prev_overruns;
68964 -static atomic_t dropped_count;
68965 +static atomic_unchecked_t dropped_count;
68966
68967 static void mmio_reset_data(struct trace_array *tr)
68968 {
68969 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
68970
68971 static unsigned long count_overruns(struct trace_iterator *iter)
68972 {
68973 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
68974 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
68975 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
68976
68977 if (over > prev_overruns)
68978 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
68979 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
68980 sizeof(*entry), 0, pc);
68981 if (!event) {
68982 - atomic_inc(&dropped_count);
68983 + atomic_inc_unchecked(&dropped_count);
68984 return;
68985 }
68986 entry = ring_buffer_event_data(event);
68987 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
68988 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
68989 sizeof(*entry), 0, pc);
68990 if (!event) {
68991 - atomic_inc(&dropped_count);
68992 + atomic_inc_unchecked(&dropped_count);
68993 return;
68994 }
68995 entry = ring_buffer_event_data(event);
68996 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
68997 index df611a0..10d8b32 100644
68998 --- a/kernel/trace/trace_output.c
68999 +++ b/kernel/trace/trace_output.c
69000 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
69001
69002 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
69003 if (!IS_ERR(p)) {
69004 - p = mangle_path(s->buffer + s->len, p, "\n");
69005 + p = mangle_path(s->buffer + s->len, p, "\n\\");
69006 if (p) {
69007 s->len = p - s->buffer;
69008 return 1;
69009 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
69010 index d4545f4..a9010a1 100644
69011 --- a/kernel/trace/trace_stack.c
69012 +++ b/kernel/trace/trace_stack.c
69013 @@ -53,7 +53,7 @@ static inline void check_stack(void)
69014 return;
69015
69016 /* we do not handle interrupt stacks yet */
69017 - if (!object_is_on_stack(&this_size))
69018 + if (!object_starts_on_stack(&this_size))
69019 return;
69020
69021 local_irq_save(flags);
69022 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
69023 index 209b379..7f76423 100644
69024 --- a/kernel/trace/trace_workqueue.c
69025 +++ b/kernel/trace/trace_workqueue.c
69026 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
69027 int cpu;
69028 pid_t pid;
69029 /* Can be inserted from interrupt or user context, need to be atomic */
69030 - atomic_t inserted;
69031 + atomic_unchecked_t inserted;
69032 /*
69033 * Don't need to be atomic, works are serialized in a single workqueue thread
69034 * on a single CPU.
69035 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
69036 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
69037 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
69038 if (node->pid == wq_thread->pid) {
69039 - atomic_inc(&node->inserted);
69040 + atomic_inc_unchecked(&node->inserted);
69041 goto found;
69042 }
69043 }
69044 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
69045 tsk = get_pid_task(pid, PIDTYPE_PID);
69046 if (tsk) {
69047 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
69048 - atomic_read(&cws->inserted), cws->executed,
69049 + atomic_read_unchecked(&cws->inserted), cws->executed,
69050 tsk->comm);
69051 put_task_struct(tsk);
69052 }
69053 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
69054 index 6777153..8519f60 100644
69055 --- a/lib/Kconfig.debug
69056 +++ b/lib/Kconfig.debug
69057 @@ -1132,6 +1132,7 @@ config LATENCYTOP
69058 depends on DEBUG_KERNEL
69059 depends on STACKTRACE_SUPPORT
69060 depends on PROC_FS
69061 + depends on !GRKERNSEC_HIDESYM
69062 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
69063 select KALLSYMS
69064 select KALLSYMS_ALL
69065 diff --git a/lib/bitmap.c b/lib/bitmap.c
69066 index b5a8b6a..a69623c 100644
69067 --- a/lib/bitmap.c
69068 +++ b/lib/bitmap.c
69069 @@ -421,7 +421,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
69070 {
69071 int c, old_c, totaldigits, ndigits, nchunks, nbits;
69072 u32 chunk;
69073 - const char __user __force *ubuf = (const char __user __force *)buf;
69074 + const char __user *ubuf = (const char __force_user *)buf;
69075
69076 bitmap_zero(maskp, nmaskbits);
69077
69078 @@ -506,7 +506,7 @@ int bitmap_parse_user(const char __user *ubuf,
69079 {
69080 if (!access_ok(VERIFY_READ, ubuf, ulen))
69081 return -EFAULT;
69082 - return __bitmap_parse((const char __force *)ubuf,
69083 + return __bitmap_parse((const char __force_kernel *)ubuf,
69084 ulen, 1, maskp, nmaskbits);
69085
69086 }
69087 @@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
69088 {
69089 unsigned a, b;
69090 int c, old_c, totaldigits;
69091 - const char __user __force *ubuf = (const char __user __force *)buf;
69092 + const char __user *ubuf = (const char __force_user *)buf;
69093 int exp_digit, in_range;
69094
69095 totaldigits = c = 0;
69096 @@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
69097 {
69098 if (!access_ok(VERIFY_READ, ubuf, ulen))
69099 return -EFAULT;
69100 - return __bitmap_parselist((const char __force *)ubuf,
69101 + return __bitmap_parselist((const char __force_kernel *)ubuf,
69102 ulen, 1, maskp, nmaskbits);
69103 }
69104 EXPORT_SYMBOL(bitmap_parselist_user);
69105 diff --git a/lib/bug.c b/lib/bug.c
69106 index a28c141..2bd3d95 100644
69107 --- a/lib/bug.c
69108 +++ b/lib/bug.c
69109 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
69110 return BUG_TRAP_TYPE_NONE;
69111
69112 bug = find_bug(bugaddr);
69113 + if (!bug)
69114 + return BUG_TRAP_TYPE_NONE;
69115
69116 file = NULL;
69117 line = 0;
69118 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
69119 index 0ab9ae8..f01ceca 100644
69120 --- a/lib/debugobjects.c
69121 +++ b/lib/debugobjects.c
69122 @@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
69123 if (limit > 4)
69124 return;
69125
69126 - is_on_stack = object_is_on_stack(addr);
69127 + is_on_stack = object_starts_on_stack(addr);
69128 if (is_on_stack == onstack)
69129 return;
69130
69131 diff --git a/lib/devres.c b/lib/devres.c
69132 index 80b9c76..9e32279 100644
69133 --- a/lib/devres.c
69134 +++ b/lib/devres.c
69135 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
69136 void devm_iounmap(struct device *dev, void __iomem *addr)
69137 {
69138 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
69139 - (void *)addr));
69140 + (void __force *)addr));
69141 iounmap(addr);
69142 }
69143 EXPORT_SYMBOL(devm_iounmap);
69144 @@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
69145 {
69146 ioport_unmap(addr);
69147 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
69148 - devm_ioport_map_match, (void *)addr));
69149 + devm_ioport_map_match, (void __force *)addr));
69150 }
69151 EXPORT_SYMBOL(devm_ioport_unmap);
69152
69153 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
69154 index 13ef233..5241683 100644
69155 --- a/lib/dma-debug.c
69156 +++ b/lib/dma-debug.c
69157 @@ -924,7 +924,7 @@ out:
69158
69159 static void check_for_stack(struct device *dev, void *addr)
69160 {
69161 - if (object_is_on_stack(addr))
69162 + if (object_starts_on_stack(addr))
69163 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
69164 "stack [addr=%p]\n", addr);
69165 }
69166 diff --git a/lib/extable.c b/lib/extable.c
69167 index 4cac81e..63e9b8f 100644
69168 --- a/lib/extable.c
69169 +++ b/lib/extable.c
69170 @@ -13,6 +13,7 @@
69171 #include <linux/init.h>
69172 #include <linux/sort.h>
69173 #include <asm/uaccess.h>
69174 +#include <asm/pgtable.h>
69175
69176 #ifndef ARCH_HAS_SORT_EXTABLE
69177 /*
69178 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
69179 void sort_extable(struct exception_table_entry *start,
69180 struct exception_table_entry *finish)
69181 {
69182 + pax_open_kernel();
69183 sort(start, finish - start, sizeof(struct exception_table_entry),
69184 cmp_ex, NULL);
69185 + pax_close_kernel();
69186 }
69187
69188 #ifdef CONFIG_MODULES
69189 diff --git a/lib/inflate.c b/lib/inflate.c
69190 index 013a761..c28f3fc 100644
69191 --- a/lib/inflate.c
69192 +++ b/lib/inflate.c
69193 @@ -269,7 +269,7 @@ static void free(void *where)
69194 malloc_ptr = free_mem_ptr;
69195 }
69196 #else
69197 -#define malloc(a) kmalloc(a, GFP_KERNEL)
69198 +#define malloc(a) kmalloc((a), GFP_KERNEL)
69199 #define free(a) kfree(a)
69200 #endif
69201
69202 diff --git a/lib/ioremap.c b/lib/ioremap.c
69203 index 0c9216c..863bd89 100644
69204 --- a/lib/ioremap.c
69205 +++ b/lib/ioremap.c
69206 @@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
69207 unsigned long next;
69208
69209 phys_addr -= addr;
69210 - pmd = pmd_alloc(&init_mm, pud, addr);
69211 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
69212 if (!pmd)
69213 return -ENOMEM;
69214 do {
69215 @@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
69216 unsigned long next;
69217
69218 phys_addr -= addr;
69219 - pud = pud_alloc(&init_mm, pgd, addr);
69220 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
69221 if (!pud)
69222 return -ENOMEM;
69223 do {
69224 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
69225 index bd2bea9..6b3c95e 100644
69226 --- a/lib/is_single_threaded.c
69227 +++ b/lib/is_single_threaded.c
69228 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
69229 struct task_struct *p, *t;
69230 bool ret;
69231
69232 + if (!mm)
69233 + return true;
69234 +
69235 if (atomic_read(&task->signal->live) != 1)
69236 return false;
69237
69238 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
69239 index 3ac50dc..240bb7e 100644
69240 --- a/lib/radix-tree.c
69241 +++ b/lib/radix-tree.c
69242 @@ -79,7 +79,7 @@ struct radix_tree_preload {
69243 int nr;
69244 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
69245 };
69246 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
69247 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
69248
69249 static inline void *ptr_to_indirect(void *ptr)
69250 {
69251 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
69252 index abbabec..362988d 100644
69253 --- a/lib/vsprintf.c
69254 +++ b/lib/vsprintf.c
69255 @@ -16,6 +16,9 @@
69256 * - scnprintf and vscnprintf
69257 */
69258
69259 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69260 +#define __INCLUDED_BY_HIDESYM 1
69261 +#endif
69262 #include <stdarg.h>
69263 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
69264 #include <linux/types.h>
69265 @@ -433,7 +436,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
69266 char sym[KSYM_SYMBOL_LEN];
69267 if (ext == 'B')
69268 sprint_backtrace(sym, value);
69269 - else if (ext != 'f' && ext != 's')
69270 + else if (ext != 'f' && ext != 's' && ext != 'a')
69271 sprint_symbol(sym, value);
69272 else
69273 kallsyms_lookup(value, NULL, NULL, NULL, sym);
69274 @@ -809,7 +812,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
69275 return number(buf, end, *(const netdev_features_t *)addr, spec);
69276 }
69277
69278 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69279 +int kptr_restrict __read_mostly = 2;
69280 +#else
69281 int kptr_restrict __read_mostly;
69282 +#endif
69283
69284 /*
69285 * Show a '%p' thing. A kernel extension is that the '%p' is followed
69286 @@ -823,6 +830,8 @@ int kptr_restrict __read_mostly;
69287 * - 'S' For symbolic direct pointers with offset
69288 * - 's' For symbolic direct pointers without offset
69289 * - 'B' For backtraced symbolic direct pointers with offset
69290 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
69291 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
69292 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
69293 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
69294 * - 'M' For a 6-byte MAC address, it prints the address in the
69295 @@ -868,12 +877,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69296 {
69297 if (!ptr && *fmt != 'K') {
69298 /*
69299 - * Print (null) with the same width as a pointer so it makes
69300 + * Print (nil) with the same width as a pointer so it makes
69301 * tabular output look nice.
69302 */
69303 if (spec.field_width == -1)
69304 spec.field_width = 2 * sizeof(void *);
69305 - return string(buf, end, "(null)", spec);
69306 + return string(buf, end, "(nil)", spec);
69307 }
69308
69309 switch (*fmt) {
69310 @@ -883,6 +892,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69311 /* Fallthrough */
69312 case 'S':
69313 case 's':
69314 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69315 + break;
69316 +#else
69317 + return symbol_string(buf, end, ptr, spec, *fmt);
69318 +#endif
69319 + case 'A':
69320 + case 'a':
69321 case 'B':
69322 return symbol_string(buf, end, ptr, spec, *fmt);
69323 case 'R':
69324 @@ -1653,11 +1669,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69325 typeof(type) value; \
69326 if (sizeof(type) == 8) { \
69327 args = PTR_ALIGN(args, sizeof(u32)); \
69328 - *(u32 *)&value = *(u32 *)args; \
69329 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
69330 + *(u32 *)&value = *(const u32 *)args; \
69331 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
69332 } else { \
69333 args = PTR_ALIGN(args, sizeof(type)); \
69334 - value = *(typeof(type) *)args; \
69335 + value = *(const typeof(type) *)args; \
69336 } \
69337 args += sizeof(type); \
69338 value; \
69339 @@ -1720,7 +1736,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69340 case FORMAT_TYPE_STR: {
69341 const char *str_arg = args;
69342 args += strlen(str_arg) + 1;
69343 - str = string(str, end, (char *)str_arg, spec);
69344 + str = string(str, end, str_arg, spec);
69345 break;
69346 }
69347
69348 diff --git a/localversion-grsec b/localversion-grsec
69349 new file mode 100644
69350 index 0000000..7cd6065
69351 --- /dev/null
69352 +++ b/localversion-grsec
69353 @@ -0,0 +1 @@
69354 +-grsec
69355 diff --git a/mm/Kconfig b/mm/Kconfig
69356 index e338407..4210331 100644
69357 --- a/mm/Kconfig
69358 +++ b/mm/Kconfig
69359 @@ -247,10 +247,10 @@ config KSM
69360 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
69361
69362 config DEFAULT_MMAP_MIN_ADDR
69363 - int "Low address space to protect from user allocation"
69364 + int "Low address space to protect from user allocation"
69365 depends on MMU
69366 - default 4096
69367 - help
69368 + default 65536
69369 + help
69370 This is the portion of low virtual memory which should be protected
69371 from userspace allocation. Keeping a user from writing to low pages
69372 can help reduce the impact of kernel NULL pointer bugs.
69373 @@ -280,7 +280,7 @@ config MEMORY_FAILURE
69374
69375 config HWPOISON_INJECT
69376 tristate "HWPoison pages injector"
69377 - depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
69378 + depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
69379 select PROC_PAGE_MONITOR
69380
69381 config NOMMU_INITIAL_TRIM_EXCESS
69382 diff --git a/mm/filemap.c b/mm/filemap.c
69383 index 79c4b2b..596b417 100644
69384 --- a/mm/filemap.c
69385 +++ b/mm/filemap.c
69386 @@ -1762,7 +1762,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
69387 struct address_space *mapping = file->f_mapping;
69388
69389 if (!mapping->a_ops->readpage)
69390 - return -ENOEXEC;
69391 + return -ENODEV;
69392 file_accessed(file);
69393 vma->vm_ops = &generic_file_vm_ops;
69394 vma->vm_flags |= VM_CAN_NONLINEAR;
69395 @@ -2168,6 +2168,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
69396 *pos = i_size_read(inode);
69397
69398 if (limit != RLIM_INFINITY) {
69399 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
69400 if (*pos >= limit) {
69401 send_sig(SIGXFSZ, current, 0);
69402 return -EFBIG;
69403 diff --git a/mm/fremap.c b/mm/fremap.c
69404 index 9ed4fd4..c42648d 100644
69405 --- a/mm/fremap.c
69406 +++ b/mm/fremap.c
69407 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
69408 retry:
69409 vma = find_vma(mm, start);
69410
69411 +#ifdef CONFIG_PAX_SEGMEXEC
69412 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
69413 + goto out;
69414 +#endif
69415 +
69416 /*
69417 * Make sure the vma is shared, that it supports prefaulting,
69418 * and that the remapped range is valid and fully within
69419 diff --git a/mm/highmem.c b/mm/highmem.c
69420 index 57d82c6..e9e0552 100644
69421 --- a/mm/highmem.c
69422 +++ b/mm/highmem.c
69423 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
69424 * So no dangers, even with speculative execution.
69425 */
69426 page = pte_page(pkmap_page_table[i]);
69427 + pax_open_kernel();
69428 pte_clear(&init_mm, (unsigned long)page_address(page),
69429 &pkmap_page_table[i]);
69430 -
69431 + pax_close_kernel();
69432 set_page_address(page, NULL);
69433 need_flush = 1;
69434 }
69435 @@ -186,9 +187,11 @@ start:
69436 }
69437 }
69438 vaddr = PKMAP_ADDR(last_pkmap_nr);
69439 +
69440 + pax_open_kernel();
69441 set_pte_at(&init_mm, vaddr,
69442 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
69443 -
69444 + pax_close_kernel();
69445 pkmap_count[last_pkmap_nr] = 1;
69446 set_page_address(page, (void *)vaddr);
69447
69448 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
69449 index f0e5306..cb9398e 100644
69450 --- a/mm/huge_memory.c
69451 +++ b/mm/huge_memory.c
69452 @@ -733,7 +733,7 @@ out:
69453 * run pte_offset_map on the pmd, if an huge pmd could
69454 * materialize from under us from a different thread.
69455 */
69456 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
69457 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
69458 return VM_FAULT_OOM;
69459 /* if an huge pmd materialized from under us just retry later */
69460 if (unlikely(pmd_trans_huge(*pmd)))
69461 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
69462 index 263e177..3f36aec 100644
69463 --- a/mm/hugetlb.c
69464 +++ b/mm/hugetlb.c
69465 @@ -2446,6 +2446,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
69466 return 1;
69467 }
69468
69469 +#ifdef CONFIG_PAX_SEGMEXEC
69470 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
69471 +{
69472 + struct mm_struct *mm = vma->vm_mm;
69473 + struct vm_area_struct *vma_m;
69474 + unsigned long address_m;
69475 + pte_t *ptep_m;
69476 +
69477 + vma_m = pax_find_mirror_vma(vma);
69478 + if (!vma_m)
69479 + return;
69480 +
69481 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69482 + address_m = address + SEGMEXEC_TASK_SIZE;
69483 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
69484 + get_page(page_m);
69485 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
69486 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
69487 +}
69488 +#endif
69489 +
69490 /*
69491 * Hugetlb_cow() should be called with page lock of the original hugepage held.
69492 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
69493 @@ -2558,6 +2579,11 @@ retry_avoidcopy:
69494 make_huge_pte(vma, new_page, 1));
69495 page_remove_rmap(old_page);
69496 hugepage_add_new_anon_rmap(new_page, vma, address);
69497 +
69498 +#ifdef CONFIG_PAX_SEGMEXEC
69499 + pax_mirror_huge_pte(vma, address, new_page);
69500 +#endif
69501 +
69502 /* Make the old page be freed below */
69503 new_page = old_page;
69504 mmu_notifier_invalidate_range_end(mm,
69505 @@ -2712,6 +2738,10 @@ retry:
69506 && (vma->vm_flags & VM_SHARED)));
69507 set_huge_pte_at(mm, address, ptep, new_pte);
69508
69509 +#ifdef CONFIG_PAX_SEGMEXEC
69510 + pax_mirror_huge_pte(vma, address, page);
69511 +#endif
69512 +
69513 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
69514 /* Optimization, do the COW without a second fault */
69515 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
69516 @@ -2741,6 +2771,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69517 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
69518 struct hstate *h = hstate_vma(vma);
69519
69520 +#ifdef CONFIG_PAX_SEGMEXEC
69521 + struct vm_area_struct *vma_m;
69522 +#endif
69523 +
69524 address &= huge_page_mask(h);
69525
69526 ptep = huge_pte_offset(mm, address);
69527 @@ -2754,6 +2788,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69528 VM_FAULT_SET_HINDEX(h - hstates);
69529 }
69530
69531 +#ifdef CONFIG_PAX_SEGMEXEC
69532 + vma_m = pax_find_mirror_vma(vma);
69533 + if (vma_m) {
69534 + unsigned long address_m;
69535 +
69536 + if (vma->vm_start > vma_m->vm_start) {
69537 + address_m = address;
69538 + address -= SEGMEXEC_TASK_SIZE;
69539 + vma = vma_m;
69540 + h = hstate_vma(vma);
69541 + } else
69542 + address_m = address + SEGMEXEC_TASK_SIZE;
69543 +
69544 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
69545 + return VM_FAULT_OOM;
69546 + address_m &= HPAGE_MASK;
69547 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
69548 + }
69549 +#endif
69550 +
69551 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
69552 if (!ptep)
69553 return VM_FAULT_OOM;
69554 diff --git a/mm/internal.h b/mm/internal.h
69555 index 2189af4..f2ca332 100644
69556 --- a/mm/internal.h
69557 +++ b/mm/internal.h
69558 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
69559 * in mm/page_alloc.c
69560 */
69561 extern void __free_pages_bootmem(struct page *page, unsigned int order);
69562 +extern void free_compound_page(struct page *page);
69563 extern void prep_compound_page(struct page *page, unsigned long order);
69564 #ifdef CONFIG_MEMORY_FAILURE
69565 extern bool is_free_buddy_page(struct page *page);
69566 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
69567 index 45eb621..6ccd8ea 100644
69568 --- a/mm/kmemleak.c
69569 +++ b/mm/kmemleak.c
69570 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
69571
69572 for (i = 0; i < object->trace_len; i++) {
69573 void *ptr = (void *)object->trace[i];
69574 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
69575 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
69576 }
69577 }
69578
69579 diff --git a/mm/maccess.c b/mm/maccess.c
69580 index d53adf9..03a24bf 100644
69581 --- a/mm/maccess.c
69582 +++ b/mm/maccess.c
69583 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
69584 set_fs(KERNEL_DS);
69585 pagefault_disable();
69586 ret = __copy_from_user_inatomic(dst,
69587 - (__force const void __user *)src, size);
69588 + (const void __force_user *)src, size);
69589 pagefault_enable();
69590 set_fs(old_fs);
69591
69592 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
69593
69594 set_fs(KERNEL_DS);
69595 pagefault_disable();
69596 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
69597 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
69598 pagefault_enable();
69599 set_fs(old_fs);
69600
69601 diff --git a/mm/madvise.c b/mm/madvise.c
69602 index 1ccbba5..79e16f9 100644
69603 --- a/mm/madvise.c
69604 +++ b/mm/madvise.c
69605 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
69606 pgoff_t pgoff;
69607 unsigned long new_flags = vma->vm_flags;
69608
69609 +#ifdef CONFIG_PAX_SEGMEXEC
69610 + struct vm_area_struct *vma_m;
69611 +#endif
69612 +
69613 switch (behavior) {
69614 case MADV_NORMAL:
69615 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
69616 @@ -116,6 +120,13 @@ success:
69617 /*
69618 * vm_flags is protected by the mmap_sem held in write mode.
69619 */
69620 +
69621 +#ifdef CONFIG_PAX_SEGMEXEC
69622 + vma_m = pax_find_mirror_vma(vma);
69623 + if (vma_m)
69624 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
69625 +#endif
69626 +
69627 vma->vm_flags = new_flags;
69628
69629 out:
69630 @@ -174,6 +185,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69631 struct vm_area_struct ** prev,
69632 unsigned long start, unsigned long end)
69633 {
69634 +
69635 +#ifdef CONFIG_PAX_SEGMEXEC
69636 + struct vm_area_struct *vma_m;
69637 +#endif
69638 +
69639 *prev = vma;
69640 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
69641 return -EINVAL;
69642 @@ -186,6 +202,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69643 zap_page_range(vma, start, end - start, &details);
69644 } else
69645 zap_page_range(vma, start, end - start, NULL);
69646 +
69647 +#ifdef CONFIG_PAX_SEGMEXEC
69648 + vma_m = pax_find_mirror_vma(vma);
69649 + if (vma_m) {
69650 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
69651 + struct zap_details details = {
69652 + .nonlinear_vma = vma_m,
69653 + .last_index = ULONG_MAX,
69654 + };
69655 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
69656 + } else
69657 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
69658 + }
69659 +#endif
69660 +
69661 return 0;
69662 }
69663
69664 @@ -384,6 +415,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
69665 if (end < start)
69666 goto out;
69667
69668 +#ifdef CONFIG_PAX_SEGMEXEC
69669 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69670 + if (end > SEGMEXEC_TASK_SIZE)
69671 + goto out;
69672 + } else
69673 +#endif
69674 +
69675 + if (end > TASK_SIZE)
69676 + goto out;
69677 +
69678 error = 0;
69679 if (end == start)
69680 goto out;
69681 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
69682 index 97cc273..6ed703f 100644
69683 --- a/mm/memory-failure.c
69684 +++ b/mm/memory-failure.c
69685 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
69686
69687 int sysctl_memory_failure_recovery __read_mostly = 1;
69688
69689 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69690 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69691
69692 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
69693
69694 @@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
69695 pfn, t->comm, t->pid);
69696 si.si_signo = SIGBUS;
69697 si.si_errno = 0;
69698 - si.si_addr = (void *)addr;
69699 + si.si_addr = (void __user *)addr;
69700 #ifdef __ARCH_SI_TRAPNO
69701 si.si_trapno = trapno;
69702 #endif
69703 @@ -1036,7 +1036,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
69704 }
69705
69706 nr_pages = 1 << compound_trans_order(hpage);
69707 - atomic_long_add(nr_pages, &mce_bad_pages);
69708 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
69709
69710 /*
69711 * We need/can do nothing about count=0 pages.
69712 @@ -1066,7 +1066,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
69713 if (!PageHWPoison(hpage)
69714 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
69715 || (p != hpage && TestSetPageHWPoison(hpage))) {
69716 - atomic_long_sub(nr_pages, &mce_bad_pages);
69717 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69718 return 0;
69719 }
69720 set_page_hwpoison_huge_page(hpage);
69721 @@ -1124,7 +1124,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
69722 }
69723 if (hwpoison_filter(p)) {
69724 if (TestClearPageHWPoison(p))
69725 - atomic_long_sub(nr_pages, &mce_bad_pages);
69726 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69727 unlock_page(hpage);
69728 put_page(hpage);
69729 return 0;
69730 @@ -1319,7 +1319,7 @@ int unpoison_memory(unsigned long pfn)
69731 return 0;
69732 }
69733 if (TestClearPageHWPoison(p))
69734 - atomic_long_sub(nr_pages, &mce_bad_pages);
69735 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69736 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
69737 return 0;
69738 }
69739 @@ -1333,7 +1333,7 @@ int unpoison_memory(unsigned long pfn)
69740 */
69741 if (TestClearPageHWPoison(page)) {
69742 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
69743 - atomic_long_sub(nr_pages, &mce_bad_pages);
69744 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69745 freeit = 1;
69746 if (PageHuge(page))
69747 clear_page_hwpoison_huge_page(page);
69748 @@ -1446,7 +1446,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
69749 }
69750 done:
69751 if (!PageHWPoison(hpage))
69752 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
69753 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
69754 set_page_hwpoison_huge_page(hpage);
69755 dequeue_hwpoisoned_huge_page(hpage);
69756 /* keep elevated page count for bad page */
69757 @@ -1577,7 +1577,7 @@ int soft_offline_page(struct page *page, int flags)
69758 return ret;
69759
69760 done:
69761 - atomic_long_add(1, &mce_bad_pages);
69762 + atomic_long_add_unchecked(1, &mce_bad_pages);
69763 SetPageHWPoison(page);
69764 /* keep elevated page count for bad page */
69765 return ret;
69766 diff --git a/mm/memory.c b/mm/memory.c
69767 index 6105f47..3363489 100644
69768 --- a/mm/memory.c
69769 +++ b/mm/memory.c
69770 @@ -434,8 +434,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
69771 return;
69772
69773 pmd = pmd_offset(pud, start);
69774 +
69775 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
69776 pud_clear(pud);
69777 pmd_free_tlb(tlb, pmd, start);
69778 +#endif
69779 +
69780 }
69781
69782 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69783 @@ -466,9 +470,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69784 if (end - 1 > ceiling - 1)
69785 return;
69786
69787 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
69788 pud = pud_offset(pgd, start);
69789 pgd_clear(pgd);
69790 pud_free_tlb(tlb, pud, start);
69791 +#endif
69792 +
69793 }
69794
69795 /*
69796 @@ -1597,12 +1604,6 @@ no_page_table:
69797 return page;
69798 }
69799
69800 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
69801 -{
69802 - return stack_guard_page_start(vma, addr) ||
69803 - stack_guard_page_end(vma, addr+PAGE_SIZE);
69804 -}
69805 -
69806 /**
69807 * __get_user_pages() - pin user pages in memory
69808 * @tsk: task_struct of target task
69809 @@ -1675,10 +1676,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69810 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
69811 i = 0;
69812
69813 - do {
69814 + while (nr_pages) {
69815 struct vm_area_struct *vma;
69816
69817 - vma = find_extend_vma(mm, start);
69818 + vma = find_vma(mm, start);
69819 if (!vma && in_gate_area(mm, start)) {
69820 unsigned long pg = start & PAGE_MASK;
69821 pgd_t *pgd;
69822 @@ -1726,7 +1727,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69823 goto next_page;
69824 }
69825
69826 - if (!vma ||
69827 + if (!vma || start < vma->vm_start ||
69828 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
69829 !(vm_flags & vma->vm_flags))
69830 return i ? : -EFAULT;
69831 @@ -1753,11 +1754,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69832 int ret;
69833 unsigned int fault_flags = 0;
69834
69835 - /* For mlock, just skip the stack guard page. */
69836 - if (foll_flags & FOLL_MLOCK) {
69837 - if (stack_guard_page(vma, start))
69838 - goto next_page;
69839 - }
69840 if (foll_flags & FOLL_WRITE)
69841 fault_flags |= FAULT_FLAG_WRITE;
69842 if (nonblocking)
69843 @@ -1831,7 +1827,7 @@ next_page:
69844 start += PAGE_SIZE;
69845 nr_pages--;
69846 } while (nr_pages && start < vma->vm_end);
69847 - } while (nr_pages);
69848 + }
69849 return i;
69850 }
69851 EXPORT_SYMBOL(__get_user_pages);
69852 @@ -2038,6 +2034,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
69853 page_add_file_rmap(page);
69854 set_pte_at(mm, addr, pte, mk_pte(page, prot));
69855
69856 +#ifdef CONFIG_PAX_SEGMEXEC
69857 + pax_mirror_file_pte(vma, addr, page, ptl);
69858 +#endif
69859 +
69860 retval = 0;
69861 pte_unmap_unlock(pte, ptl);
69862 return retval;
69863 @@ -2072,10 +2072,22 @@ out:
69864 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
69865 struct page *page)
69866 {
69867 +
69868 +#ifdef CONFIG_PAX_SEGMEXEC
69869 + struct vm_area_struct *vma_m;
69870 +#endif
69871 +
69872 if (addr < vma->vm_start || addr >= vma->vm_end)
69873 return -EFAULT;
69874 if (!page_count(page))
69875 return -EINVAL;
69876 +
69877 +#ifdef CONFIG_PAX_SEGMEXEC
69878 + vma_m = pax_find_mirror_vma(vma);
69879 + if (vma_m)
69880 + vma_m->vm_flags |= VM_INSERTPAGE;
69881 +#endif
69882 +
69883 vma->vm_flags |= VM_INSERTPAGE;
69884 return insert_page(vma, addr, page, vma->vm_page_prot);
69885 }
69886 @@ -2161,6 +2173,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
69887 unsigned long pfn)
69888 {
69889 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
69890 + BUG_ON(vma->vm_mirror);
69891
69892 if (addr < vma->vm_start || addr >= vma->vm_end)
69893 return -EFAULT;
69894 @@ -2368,7 +2381,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
69895
69896 BUG_ON(pud_huge(*pud));
69897
69898 - pmd = pmd_alloc(mm, pud, addr);
69899 + pmd = (mm == &init_mm) ?
69900 + pmd_alloc_kernel(mm, pud, addr) :
69901 + pmd_alloc(mm, pud, addr);
69902 if (!pmd)
69903 return -ENOMEM;
69904 do {
69905 @@ -2388,7 +2403,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
69906 unsigned long next;
69907 int err;
69908
69909 - pud = pud_alloc(mm, pgd, addr);
69910 + pud = (mm == &init_mm) ?
69911 + pud_alloc_kernel(mm, pgd, addr) :
69912 + pud_alloc(mm, pgd, addr);
69913 if (!pud)
69914 return -ENOMEM;
69915 do {
69916 @@ -2476,6 +2493,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
69917 copy_user_highpage(dst, src, va, vma);
69918 }
69919
69920 +#ifdef CONFIG_PAX_SEGMEXEC
69921 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
69922 +{
69923 + struct mm_struct *mm = vma->vm_mm;
69924 + spinlock_t *ptl;
69925 + pte_t *pte, entry;
69926 +
69927 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
69928 + entry = *pte;
69929 + if (!pte_present(entry)) {
69930 + if (!pte_none(entry)) {
69931 + BUG_ON(pte_file(entry));
69932 + free_swap_and_cache(pte_to_swp_entry(entry));
69933 + pte_clear_not_present_full(mm, address, pte, 0);
69934 + }
69935 + } else {
69936 + struct page *page;
69937 +
69938 + flush_cache_page(vma, address, pte_pfn(entry));
69939 + entry = ptep_clear_flush(vma, address, pte);
69940 + BUG_ON(pte_dirty(entry));
69941 + page = vm_normal_page(vma, address, entry);
69942 + if (page) {
69943 + update_hiwater_rss(mm);
69944 + if (PageAnon(page))
69945 + dec_mm_counter_fast(mm, MM_ANONPAGES);
69946 + else
69947 + dec_mm_counter_fast(mm, MM_FILEPAGES);
69948 + page_remove_rmap(page);
69949 + page_cache_release(page);
69950 + }
69951 + }
69952 + pte_unmap_unlock(pte, ptl);
69953 +}
69954 +
69955 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
69956 + *
69957 + * the ptl of the lower mapped page is held on entry and is not released on exit
69958 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
69959 + */
69960 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
69961 +{
69962 + struct mm_struct *mm = vma->vm_mm;
69963 + unsigned long address_m;
69964 + spinlock_t *ptl_m;
69965 + struct vm_area_struct *vma_m;
69966 + pmd_t *pmd_m;
69967 + pte_t *pte_m, entry_m;
69968 +
69969 + BUG_ON(!page_m || !PageAnon(page_m));
69970 +
69971 + vma_m = pax_find_mirror_vma(vma);
69972 + if (!vma_m)
69973 + return;
69974 +
69975 + BUG_ON(!PageLocked(page_m));
69976 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69977 + address_m = address + SEGMEXEC_TASK_SIZE;
69978 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69979 + pte_m = pte_offset_map(pmd_m, address_m);
69980 + ptl_m = pte_lockptr(mm, pmd_m);
69981 + if (ptl != ptl_m) {
69982 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69983 + if (!pte_none(*pte_m))
69984 + goto out;
69985 + }
69986 +
69987 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
69988 + page_cache_get(page_m);
69989 + page_add_anon_rmap(page_m, vma_m, address_m);
69990 + inc_mm_counter_fast(mm, MM_ANONPAGES);
69991 + set_pte_at(mm, address_m, pte_m, entry_m);
69992 + update_mmu_cache(vma_m, address_m, entry_m);
69993 +out:
69994 + if (ptl != ptl_m)
69995 + spin_unlock(ptl_m);
69996 + pte_unmap(pte_m);
69997 + unlock_page(page_m);
69998 +}
69999 +
70000 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70001 +{
70002 + struct mm_struct *mm = vma->vm_mm;
70003 + unsigned long address_m;
70004 + spinlock_t *ptl_m;
70005 + struct vm_area_struct *vma_m;
70006 + pmd_t *pmd_m;
70007 + pte_t *pte_m, entry_m;
70008 +
70009 + BUG_ON(!page_m || PageAnon(page_m));
70010 +
70011 + vma_m = pax_find_mirror_vma(vma);
70012 + if (!vma_m)
70013 + return;
70014 +
70015 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70016 + address_m = address + SEGMEXEC_TASK_SIZE;
70017 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70018 + pte_m = pte_offset_map(pmd_m, address_m);
70019 + ptl_m = pte_lockptr(mm, pmd_m);
70020 + if (ptl != ptl_m) {
70021 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70022 + if (!pte_none(*pte_m))
70023 + goto out;
70024 + }
70025 +
70026 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70027 + page_cache_get(page_m);
70028 + page_add_file_rmap(page_m);
70029 + inc_mm_counter_fast(mm, MM_FILEPAGES);
70030 + set_pte_at(mm, address_m, pte_m, entry_m);
70031 + update_mmu_cache(vma_m, address_m, entry_m);
70032 +out:
70033 + if (ptl != ptl_m)
70034 + spin_unlock(ptl_m);
70035 + pte_unmap(pte_m);
70036 +}
70037 +
70038 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
70039 +{
70040 + struct mm_struct *mm = vma->vm_mm;
70041 + unsigned long address_m;
70042 + spinlock_t *ptl_m;
70043 + struct vm_area_struct *vma_m;
70044 + pmd_t *pmd_m;
70045 + pte_t *pte_m, entry_m;
70046 +
70047 + vma_m = pax_find_mirror_vma(vma);
70048 + if (!vma_m)
70049 + return;
70050 +
70051 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70052 + address_m = address + SEGMEXEC_TASK_SIZE;
70053 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70054 + pte_m = pte_offset_map(pmd_m, address_m);
70055 + ptl_m = pte_lockptr(mm, pmd_m);
70056 + if (ptl != ptl_m) {
70057 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70058 + if (!pte_none(*pte_m))
70059 + goto out;
70060 + }
70061 +
70062 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
70063 + set_pte_at(mm, address_m, pte_m, entry_m);
70064 +out:
70065 + if (ptl != ptl_m)
70066 + spin_unlock(ptl_m);
70067 + pte_unmap(pte_m);
70068 +}
70069 +
70070 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
70071 +{
70072 + struct page *page_m;
70073 + pte_t entry;
70074 +
70075 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
70076 + goto out;
70077 +
70078 + entry = *pte;
70079 + page_m = vm_normal_page(vma, address, entry);
70080 + if (!page_m)
70081 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
70082 + else if (PageAnon(page_m)) {
70083 + if (pax_find_mirror_vma(vma)) {
70084 + pte_unmap_unlock(pte, ptl);
70085 + lock_page(page_m);
70086 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
70087 + if (pte_same(entry, *pte))
70088 + pax_mirror_anon_pte(vma, address, page_m, ptl);
70089 + else
70090 + unlock_page(page_m);
70091 + }
70092 + } else
70093 + pax_mirror_file_pte(vma, address, page_m, ptl);
70094 +
70095 +out:
70096 + pte_unmap_unlock(pte, ptl);
70097 +}
70098 +#endif
70099 +
70100 /*
70101 * This routine handles present pages, when users try to write
70102 * to a shared page. It is done by copying the page to a new address
70103 @@ -2687,6 +2884,12 @@ gotten:
70104 */
70105 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70106 if (likely(pte_same(*page_table, orig_pte))) {
70107 +
70108 +#ifdef CONFIG_PAX_SEGMEXEC
70109 + if (pax_find_mirror_vma(vma))
70110 + BUG_ON(!trylock_page(new_page));
70111 +#endif
70112 +
70113 if (old_page) {
70114 if (!PageAnon(old_page)) {
70115 dec_mm_counter_fast(mm, MM_FILEPAGES);
70116 @@ -2738,6 +2941,10 @@ gotten:
70117 page_remove_rmap(old_page);
70118 }
70119
70120 +#ifdef CONFIG_PAX_SEGMEXEC
70121 + pax_mirror_anon_pte(vma, address, new_page, ptl);
70122 +#endif
70123 +
70124 /* Free the old page.. */
70125 new_page = old_page;
70126 ret |= VM_FAULT_WRITE;
70127 @@ -3017,6 +3224,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70128 swap_free(entry);
70129 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
70130 try_to_free_swap(page);
70131 +
70132 +#ifdef CONFIG_PAX_SEGMEXEC
70133 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
70134 +#endif
70135 +
70136 unlock_page(page);
70137 if (swapcache) {
70138 /*
70139 @@ -3040,6 +3252,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70140
70141 /* No need to invalidate - it was non-present before */
70142 update_mmu_cache(vma, address, page_table);
70143 +
70144 +#ifdef CONFIG_PAX_SEGMEXEC
70145 + pax_mirror_anon_pte(vma, address, page, ptl);
70146 +#endif
70147 +
70148 unlock:
70149 pte_unmap_unlock(page_table, ptl);
70150 out:
70151 @@ -3059,40 +3276,6 @@ out_release:
70152 }
70153
70154 /*
70155 - * This is like a special single-page "expand_{down|up}wards()",
70156 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
70157 - * doesn't hit another vma.
70158 - */
70159 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
70160 -{
70161 - address &= PAGE_MASK;
70162 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
70163 - struct vm_area_struct *prev = vma->vm_prev;
70164 -
70165 - /*
70166 - * Is there a mapping abutting this one below?
70167 - *
70168 - * That's only ok if it's the same stack mapping
70169 - * that has gotten split..
70170 - */
70171 - if (prev && prev->vm_end == address)
70172 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
70173 -
70174 - expand_downwards(vma, address - PAGE_SIZE);
70175 - }
70176 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
70177 - struct vm_area_struct *next = vma->vm_next;
70178 -
70179 - /* As VM_GROWSDOWN but s/below/above/ */
70180 - if (next && next->vm_start == address + PAGE_SIZE)
70181 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
70182 -
70183 - expand_upwards(vma, address + PAGE_SIZE);
70184 - }
70185 - return 0;
70186 -}
70187 -
70188 -/*
70189 * We enter with non-exclusive mmap_sem (to exclude vma changes,
70190 * but allow concurrent faults), and pte mapped but not yet locked.
70191 * We return with mmap_sem still held, but pte unmapped and unlocked.
70192 @@ -3101,27 +3284,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70193 unsigned long address, pte_t *page_table, pmd_t *pmd,
70194 unsigned int flags)
70195 {
70196 - struct page *page;
70197 + struct page *page = NULL;
70198 spinlock_t *ptl;
70199 pte_t entry;
70200
70201 - pte_unmap(page_table);
70202 -
70203 - /* Check if we need to add a guard page to the stack */
70204 - if (check_stack_guard_page(vma, address) < 0)
70205 - return VM_FAULT_SIGBUS;
70206 -
70207 - /* Use the zero-page for reads */
70208 if (!(flags & FAULT_FLAG_WRITE)) {
70209 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
70210 vma->vm_page_prot));
70211 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70212 + ptl = pte_lockptr(mm, pmd);
70213 + spin_lock(ptl);
70214 if (!pte_none(*page_table))
70215 goto unlock;
70216 goto setpte;
70217 }
70218
70219 /* Allocate our own private page. */
70220 + pte_unmap(page_table);
70221 +
70222 if (unlikely(anon_vma_prepare(vma)))
70223 goto oom;
70224 page = alloc_zeroed_user_highpage_movable(vma, address);
70225 @@ -3140,6 +3319,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70226 if (!pte_none(*page_table))
70227 goto release;
70228
70229 +#ifdef CONFIG_PAX_SEGMEXEC
70230 + if (pax_find_mirror_vma(vma))
70231 + BUG_ON(!trylock_page(page));
70232 +#endif
70233 +
70234 inc_mm_counter_fast(mm, MM_ANONPAGES);
70235 page_add_new_anon_rmap(page, vma, address);
70236 setpte:
70237 @@ -3147,6 +3331,12 @@ setpte:
70238
70239 /* No need to invalidate - it was non-present before */
70240 update_mmu_cache(vma, address, page_table);
70241 +
70242 +#ifdef CONFIG_PAX_SEGMEXEC
70243 + if (page)
70244 + pax_mirror_anon_pte(vma, address, page, ptl);
70245 +#endif
70246 +
70247 unlock:
70248 pte_unmap_unlock(page_table, ptl);
70249 return 0;
70250 @@ -3290,6 +3480,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70251 */
70252 /* Only go through if we didn't race with anybody else... */
70253 if (likely(pte_same(*page_table, orig_pte))) {
70254 +
70255 +#ifdef CONFIG_PAX_SEGMEXEC
70256 + if (anon && pax_find_mirror_vma(vma))
70257 + BUG_ON(!trylock_page(page));
70258 +#endif
70259 +
70260 flush_icache_page(vma, page);
70261 entry = mk_pte(page, vma->vm_page_prot);
70262 if (flags & FAULT_FLAG_WRITE)
70263 @@ -3309,6 +3505,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70264
70265 /* no need to invalidate: a not-present page won't be cached */
70266 update_mmu_cache(vma, address, page_table);
70267 +
70268 +#ifdef CONFIG_PAX_SEGMEXEC
70269 + if (anon)
70270 + pax_mirror_anon_pte(vma, address, page, ptl);
70271 + else
70272 + pax_mirror_file_pte(vma, address, page, ptl);
70273 +#endif
70274 +
70275 } else {
70276 if (cow_page)
70277 mem_cgroup_uncharge_page(cow_page);
70278 @@ -3462,6 +3666,12 @@ int handle_pte_fault(struct mm_struct *mm,
70279 if (flags & FAULT_FLAG_WRITE)
70280 flush_tlb_fix_spurious_fault(vma, address);
70281 }
70282 +
70283 +#ifdef CONFIG_PAX_SEGMEXEC
70284 + pax_mirror_pte(vma, address, pte, pmd, ptl);
70285 + return 0;
70286 +#endif
70287 +
70288 unlock:
70289 pte_unmap_unlock(pte, ptl);
70290 return 0;
70291 @@ -3478,6 +3688,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70292 pmd_t *pmd;
70293 pte_t *pte;
70294
70295 +#ifdef CONFIG_PAX_SEGMEXEC
70296 + struct vm_area_struct *vma_m;
70297 +#endif
70298 +
70299 __set_current_state(TASK_RUNNING);
70300
70301 count_vm_event(PGFAULT);
70302 @@ -3489,6 +3703,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70303 if (unlikely(is_vm_hugetlb_page(vma)))
70304 return hugetlb_fault(mm, vma, address, flags);
70305
70306 +#ifdef CONFIG_PAX_SEGMEXEC
70307 + vma_m = pax_find_mirror_vma(vma);
70308 + if (vma_m) {
70309 + unsigned long address_m;
70310 + pgd_t *pgd_m;
70311 + pud_t *pud_m;
70312 + pmd_t *pmd_m;
70313 +
70314 + if (vma->vm_start > vma_m->vm_start) {
70315 + address_m = address;
70316 + address -= SEGMEXEC_TASK_SIZE;
70317 + vma = vma_m;
70318 + } else
70319 + address_m = address + SEGMEXEC_TASK_SIZE;
70320 +
70321 + pgd_m = pgd_offset(mm, address_m);
70322 + pud_m = pud_alloc(mm, pgd_m, address_m);
70323 + if (!pud_m)
70324 + return VM_FAULT_OOM;
70325 + pmd_m = pmd_alloc(mm, pud_m, address_m);
70326 + if (!pmd_m)
70327 + return VM_FAULT_OOM;
70328 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
70329 + return VM_FAULT_OOM;
70330 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
70331 + }
70332 +#endif
70333 +
70334 pgd = pgd_offset(mm, address);
70335 pud = pud_alloc(mm, pgd, address);
70336 if (!pud)
70337 @@ -3518,7 +3760,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70338 * run pte_offset_map on the pmd, if an huge pmd could
70339 * materialize from under us from a different thread.
70340 */
70341 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
70342 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70343 return VM_FAULT_OOM;
70344 /* if an huge pmd materialized from under us just retry later */
70345 if (unlikely(pmd_trans_huge(*pmd)))
70346 @@ -3555,6 +3797,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
70347 spin_unlock(&mm->page_table_lock);
70348 return 0;
70349 }
70350 +
70351 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
70352 +{
70353 + pud_t *new = pud_alloc_one(mm, address);
70354 + if (!new)
70355 + return -ENOMEM;
70356 +
70357 + smp_wmb(); /* See comment in __pte_alloc */
70358 +
70359 + spin_lock(&mm->page_table_lock);
70360 + if (pgd_present(*pgd)) /* Another has populated it */
70361 + pud_free(mm, new);
70362 + else
70363 + pgd_populate_kernel(mm, pgd, new);
70364 + spin_unlock(&mm->page_table_lock);
70365 + return 0;
70366 +}
70367 #endif /* __PAGETABLE_PUD_FOLDED */
70368
70369 #ifndef __PAGETABLE_PMD_FOLDED
70370 @@ -3585,6 +3844,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
70371 spin_unlock(&mm->page_table_lock);
70372 return 0;
70373 }
70374 +
70375 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
70376 +{
70377 + pmd_t *new = pmd_alloc_one(mm, address);
70378 + if (!new)
70379 + return -ENOMEM;
70380 +
70381 + smp_wmb(); /* See comment in __pte_alloc */
70382 +
70383 + spin_lock(&mm->page_table_lock);
70384 +#ifndef __ARCH_HAS_4LEVEL_HACK
70385 + if (pud_present(*pud)) /* Another has populated it */
70386 + pmd_free(mm, new);
70387 + else
70388 + pud_populate_kernel(mm, pud, new);
70389 +#else
70390 + if (pgd_present(*pud)) /* Another has populated it */
70391 + pmd_free(mm, new);
70392 + else
70393 + pgd_populate_kernel(mm, pud, new);
70394 +#endif /* __ARCH_HAS_4LEVEL_HACK */
70395 + spin_unlock(&mm->page_table_lock);
70396 + return 0;
70397 +}
70398 #endif /* __PAGETABLE_PMD_FOLDED */
70399
70400 int make_pages_present(unsigned long addr, unsigned long end)
70401 @@ -3622,7 +3905,7 @@ static int __init gate_vma_init(void)
70402 gate_vma.vm_start = FIXADDR_USER_START;
70403 gate_vma.vm_end = FIXADDR_USER_END;
70404 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
70405 - gate_vma.vm_page_prot = __P101;
70406 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
70407
70408 return 0;
70409 }
70410 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
70411 index bf5b485..e44c2cb 100644
70412 --- a/mm/mempolicy.c
70413 +++ b/mm/mempolicy.c
70414 @@ -619,6 +619,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70415 unsigned long vmstart;
70416 unsigned long vmend;
70417
70418 +#ifdef CONFIG_PAX_SEGMEXEC
70419 + struct vm_area_struct *vma_m;
70420 +#endif
70421 +
70422 vma = find_vma(mm, start);
70423 if (!vma || vma->vm_start > start)
70424 return -EFAULT;
70425 @@ -672,6 +676,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70426 if (err)
70427 goto out;
70428 }
70429 +
70430 +#ifdef CONFIG_PAX_SEGMEXEC
70431 + vma_m = pax_find_mirror_vma(vma);
70432 + if (vma_m && vma_m->vm_ops && vma_m->vm_ops->set_policy) {
70433 + err = vma_m->vm_ops->set_policy(vma_m, new_pol);
70434 + if (err)
70435 + goto out;
70436 + }
70437 +#endif
70438 +
70439 }
70440
70441 out:
70442 @@ -1105,6 +1119,17 @@ static long do_mbind(unsigned long start, unsigned long len,
70443
70444 if (end < start)
70445 return -EINVAL;
70446 +
70447 +#ifdef CONFIG_PAX_SEGMEXEC
70448 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
70449 + if (end > SEGMEXEC_TASK_SIZE)
70450 + return -EINVAL;
70451 + } else
70452 +#endif
70453 +
70454 + if (end > TASK_SIZE)
70455 + return -EINVAL;
70456 +
70457 if (end == start)
70458 return 0;
70459
70460 @@ -1328,8 +1353,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70461 */
70462 tcred = __task_cred(task);
70463 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70464 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
70465 - !capable(CAP_SYS_NICE)) {
70466 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70467 rcu_read_unlock();
70468 err = -EPERM;
70469 goto out_put;
70470 @@ -1360,6 +1384,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70471 goto out;
70472 }
70473
70474 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70475 + if (mm != current->mm &&
70476 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70477 + mmput(mm);
70478 + err = -EPERM;
70479 + goto out;
70480 + }
70481 +#endif
70482 +
70483 err = do_migrate_pages(mm, old, new,
70484 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
70485
70486 diff --git a/mm/mlock.c b/mm/mlock.c
70487 index ef726e8..13e0901 100644
70488 --- a/mm/mlock.c
70489 +++ b/mm/mlock.c
70490 @@ -13,6 +13,7 @@
70491 #include <linux/pagemap.h>
70492 #include <linux/mempolicy.h>
70493 #include <linux/syscalls.h>
70494 +#include <linux/security.h>
70495 #include <linux/sched.h>
70496 #include <linux/export.h>
70497 #include <linux/rmap.h>
70498 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
70499 return -EINVAL;
70500 if (end == start)
70501 return 0;
70502 + if (end > TASK_SIZE)
70503 + return -EINVAL;
70504 +
70505 vma = find_vma(current->mm, start);
70506 if (!vma || vma->vm_start > start)
70507 return -ENOMEM;
70508 @@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
70509 for (nstart = start ; ; ) {
70510 vm_flags_t newflags;
70511
70512 +#ifdef CONFIG_PAX_SEGMEXEC
70513 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70514 + break;
70515 +#endif
70516 +
70517 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
70518
70519 newflags = vma->vm_flags | VM_LOCKED;
70520 @@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
70521 lock_limit >>= PAGE_SHIFT;
70522
70523 /* check against resource limits */
70524 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
70525 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
70526 error = do_mlock(start, len, 1);
70527 up_write(&current->mm->mmap_sem);
70528 @@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
70529 static int do_mlockall(int flags)
70530 {
70531 struct vm_area_struct * vma, * prev = NULL;
70532 - unsigned int def_flags = 0;
70533
70534 if (flags & MCL_FUTURE)
70535 - def_flags = VM_LOCKED;
70536 - current->mm->def_flags = def_flags;
70537 + current->mm->def_flags |= VM_LOCKED;
70538 + else
70539 + current->mm->def_flags &= ~VM_LOCKED;
70540 if (flags == MCL_FUTURE)
70541 goto out;
70542
70543 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
70544 vm_flags_t newflags;
70545
70546 +#ifdef CONFIG_PAX_SEGMEXEC
70547 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70548 + break;
70549 +#endif
70550 +
70551 + BUG_ON(vma->vm_end > TASK_SIZE);
70552 newflags = vma->vm_flags | VM_LOCKED;
70553 if (!(flags & MCL_CURRENT))
70554 newflags &= ~VM_LOCKED;
70555 @@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
70556 lock_limit >>= PAGE_SHIFT;
70557
70558 ret = -ENOMEM;
70559 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
70560 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
70561 capable(CAP_IPC_LOCK))
70562 ret = do_mlockall(flags);
70563 diff --git a/mm/mmap.c b/mm/mmap.c
70564 index 848ef52..d2b586c 100644
70565 --- a/mm/mmap.c
70566 +++ b/mm/mmap.c
70567 @@ -46,6 +46,16 @@
70568 #define arch_rebalance_pgtables(addr, len) (addr)
70569 #endif
70570
70571 +static inline void verify_mm_writelocked(struct mm_struct *mm)
70572 +{
70573 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
70574 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
70575 + up_read(&mm->mmap_sem);
70576 + BUG();
70577 + }
70578 +#endif
70579 +}
70580 +
70581 static void unmap_region(struct mm_struct *mm,
70582 struct vm_area_struct *vma, struct vm_area_struct *prev,
70583 unsigned long start, unsigned long end);
70584 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
70585 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
70586 *
70587 */
70588 -pgprot_t protection_map[16] = {
70589 +pgprot_t protection_map[16] __read_only = {
70590 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
70591 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
70592 };
70593
70594 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
70595 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
70596 {
70597 - return __pgprot(pgprot_val(protection_map[vm_flags &
70598 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
70599 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
70600 pgprot_val(arch_vm_get_page_prot(vm_flags)));
70601 +
70602 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70603 + if (!(__supported_pte_mask & _PAGE_NX) &&
70604 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
70605 + (vm_flags & (VM_READ | VM_WRITE)))
70606 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
70607 +#endif
70608 +
70609 + return prot;
70610 }
70611 EXPORT_SYMBOL(vm_get_page_prot);
70612
70613 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
70614 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
70615 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
70616 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
70617 /*
70618 * Make sure vm_committed_as in one cacheline and not cacheline shared with
70619 * other variables. It can be updated by several CPUs frequently.
70620 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
70621 struct vm_area_struct *next = vma->vm_next;
70622
70623 might_sleep();
70624 + BUG_ON(vma->vm_mirror);
70625 if (vma->vm_ops && vma->vm_ops->close)
70626 vma->vm_ops->close(vma);
70627 if (vma->vm_file) {
70628 @@ -274,6 +295,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
70629 * not page aligned -Ram Gupta
70630 */
70631 rlim = rlimit(RLIMIT_DATA);
70632 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
70633 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
70634 (mm->end_data - mm->start_data) > rlim)
70635 goto out;
70636 @@ -690,6 +712,12 @@ static int
70637 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
70638 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70639 {
70640 +
70641 +#ifdef CONFIG_PAX_SEGMEXEC
70642 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
70643 + return 0;
70644 +#endif
70645 +
70646 if (is_mergeable_vma(vma, file, vm_flags) &&
70647 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70648 if (vma->vm_pgoff == vm_pgoff)
70649 @@ -709,6 +737,12 @@ static int
70650 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70651 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70652 {
70653 +
70654 +#ifdef CONFIG_PAX_SEGMEXEC
70655 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
70656 + return 0;
70657 +#endif
70658 +
70659 if (is_mergeable_vma(vma, file, vm_flags) &&
70660 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70661 pgoff_t vm_pglen;
70662 @@ -751,13 +785,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70663 struct vm_area_struct *vma_merge(struct mm_struct *mm,
70664 struct vm_area_struct *prev, unsigned long addr,
70665 unsigned long end, unsigned long vm_flags,
70666 - struct anon_vma *anon_vma, struct file *file,
70667 + struct anon_vma *anon_vma, struct file *file,
70668 pgoff_t pgoff, struct mempolicy *policy)
70669 {
70670 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
70671 struct vm_area_struct *area, *next;
70672 int err;
70673
70674 +#ifdef CONFIG_PAX_SEGMEXEC
70675 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
70676 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
70677 +
70678 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
70679 +#endif
70680 +
70681 /*
70682 * We later require that vma->vm_flags == vm_flags,
70683 * so this tests vma->vm_flags & VM_SPECIAL, too.
70684 @@ -773,6 +814,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70685 if (next && next->vm_end == end) /* cases 6, 7, 8 */
70686 next = next->vm_next;
70687
70688 +#ifdef CONFIG_PAX_SEGMEXEC
70689 + if (prev)
70690 + prev_m = pax_find_mirror_vma(prev);
70691 + if (area)
70692 + area_m = pax_find_mirror_vma(area);
70693 + if (next)
70694 + next_m = pax_find_mirror_vma(next);
70695 +#endif
70696 +
70697 /*
70698 * Can it merge with the predecessor?
70699 */
70700 @@ -792,9 +842,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70701 /* cases 1, 6 */
70702 err = vma_adjust(prev, prev->vm_start,
70703 next->vm_end, prev->vm_pgoff, NULL);
70704 - } else /* cases 2, 5, 7 */
70705 +
70706 +#ifdef CONFIG_PAX_SEGMEXEC
70707 + if (!err && prev_m)
70708 + err = vma_adjust(prev_m, prev_m->vm_start,
70709 + next_m->vm_end, prev_m->vm_pgoff, NULL);
70710 +#endif
70711 +
70712 + } else { /* cases 2, 5, 7 */
70713 err = vma_adjust(prev, prev->vm_start,
70714 end, prev->vm_pgoff, NULL);
70715 +
70716 +#ifdef CONFIG_PAX_SEGMEXEC
70717 + if (!err && prev_m)
70718 + err = vma_adjust(prev_m, prev_m->vm_start,
70719 + end_m, prev_m->vm_pgoff, NULL);
70720 +#endif
70721 +
70722 + }
70723 if (err)
70724 return NULL;
70725 khugepaged_enter_vma_merge(prev);
70726 @@ -808,12 +873,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70727 mpol_equal(policy, vma_policy(next)) &&
70728 can_vma_merge_before(next, vm_flags,
70729 anon_vma, file, pgoff+pglen)) {
70730 - if (prev && addr < prev->vm_end) /* case 4 */
70731 + if (prev && addr < prev->vm_end) { /* case 4 */
70732 err = vma_adjust(prev, prev->vm_start,
70733 addr, prev->vm_pgoff, NULL);
70734 - else /* cases 3, 8 */
70735 +
70736 +#ifdef CONFIG_PAX_SEGMEXEC
70737 + if (!err && prev_m)
70738 + err = vma_adjust(prev_m, prev_m->vm_start,
70739 + addr_m, prev_m->vm_pgoff, NULL);
70740 +#endif
70741 +
70742 + } else { /* cases 3, 8 */
70743 err = vma_adjust(area, addr, next->vm_end,
70744 next->vm_pgoff - pglen, NULL);
70745 +
70746 +#ifdef CONFIG_PAX_SEGMEXEC
70747 + if (!err && area_m)
70748 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
70749 + next_m->vm_pgoff - pglen, NULL);
70750 +#endif
70751 +
70752 + }
70753 if (err)
70754 return NULL;
70755 khugepaged_enter_vma_merge(area);
70756 @@ -922,14 +1002,11 @@ none:
70757 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
70758 struct file *file, long pages)
70759 {
70760 - const unsigned long stack_flags
70761 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
70762 -
70763 if (file) {
70764 mm->shared_vm += pages;
70765 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
70766 mm->exec_vm += pages;
70767 - } else if (flags & stack_flags)
70768 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
70769 mm->stack_vm += pages;
70770 if (flags & (VM_RESERVED|VM_IO))
70771 mm->reserved_vm += pages;
70772 @@ -969,7 +1046,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70773 * (the exception is when the underlying filesystem is noexec
70774 * mounted, in which case we dont add PROT_EXEC.)
70775 */
70776 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70777 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70778 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
70779 prot |= PROT_EXEC;
70780
70781 @@ -995,7 +1072,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70782 /* Obtain the address to map to. we verify (or select) it and ensure
70783 * that it represents a valid section of the address space.
70784 */
70785 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
70786 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
70787 if (addr & ~PAGE_MASK)
70788 return addr;
70789
70790 @@ -1006,6 +1083,36 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70791 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
70792 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
70793
70794 +#ifdef CONFIG_PAX_MPROTECT
70795 + if (mm->pax_flags & MF_PAX_MPROTECT) {
70796 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
70797 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
70798 + gr_log_rwxmmap(file);
70799 +
70800 +#ifdef CONFIG_PAX_EMUPLT
70801 + vm_flags &= ~VM_EXEC;
70802 +#else
70803 + return -EPERM;
70804 +#endif
70805 +
70806 + }
70807 +
70808 + if (!(vm_flags & VM_EXEC))
70809 + vm_flags &= ~VM_MAYEXEC;
70810 +#else
70811 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
70812 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
70813 +#endif
70814 + else
70815 + vm_flags &= ~VM_MAYWRITE;
70816 + }
70817 +#endif
70818 +
70819 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70820 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
70821 + vm_flags &= ~VM_PAGEEXEC;
70822 +#endif
70823 +
70824 if (flags & MAP_LOCKED)
70825 if (!can_do_mlock())
70826 return -EPERM;
70827 @@ -1017,6 +1124,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70828 locked += mm->locked_vm;
70829 lock_limit = rlimit(RLIMIT_MEMLOCK);
70830 lock_limit >>= PAGE_SHIFT;
70831 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
70832 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
70833 return -EAGAIN;
70834 }
70835 @@ -1087,6 +1195,9 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70836 if (error)
70837 return error;
70838
70839 + if (!gr_acl_handle_mmap(file, prot))
70840 + return -EACCES;
70841 +
70842 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
70843 }
70844
70845 @@ -1192,7 +1303,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
70846 vm_flags_t vm_flags = vma->vm_flags;
70847
70848 /* If it was private or non-writable, the write bit is already clear */
70849 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
70850 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
70851 return 0;
70852
70853 /* The backer wishes to know when pages are first written to? */
70854 @@ -1241,14 +1352,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
70855 unsigned long charged = 0;
70856 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
70857
70858 +#ifdef CONFIG_PAX_SEGMEXEC
70859 + struct vm_area_struct *vma_m = NULL;
70860 +#endif
70861 +
70862 + /*
70863 + * mm->mmap_sem is required to protect against another thread
70864 + * changing the mappings in case we sleep.
70865 + */
70866 + verify_mm_writelocked(mm);
70867 +
70868 /* Clear old maps */
70869 error = -ENOMEM;
70870 -munmap_back:
70871 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70872 if (vma && vma->vm_start < addr + len) {
70873 if (do_munmap(mm, addr, len))
70874 return -ENOMEM;
70875 - goto munmap_back;
70876 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70877 + BUG_ON(vma && vma->vm_start < addr + len);
70878 }
70879
70880 /* Check against address space limit. */
70881 @@ -1297,6 +1418,16 @@ munmap_back:
70882 goto unacct_error;
70883 }
70884
70885 +#ifdef CONFIG_PAX_SEGMEXEC
70886 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
70887 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70888 + if (!vma_m) {
70889 + error = -ENOMEM;
70890 + goto free_vma;
70891 + }
70892 + }
70893 +#endif
70894 +
70895 vma->vm_mm = mm;
70896 vma->vm_start = addr;
70897 vma->vm_end = addr + len;
70898 @@ -1321,6 +1452,19 @@ munmap_back:
70899 error = file->f_op->mmap(file, vma);
70900 if (error)
70901 goto unmap_and_free_vma;
70902 +
70903 +#ifdef CONFIG_PAX_SEGMEXEC
70904 + if (vma_m && (vm_flags & VM_EXECUTABLE))
70905 + added_exe_file_vma(mm);
70906 +#endif
70907 +
70908 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70909 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
70910 + vma->vm_flags |= VM_PAGEEXEC;
70911 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
70912 + }
70913 +#endif
70914 +
70915 if (vm_flags & VM_EXECUTABLE)
70916 added_exe_file_vma(mm);
70917
70918 @@ -1358,6 +1502,11 @@ munmap_back:
70919 vma_link(mm, vma, prev, rb_link, rb_parent);
70920 file = vma->vm_file;
70921
70922 +#ifdef CONFIG_PAX_SEGMEXEC
70923 + if (vma_m)
70924 + BUG_ON(pax_mirror_vma(vma_m, vma));
70925 +#endif
70926 +
70927 /* Once vma denies write, undo our temporary denial count */
70928 if (correct_wcount)
70929 atomic_inc(&inode->i_writecount);
70930 @@ -1366,6 +1515,7 @@ out:
70931
70932 mm->total_vm += len >> PAGE_SHIFT;
70933 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
70934 + track_exec_limit(mm, addr, addr + len, vm_flags);
70935 if (vm_flags & VM_LOCKED) {
70936 if (!mlock_vma_pages_range(vma, addr, addr + len))
70937 mm->locked_vm += (len >> PAGE_SHIFT);
70938 @@ -1383,6 +1533,12 @@ unmap_and_free_vma:
70939 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
70940 charged = 0;
70941 free_vma:
70942 +
70943 +#ifdef CONFIG_PAX_SEGMEXEC
70944 + if (vma_m)
70945 + kmem_cache_free(vm_area_cachep, vma_m);
70946 +#endif
70947 +
70948 kmem_cache_free(vm_area_cachep, vma);
70949 unacct_error:
70950 if (charged)
70951 @@ -1390,6 +1546,44 @@ unacct_error:
70952 return error;
70953 }
70954
70955 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
70956 +{
70957 + if (!vma) {
70958 +#ifdef CONFIG_STACK_GROWSUP
70959 + if (addr > sysctl_heap_stack_gap)
70960 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
70961 + else
70962 + vma = find_vma(current->mm, 0);
70963 + if (vma && (vma->vm_flags & VM_GROWSUP))
70964 + return false;
70965 +#endif
70966 + return true;
70967 + }
70968 +
70969 + if (addr + len > vma->vm_start)
70970 + return false;
70971 +
70972 + if (vma->vm_flags & VM_GROWSDOWN)
70973 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
70974 +#ifdef CONFIG_STACK_GROWSUP
70975 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
70976 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
70977 +#endif
70978 +
70979 + return true;
70980 +}
70981 +
70982 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
70983 +{
70984 + if (vma->vm_start < len)
70985 + return -ENOMEM;
70986 + if (!(vma->vm_flags & VM_GROWSDOWN))
70987 + return vma->vm_start - len;
70988 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
70989 + return vma->vm_start - len - sysctl_heap_stack_gap;
70990 + return -ENOMEM;
70991 +}
70992 +
70993 /* Get an address range which is currently unmapped.
70994 * For shmat() with addr=0.
70995 *
70996 @@ -1416,18 +1610,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
70997 if (flags & MAP_FIXED)
70998 return addr;
70999
71000 +#ifdef CONFIG_PAX_RANDMMAP
71001 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71002 +#endif
71003 +
71004 if (addr) {
71005 addr = PAGE_ALIGN(addr);
71006 - vma = find_vma(mm, addr);
71007 - if (TASK_SIZE - len >= addr &&
71008 - (!vma || addr + len <= vma->vm_start))
71009 - return addr;
71010 + if (TASK_SIZE - len >= addr) {
71011 + vma = find_vma(mm, addr);
71012 + if (check_heap_stack_gap(vma, addr, len))
71013 + return addr;
71014 + }
71015 }
71016 if (len > mm->cached_hole_size) {
71017 - start_addr = addr = mm->free_area_cache;
71018 + start_addr = addr = mm->free_area_cache;
71019 } else {
71020 - start_addr = addr = TASK_UNMAPPED_BASE;
71021 - mm->cached_hole_size = 0;
71022 + start_addr = addr = mm->mmap_base;
71023 + mm->cached_hole_size = 0;
71024 }
71025
71026 full_search:
71027 @@ -1438,34 +1637,40 @@ full_search:
71028 * Start a new search - just in case we missed
71029 * some holes.
71030 */
71031 - if (start_addr != TASK_UNMAPPED_BASE) {
71032 - addr = TASK_UNMAPPED_BASE;
71033 - start_addr = addr;
71034 + if (start_addr != mm->mmap_base) {
71035 + start_addr = addr = mm->mmap_base;
71036 mm->cached_hole_size = 0;
71037 goto full_search;
71038 }
71039 return -ENOMEM;
71040 }
71041 - if (!vma || addr + len <= vma->vm_start) {
71042 - /*
71043 - * Remember the place where we stopped the search:
71044 - */
71045 - mm->free_area_cache = addr + len;
71046 - return addr;
71047 - }
71048 + if (check_heap_stack_gap(vma, addr, len))
71049 + break;
71050 if (addr + mm->cached_hole_size < vma->vm_start)
71051 mm->cached_hole_size = vma->vm_start - addr;
71052 addr = vma->vm_end;
71053 }
71054 +
71055 + /*
71056 + * Remember the place where we stopped the search:
71057 + */
71058 + mm->free_area_cache = addr + len;
71059 + return addr;
71060 }
71061 #endif
71062
71063 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
71064 {
71065 +
71066 +#ifdef CONFIG_PAX_SEGMEXEC
71067 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71068 + return;
71069 +#endif
71070 +
71071 /*
71072 * Is this a new hole at the lowest possible address?
71073 */
71074 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
71075 + if (addr >= mm->mmap_base && addr < mm->free_area_cache)
71076 mm->free_area_cache = addr;
71077 }
71078
71079 @@ -1481,7 +1686,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71080 {
71081 struct vm_area_struct *vma;
71082 struct mm_struct *mm = current->mm;
71083 - unsigned long addr = addr0, start_addr;
71084 + unsigned long base = mm->mmap_base, addr = addr0, start_addr;
71085
71086 /* requested length too big for entire address space */
71087 if (len > TASK_SIZE)
71088 @@ -1490,13 +1695,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71089 if (flags & MAP_FIXED)
71090 return addr;
71091
71092 +#ifdef CONFIG_PAX_RANDMMAP
71093 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71094 +#endif
71095 +
71096 /* requesting a specific address */
71097 if (addr) {
71098 addr = PAGE_ALIGN(addr);
71099 - vma = find_vma(mm, addr);
71100 - if (TASK_SIZE - len >= addr &&
71101 - (!vma || addr + len <= vma->vm_start))
71102 - return addr;
71103 + if (TASK_SIZE - len >= addr) {
71104 + vma = find_vma(mm, addr);
71105 + if (check_heap_stack_gap(vma, addr, len))
71106 + return addr;
71107 + }
71108 }
71109
71110 /* check if free_area_cache is useful for us */
71111 @@ -1520,7 +1730,7 @@ try_again:
71112 * return with success:
71113 */
71114 vma = find_vma(mm, addr);
71115 - if (!vma || addr+len <= vma->vm_start)
71116 + if (check_heap_stack_gap(vma, addr, len))
71117 /* remember the address as a hint for next time */
71118 return (mm->free_area_cache = addr);
71119
71120 @@ -1529,8 +1739,8 @@ try_again:
71121 mm->cached_hole_size = vma->vm_start - addr;
71122
71123 /* try just below the current vma->vm_start */
71124 - addr = vma->vm_start-len;
71125 - } while (len < vma->vm_start);
71126 + addr = skip_heap_stack_gap(vma, len);
71127 + } while (!IS_ERR_VALUE(addr));
71128
71129 fail:
71130 /*
71131 @@ -1553,13 +1763,21 @@ fail:
71132 * can happen with large stack limits and large mmap()
71133 * allocations.
71134 */
71135 + mm->mmap_base = TASK_UNMAPPED_BASE;
71136 +
71137 +#ifdef CONFIG_PAX_RANDMMAP
71138 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71139 + mm->mmap_base += mm->delta_mmap;
71140 +#endif
71141 +
71142 + mm->free_area_cache = mm->mmap_base;
71143 mm->cached_hole_size = ~0UL;
71144 - mm->free_area_cache = TASK_UNMAPPED_BASE;
71145 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
71146 /*
71147 * Restore the topdown base:
71148 */
71149 - mm->free_area_cache = mm->mmap_base;
71150 + mm->mmap_base = base;
71151 + mm->free_area_cache = base;
71152 mm->cached_hole_size = ~0UL;
71153
71154 return addr;
71155 @@ -1568,6 +1786,12 @@ fail:
71156
71157 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71158 {
71159 +
71160 +#ifdef CONFIG_PAX_SEGMEXEC
71161 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71162 + return;
71163 +#endif
71164 +
71165 /*
71166 * Is this a new hole at the highest possible address?
71167 */
71168 @@ -1575,8 +1799,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71169 mm->free_area_cache = addr;
71170
71171 /* dont allow allocations above current base */
71172 - if (mm->free_area_cache > mm->mmap_base)
71173 + if (mm->free_area_cache > mm->mmap_base) {
71174 mm->free_area_cache = mm->mmap_base;
71175 + mm->cached_hole_size = ~0UL;
71176 + }
71177 }
71178
71179 unsigned long
71180 @@ -1672,6 +1898,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
71181 return vma;
71182 }
71183
71184 +#ifdef CONFIG_PAX_SEGMEXEC
71185 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
71186 +{
71187 + struct vm_area_struct *vma_m;
71188 +
71189 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
71190 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
71191 + BUG_ON(vma->vm_mirror);
71192 + return NULL;
71193 + }
71194 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
71195 + vma_m = vma->vm_mirror;
71196 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
71197 + BUG_ON(vma->vm_file != vma_m->vm_file);
71198 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
71199 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
71200 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
71201 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
71202 + return vma_m;
71203 +}
71204 +#endif
71205 +
71206 /*
71207 * Verify that the stack growth is acceptable and
71208 * update accounting. This is shared with both the
71209 @@ -1688,6 +1936,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71210 return -ENOMEM;
71211
71212 /* Stack limit test */
71213 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
71214 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
71215 return -ENOMEM;
71216
71217 @@ -1698,6 +1947,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71218 locked = mm->locked_vm + grow;
71219 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
71220 limit >>= PAGE_SHIFT;
71221 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71222 if (locked > limit && !capable(CAP_IPC_LOCK))
71223 return -ENOMEM;
71224 }
71225 @@ -1728,37 +1978,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71226 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
71227 * vma is the last one with address > vma->vm_end. Have to extend vma.
71228 */
71229 +#ifndef CONFIG_IA64
71230 +static
71231 +#endif
71232 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71233 {
71234 int error;
71235 + bool locknext;
71236
71237 if (!(vma->vm_flags & VM_GROWSUP))
71238 return -EFAULT;
71239
71240 + /* Also guard against wrapping around to address 0. */
71241 + if (address < PAGE_ALIGN(address+1))
71242 + address = PAGE_ALIGN(address+1);
71243 + else
71244 + return -ENOMEM;
71245 +
71246 /*
71247 * We must make sure the anon_vma is allocated
71248 * so that the anon_vma locking is not a noop.
71249 */
71250 if (unlikely(anon_vma_prepare(vma)))
71251 return -ENOMEM;
71252 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
71253 + if (locknext && anon_vma_prepare(vma->vm_next))
71254 + return -ENOMEM;
71255 vma_lock_anon_vma(vma);
71256 + if (locknext)
71257 + vma_lock_anon_vma(vma->vm_next);
71258
71259 /*
71260 * vma->vm_start/vm_end cannot change under us because the caller
71261 * is required to hold the mmap_sem in read mode. We need the
71262 - * anon_vma lock to serialize against concurrent expand_stacks.
71263 - * Also guard against wrapping around to address 0.
71264 + * anon_vma locks to serialize against concurrent expand_stacks
71265 + * and expand_upwards.
71266 */
71267 - if (address < PAGE_ALIGN(address+4))
71268 - address = PAGE_ALIGN(address+4);
71269 - else {
71270 - vma_unlock_anon_vma(vma);
71271 - return -ENOMEM;
71272 - }
71273 error = 0;
71274
71275 /* Somebody else might have raced and expanded it already */
71276 - if (address > vma->vm_end) {
71277 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
71278 + error = -ENOMEM;
71279 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
71280 unsigned long size, grow;
71281
71282 size = address - vma->vm_start;
71283 @@ -1773,6 +2034,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71284 }
71285 }
71286 }
71287 + if (locknext)
71288 + vma_unlock_anon_vma(vma->vm_next);
71289 vma_unlock_anon_vma(vma);
71290 khugepaged_enter_vma_merge(vma);
71291 return error;
71292 @@ -1786,6 +2049,8 @@ int expand_downwards(struct vm_area_struct *vma,
71293 unsigned long address)
71294 {
71295 int error;
71296 + bool lockprev = false;
71297 + struct vm_area_struct *prev;
71298
71299 /*
71300 * We must make sure the anon_vma is allocated
71301 @@ -1799,6 +2064,15 @@ int expand_downwards(struct vm_area_struct *vma,
71302 if (error)
71303 return error;
71304
71305 + prev = vma->vm_prev;
71306 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
71307 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
71308 +#endif
71309 + if (lockprev && anon_vma_prepare(prev))
71310 + return -ENOMEM;
71311 + if (lockprev)
71312 + vma_lock_anon_vma(prev);
71313 +
71314 vma_lock_anon_vma(vma);
71315
71316 /*
71317 @@ -1808,9 +2082,17 @@ int expand_downwards(struct vm_area_struct *vma,
71318 */
71319
71320 /* Somebody else might have raced and expanded it already */
71321 - if (address < vma->vm_start) {
71322 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
71323 + error = -ENOMEM;
71324 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
71325 unsigned long size, grow;
71326
71327 +#ifdef CONFIG_PAX_SEGMEXEC
71328 + struct vm_area_struct *vma_m;
71329 +
71330 + vma_m = pax_find_mirror_vma(vma);
71331 +#endif
71332 +
71333 size = vma->vm_end - address;
71334 grow = (vma->vm_start - address) >> PAGE_SHIFT;
71335
71336 @@ -1820,11 +2102,22 @@ int expand_downwards(struct vm_area_struct *vma,
71337 if (!error) {
71338 vma->vm_start = address;
71339 vma->vm_pgoff -= grow;
71340 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
71341 +
71342 +#ifdef CONFIG_PAX_SEGMEXEC
71343 + if (vma_m) {
71344 + vma_m->vm_start -= grow << PAGE_SHIFT;
71345 + vma_m->vm_pgoff -= grow;
71346 + }
71347 +#endif
71348 +
71349 perf_event_mmap(vma);
71350 }
71351 }
71352 }
71353 vma_unlock_anon_vma(vma);
71354 + if (lockprev)
71355 + vma_unlock_anon_vma(prev);
71356 khugepaged_enter_vma_merge(vma);
71357 return error;
71358 }
71359 @@ -1894,6 +2187,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
71360 do {
71361 long nrpages = vma_pages(vma);
71362
71363 +#ifdef CONFIG_PAX_SEGMEXEC
71364 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
71365 + vma = remove_vma(vma);
71366 + continue;
71367 + }
71368 +#endif
71369 +
71370 mm->total_vm -= nrpages;
71371 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
71372 vma = remove_vma(vma);
71373 @@ -1939,6 +2239,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
71374 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
71375 vma->vm_prev = NULL;
71376 do {
71377 +
71378 +#ifdef CONFIG_PAX_SEGMEXEC
71379 + if (vma->vm_mirror) {
71380 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
71381 + vma->vm_mirror->vm_mirror = NULL;
71382 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
71383 + vma->vm_mirror = NULL;
71384 + }
71385 +#endif
71386 +
71387 rb_erase(&vma->vm_rb, &mm->mm_rb);
71388 mm->map_count--;
71389 tail_vma = vma;
71390 @@ -1967,14 +2277,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71391 struct vm_area_struct *new;
71392 int err = -ENOMEM;
71393
71394 +#ifdef CONFIG_PAX_SEGMEXEC
71395 + struct vm_area_struct *vma_m, *new_m = NULL;
71396 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
71397 +#endif
71398 +
71399 if (is_vm_hugetlb_page(vma) && (addr &
71400 ~(huge_page_mask(hstate_vma(vma)))))
71401 return -EINVAL;
71402
71403 +#ifdef CONFIG_PAX_SEGMEXEC
71404 + vma_m = pax_find_mirror_vma(vma);
71405 +#endif
71406 +
71407 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71408 if (!new)
71409 goto out_err;
71410
71411 +#ifdef CONFIG_PAX_SEGMEXEC
71412 + if (vma_m) {
71413 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71414 + if (!new_m) {
71415 + kmem_cache_free(vm_area_cachep, new);
71416 + goto out_err;
71417 + }
71418 + }
71419 +#endif
71420 +
71421 /* most fields are the same, copy all, and then fixup */
71422 *new = *vma;
71423
71424 @@ -1987,6 +2316,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71425 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
71426 }
71427
71428 +#ifdef CONFIG_PAX_SEGMEXEC
71429 + if (vma_m) {
71430 + *new_m = *vma_m;
71431 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
71432 + new_m->vm_mirror = new;
71433 + new->vm_mirror = new_m;
71434 +
71435 + if (new_below)
71436 + new_m->vm_end = addr_m;
71437 + else {
71438 + new_m->vm_start = addr_m;
71439 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
71440 + }
71441 + }
71442 +#endif
71443 +
71444 pol = mpol_dup(vma_policy(vma));
71445 if (IS_ERR(pol)) {
71446 err = PTR_ERR(pol);
71447 @@ -2012,6 +2357,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71448 else
71449 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
71450
71451 +#ifdef CONFIG_PAX_SEGMEXEC
71452 + if (!err && vma_m) {
71453 + if (anon_vma_clone(new_m, vma_m))
71454 + goto out_free_mpol;
71455 +
71456 + mpol_get(pol);
71457 + vma_set_policy(new_m, pol);
71458 +
71459 + if (new_m->vm_file) {
71460 + get_file(new_m->vm_file);
71461 + if (vma_m->vm_flags & VM_EXECUTABLE)
71462 + added_exe_file_vma(mm);
71463 + }
71464 +
71465 + if (new_m->vm_ops && new_m->vm_ops->open)
71466 + new_m->vm_ops->open(new_m);
71467 +
71468 + if (new_below)
71469 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
71470 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
71471 + else
71472 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
71473 +
71474 + if (err) {
71475 + if (new_m->vm_ops && new_m->vm_ops->close)
71476 + new_m->vm_ops->close(new_m);
71477 + if (new_m->vm_file) {
71478 + if (vma_m->vm_flags & VM_EXECUTABLE)
71479 + removed_exe_file_vma(mm);
71480 + fput(new_m->vm_file);
71481 + }
71482 + mpol_put(pol);
71483 + }
71484 + }
71485 +#endif
71486 +
71487 /* Success. */
71488 if (!err)
71489 return 0;
71490 @@ -2024,10 +2405,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71491 removed_exe_file_vma(mm);
71492 fput(new->vm_file);
71493 }
71494 - unlink_anon_vmas(new);
71495 out_free_mpol:
71496 mpol_put(pol);
71497 out_free_vma:
71498 +
71499 +#ifdef CONFIG_PAX_SEGMEXEC
71500 + if (new_m) {
71501 + unlink_anon_vmas(new_m);
71502 + kmem_cache_free(vm_area_cachep, new_m);
71503 + }
71504 +#endif
71505 +
71506 + unlink_anon_vmas(new);
71507 kmem_cache_free(vm_area_cachep, new);
71508 out_err:
71509 return err;
71510 @@ -2040,6 +2429,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71511 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71512 unsigned long addr, int new_below)
71513 {
71514 +
71515 +#ifdef CONFIG_PAX_SEGMEXEC
71516 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71517 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
71518 + if (mm->map_count >= sysctl_max_map_count-1)
71519 + return -ENOMEM;
71520 + } else
71521 +#endif
71522 +
71523 if (mm->map_count >= sysctl_max_map_count)
71524 return -ENOMEM;
71525
71526 @@ -2051,11 +2449,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71527 * work. This now handles partial unmappings.
71528 * Jeremy Fitzhardinge <jeremy@goop.org>
71529 */
71530 +#ifdef CONFIG_PAX_SEGMEXEC
71531 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71532 {
71533 + int ret = __do_munmap(mm, start, len);
71534 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
71535 + return ret;
71536 +
71537 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
71538 +}
71539 +
71540 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71541 +#else
71542 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71543 +#endif
71544 +{
71545 unsigned long end;
71546 struct vm_area_struct *vma, *prev, *last;
71547
71548 + /*
71549 + * mm->mmap_sem is required to protect against another thread
71550 + * changing the mappings in case we sleep.
71551 + */
71552 + verify_mm_writelocked(mm);
71553 +
71554 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
71555 return -EINVAL;
71556
71557 @@ -2130,6 +2547,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71558 /* Fix up all other VM information */
71559 remove_vma_list(mm, vma);
71560
71561 + track_exec_limit(mm, start, end, 0UL);
71562 +
71563 return 0;
71564 }
71565 EXPORT_SYMBOL(do_munmap);
71566 @@ -2139,6 +2558,13 @@ int vm_munmap(unsigned long start, size_t len)
71567 int ret;
71568 struct mm_struct *mm = current->mm;
71569
71570 +
71571 +#ifdef CONFIG_PAX_SEGMEXEC
71572 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
71573 + (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
71574 + return -EINVAL;
71575 +#endif
71576 +
71577 down_write(&mm->mmap_sem);
71578 ret = do_munmap(mm, start, len);
71579 up_write(&mm->mmap_sem);
71580 @@ -2152,16 +2578,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
71581 return vm_munmap(addr, len);
71582 }
71583
71584 -static inline void verify_mm_writelocked(struct mm_struct *mm)
71585 -{
71586 -#ifdef CONFIG_DEBUG_VM
71587 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71588 - WARN_ON(1);
71589 - up_read(&mm->mmap_sem);
71590 - }
71591 -#endif
71592 -}
71593 -
71594 /*
71595 * this is really a simplified "do_mmap". it only handles
71596 * anonymous maps. eventually we may be able to do some
71597 @@ -2175,6 +2591,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71598 struct rb_node ** rb_link, * rb_parent;
71599 pgoff_t pgoff = addr >> PAGE_SHIFT;
71600 int error;
71601 + unsigned long charged;
71602
71603 len = PAGE_ALIGN(len);
71604 if (!len)
71605 @@ -2186,16 +2603,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71606
71607 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
71608
71609 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
71610 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
71611 + flags &= ~VM_EXEC;
71612 +
71613 +#ifdef CONFIG_PAX_MPROTECT
71614 + if (mm->pax_flags & MF_PAX_MPROTECT)
71615 + flags &= ~VM_MAYEXEC;
71616 +#endif
71617 +
71618 + }
71619 +#endif
71620 +
71621 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
71622 if (error & ~PAGE_MASK)
71623 return error;
71624
71625 + charged = len >> PAGE_SHIFT;
71626 +
71627 /*
71628 * mlock MCL_FUTURE?
71629 */
71630 if (mm->def_flags & VM_LOCKED) {
71631 unsigned long locked, lock_limit;
71632 - locked = len >> PAGE_SHIFT;
71633 + locked = charged;
71634 locked += mm->locked_vm;
71635 lock_limit = rlimit(RLIMIT_MEMLOCK);
71636 lock_limit >>= PAGE_SHIFT;
71637 @@ -2212,22 +2643,22 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71638 /*
71639 * Clear old maps. this also does some error checking for us
71640 */
71641 - munmap_back:
71642 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71643 if (vma && vma->vm_start < addr + len) {
71644 if (do_munmap(mm, addr, len))
71645 return -ENOMEM;
71646 - goto munmap_back;
71647 - }
71648 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71649 + BUG_ON(vma && vma->vm_start < addr + len);
71650 + }
71651
71652 /* Check against address space limits *after* clearing old maps... */
71653 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
71654 + if (!may_expand_vm(mm, charged))
71655 return -ENOMEM;
71656
71657 if (mm->map_count > sysctl_max_map_count)
71658 return -ENOMEM;
71659
71660 - if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
71661 + if (security_vm_enough_memory_mm(mm, charged))
71662 return -ENOMEM;
71663
71664 /* Can we just expand an old private anonymous mapping? */
71665 @@ -2241,7 +2672,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71666 */
71667 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71668 if (!vma) {
71669 - vm_unacct_memory(len >> PAGE_SHIFT);
71670 + vm_unacct_memory(charged);
71671 return -ENOMEM;
71672 }
71673
71674 @@ -2255,11 +2686,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71675 vma_link(mm, vma, prev, rb_link, rb_parent);
71676 out:
71677 perf_event_mmap(vma);
71678 - mm->total_vm += len >> PAGE_SHIFT;
71679 + mm->total_vm += charged;
71680 if (flags & VM_LOCKED) {
71681 if (!mlock_vma_pages_range(vma, addr, addr + len))
71682 - mm->locked_vm += (len >> PAGE_SHIFT);
71683 + mm->locked_vm += charged;
71684 }
71685 + track_exec_limit(mm, addr, addr + len, flags);
71686 return addr;
71687 }
71688
71689 @@ -2315,8 +2747,10 @@ void exit_mmap(struct mm_struct *mm)
71690 * Walk the list again, actually closing and freeing it,
71691 * with preemption enabled, without holding any MM locks.
71692 */
71693 - while (vma)
71694 + while (vma) {
71695 + vma->vm_mirror = NULL;
71696 vma = remove_vma(vma);
71697 + }
71698
71699 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
71700 }
71701 @@ -2330,6 +2764,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71702 struct vm_area_struct * __vma, * prev;
71703 struct rb_node ** rb_link, * rb_parent;
71704
71705 +#ifdef CONFIG_PAX_SEGMEXEC
71706 + struct vm_area_struct *vma_m = NULL;
71707 +#endif
71708 +
71709 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
71710 + return -EPERM;
71711 +
71712 /*
71713 * The vm_pgoff of a purely anonymous vma should be irrelevant
71714 * until its first write fault, when page's anon_vma and index
71715 @@ -2352,7 +2793,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71716 if ((vma->vm_flags & VM_ACCOUNT) &&
71717 security_vm_enough_memory_mm(mm, vma_pages(vma)))
71718 return -ENOMEM;
71719 +
71720 +#ifdef CONFIG_PAX_SEGMEXEC
71721 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
71722 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71723 + if (!vma_m)
71724 + return -ENOMEM;
71725 + }
71726 +#endif
71727 +
71728 vma_link(mm, vma, prev, rb_link, rb_parent);
71729 +
71730 +#ifdef CONFIG_PAX_SEGMEXEC
71731 + if (vma_m)
71732 + BUG_ON(pax_mirror_vma(vma_m, vma));
71733 +#endif
71734 +
71735 return 0;
71736 }
71737
71738 @@ -2371,6 +2827,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71739 struct mempolicy *pol;
71740 bool faulted_in_anon_vma = true;
71741
71742 + BUG_ON(vma->vm_mirror);
71743 +
71744 /*
71745 * If anonymous vma has not yet been faulted, update new pgoff
71746 * to match new location, to increase its chance of merging.
71747 @@ -2438,6 +2896,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71748 return NULL;
71749 }
71750
71751 +#ifdef CONFIG_PAX_SEGMEXEC
71752 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
71753 +{
71754 + struct vm_area_struct *prev_m;
71755 + struct rb_node **rb_link_m, *rb_parent_m;
71756 + struct mempolicy *pol_m;
71757 +
71758 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
71759 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
71760 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
71761 + *vma_m = *vma;
71762 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
71763 + if (anon_vma_clone(vma_m, vma))
71764 + return -ENOMEM;
71765 + pol_m = vma_policy(vma_m);
71766 + mpol_get(pol_m);
71767 + vma_set_policy(vma_m, pol_m);
71768 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
71769 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
71770 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
71771 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
71772 + if (vma_m->vm_file)
71773 + get_file(vma_m->vm_file);
71774 + if (vma_m->vm_ops && vma_m->vm_ops->open)
71775 + vma_m->vm_ops->open(vma_m);
71776 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
71777 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
71778 + vma_m->vm_mirror = vma;
71779 + vma->vm_mirror = vma_m;
71780 + return 0;
71781 +}
71782 +#endif
71783 +
71784 /*
71785 * Return true if the calling process may expand its vm space by the passed
71786 * number of pages
71787 @@ -2449,6 +2940,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
71788
71789 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
71790
71791 +#ifdef CONFIG_PAX_RANDMMAP
71792 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71793 + cur -= mm->brk_gap;
71794 +#endif
71795 +
71796 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
71797 if (cur + npages > lim)
71798 return 0;
71799 return 1;
71800 @@ -2519,6 +3016,22 @@ int install_special_mapping(struct mm_struct *mm,
71801 vma->vm_start = addr;
71802 vma->vm_end = addr + len;
71803
71804 +#ifdef CONFIG_PAX_MPROTECT
71805 + if (mm->pax_flags & MF_PAX_MPROTECT) {
71806 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
71807 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
71808 + return -EPERM;
71809 + if (!(vm_flags & VM_EXEC))
71810 + vm_flags &= ~VM_MAYEXEC;
71811 +#else
71812 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71813 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71814 +#endif
71815 + else
71816 + vm_flags &= ~VM_MAYWRITE;
71817 + }
71818 +#endif
71819 +
71820 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
71821 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71822
71823 diff --git a/mm/mprotect.c b/mm/mprotect.c
71824 index a409926..8b32e6d 100644
71825 --- a/mm/mprotect.c
71826 +++ b/mm/mprotect.c
71827 @@ -23,10 +23,17 @@
71828 #include <linux/mmu_notifier.h>
71829 #include <linux/migrate.h>
71830 #include <linux/perf_event.h>
71831 +
71832 +#ifdef CONFIG_PAX_MPROTECT
71833 +#include <linux/elf.h>
71834 +#include <linux/binfmts.h>
71835 +#endif
71836 +
71837 #include <asm/uaccess.h>
71838 #include <asm/pgtable.h>
71839 #include <asm/cacheflush.h>
71840 #include <asm/tlbflush.h>
71841 +#include <asm/mmu_context.h>
71842
71843 #ifndef pgprot_modify
71844 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
71845 @@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
71846 flush_tlb_range(vma, start, end);
71847 }
71848
71849 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
71850 +/* called while holding the mmap semaphor for writing except stack expansion */
71851 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
71852 +{
71853 + unsigned long oldlimit, newlimit = 0UL;
71854 +
71855 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
71856 + return;
71857 +
71858 + spin_lock(&mm->page_table_lock);
71859 + oldlimit = mm->context.user_cs_limit;
71860 + if ((prot & VM_EXEC) && oldlimit < end)
71861 + /* USER_CS limit moved up */
71862 + newlimit = end;
71863 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
71864 + /* USER_CS limit moved down */
71865 + newlimit = start;
71866 +
71867 + if (newlimit) {
71868 + mm->context.user_cs_limit = newlimit;
71869 +
71870 +#ifdef CONFIG_SMP
71871 + wmb();
71872 + cpus_clear(mm->context.cpu_user_cs_mask);
71873 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
71874 +#endif
71875 +
71876 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
71877 + }
71878 + spin_unlock(&mm->page_table_lock);
71879 + if (newlimit == end) {
71880 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
71881 +
71882 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
71883 + if (is_vm_hugetlb_page(vma))
71884 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
71885 + else
71886 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
71887 + }
71888 +}
71889 +#endif
71890 +
71891 int
71892 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71893 unsigned long start, unsigned long end, unsigned long newflags)
71894 @@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71895 int error;
71896 int dirty_accountable = 0;
71897
71898 +#ifdef CONFIG_PAX_SEGMEXEC
71899 + struct vm_area_struct *vma_m = NULL;
71900 + unsigned long start_m, end_m;
71901 +
71902 + start_m = start + SEGMEXEC_TASK_SIZE;
71903 + end_m = end + SEGMEXEC_TASK_SIZE;
71904 +#endif
71905 +
71906 if (newflags == oldflags) {
71907 *pprev = vma;
71908 return 0;
71909 }
71910
71911 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
71912 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
71913 +
71914 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
71915 + return -ENOMEM;
71916 +
71917 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
71918 + return -ENOMEM;
71919 + }
71920 +
71921 /*
71922 * If we make a private mapping writable we increase our commit;
71923 * but (without finer accounting) cannot reduce our commit if we
71924 @@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71925 }
71926 }
71927
71928 +#ifdef CONFIG_PAX_SEGMEXEC
71929 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
71930 + if (start != vma->vm_start) {
71931 + error = split_vma(mm, vma, start, 1);
71932 + if (error)
71933 + goto fail;
71934 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
71935 + *pprev = (*pprev)->vm_next;
71936 + }
71937 +
71938 + if (end != vma->vm_end) {
71939 + error = split_vma(mm, vma, end, 0);
71940 + if (error)
71941 + goto fail;
71942 + }
71943 +
71944 + if (pax_find_mirror_vma(vma)) {
71945 + error = __do_munmap(mm, start_m, end_m - start_m);
71946 + if (error)
71947 + goto fail;
71948 + } else {
71949 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71950 + if (!vma_m) {
71951 + error = -ENOMEM;
71952 + goto fail;
71953 + }
71954 + vma->vm_flags = newflags;
71955 + error = pax_mirror_vma(vma_m, vma);
71956 + if (error) {
71957 + vma->vm_flags = oldflags;
71958 + goto fail;
71959 + }
71960 + }
71961 + }
71962 +#endif
71963 +
71964 /*
71965 * First try to merge with previous and/or next vma.
71966 */
71967 @@ -204,9 +307,21 @@ success:
71968 * vm_flags and vm_page_prot are protected by the mmap_sem
71969 * held in write mode.
71970 */
71971 +
71972 +#ifdef CONFIG_PAX_SEGMEXEC
71973 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
71974 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
71975 +#endif
71976 +
71977 vma->vm_flags = newflags;
71978 +
71979 +#ifdef CONFIG_PAX_MPROTECT
71980 + if (mm->binfmt && mm->binfmt->handle_mprotect)
71981 + mm->binfmt->handle_mprotect(vma, newflags);
71982 +#endif
71983 +
71984 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
71985 - vm_get_page_prot(newflags));
71986 + vm_get_page_prot(vma->vm_flags));
71987
71988 if (vma_wants_writenotify(vma)) {
71989 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
71990 @@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71991 end = start + len;
71992 if (end <= start)
71993 return -ENOMEM;
71994 +
71995 +#ifdef CONFIG_PAX_SEGMEXEC
71996 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
71997 + if (end > SEGMEXEC_TASK_SIZE)
71998 + return -EINVAL;
71999 + } else
72000 +#endif
72001 +
72002 + if (end > TASK_SIZE)
72003 + return -EINVAL;
72004 +
72005 if (!arch_validate_prot(prot))
72006 return -EINVAL;
72007
72008 @@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72009 /*
72010 * Does the application expect PROT_READ to imply PROT_EXEC:
72011 */
72012 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72013 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72014 prot |= PROT_EXEC;
72015
72016 vm_flags = calc_vm_prot_bits(prot);
72017 @@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72018 if (start > vma->vm_start)
72019 prev = vma;
72020
72021 +#ifdef CONFIG_PAX_MPROTECT
72022 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
72023 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
72024 +#endif
72025 +
72026 for (nstart = start ; ; ) {
72027 unsigned long newflags;
72028
72029 @@ -297,6 +428,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72030
72031 /* newflags >> 4 shift VM_MAY% in place of VM_% */
72032 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
72033 + if (prot & (PROT_WRITE | PROT_EXEC))
72034 + gr_log_rwxmprotect(vma->vm_file);
72035 +
72036 + error = -EACCES;
72037 + goto out;
72038 + }
72039 +
72040 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
72041 error = -EACCES;
72042 goto out;
72043 }
72044 @@ -311,6 +450,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72045 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
72046 if (error)
72047 goto out;
72048 +
72049 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
72050 +
72051 nstart = tmp;
72052
72053 if (nstart < prev->vm_end)
72054 diff --git a/mm/mremap.c b/mm/mremap.c
72055 index db8d983..76506cb 100644
72056 --- a/mm/mremap.c
72057 +++ b/mm/mremap.c
72058 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
72059 continue;
72060 pte = ptep_get_and_clear(mm, old_addr, old_pte);
72061 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
72062 +
72063 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72064 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
72065 + pte = pte_exprotect(pte);
72066 +#endif
72067 +
72068 set_pte_at(mm, new_addr, new_pte, pte);
72069 }
72070
72071 @@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
72072 if (is_vm_hugetlb_page(vma))
72073 goto Einval;
72074
72075 +#ifdef CONFIG_PAX_SEGMEXEC
72076 + if (pax_find_mirror_vma(vma))
72077 + goto Einval;
72078 +#endif
72079 +
72080 /* We can't remap across vm area boundaries */
72081 if (old_len > vma->vm_end - addr)
72082 goto Efault;
72083 @@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
72084 unsigned long ret = -EINVAL;
72085 unsigned long charged = 0;
72086 unsigned long map_flags;
72087 + unsigned long pax_task_size = TASK_SIZE;
72088
72089 if (new_addr & ~PAGE_MASK)
72090 goto out;
72091
72092 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
72093 +#ifdef CONFIG_PAX_SEGMEXEC
72094 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72095 + pax_task_size = SEGMEXEC_TASK_SIZE;
72096 +#endif
72097 +
72098 + pax_task_size -= PAGE_SIZE;
72099 +
72100 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
72101 goto out;
72102
72103 /* Check if the location we're moving into overlaps the
72104 * old location at all, and fail if it does.
72105 */
72106 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
72107 - goto out;
72108 -
72109 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
72110 + if (addr + old_len > new_addr && new_addr + new_len > addr)
72111 goto out;
72112
72113 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72114 @@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
72115 struct vm_area_struct *vma;
72116 unsigned long ret = -EINVAL;
72117 unsigned long charged = 0;
72118 + unsigned long pax_task_size = TASK_SIZE;
72119
72120 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
72121 goto out;
72122 @@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
72123 if (!new_len)
72124 goto out;
72125
72126 +#ifdef CONFIG_PAX_SEGMEXEC
72127 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72128 + pax_task_size = SEGMEXEC_TASK_SIZE;
72129 +#endif
72130 +
72131 + pax_task_size -= PAGE_SIZE;
72132 +
72133 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72134 + old_len > pax_task_size || addr > pax_task_size-old_len)
72135 + goto out;
72136 +
72137 if (flags & MREMAP_FIXED) {
72138 if (flags & MREMAP_MAYMOVE)
72139 ret = mremap_to(addr, old_len, new_addr, new_len);
72140 @@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
72141 addr + new_len);
72142 }
72143 ret = addr;
72144 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
72145 goto out;
72146 }
72147 }
72148 @@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
72149 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72150 if (ret)
72151 goto out;
72152 +
72153 + map_flags = vma->vm_flags;
72154 ret = move_vma(vma, addr, old_len, new_len, new_addr);
72155 + if (!(ret & ~PAGE_MASK)) {
72156 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
72157 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
72158 + }
72159 }
72160 out:
72161 if (ret & ~PAGE_MASK)
72162 diff --git a/mm/nommu.c b/mm/nommu.c
72163 index bb8f4f0..40d3e02 100644
72164 --- a/mm/nommu.c
72165 +++ b/mm/nommu.c
72166 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72167 int sysctl_overcommit_ratio = 50; /* default is 50% */
72168 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
72169 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
72170 -int heap_stack_gap = 0;
72171
72172 atomic_long_t mmap_pages_allocated;
72173
72174 @@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
72175 EXPORT_SYMBOL(find_vma);
72176
72177 /*
72178 - * find a VMA
72179 - * - we don't extend stack VMAs under NOMMU conditions
72180 - */
72181 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
72182 -{
72183 - return find_vma(mm, addr);
72184 -}
72185 -
72186 -/*
72187 * expand a stack to a given address
72188 * - not supported under NOMMU conditions
72189 */
72190 @@ -1580,6 +1570,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72191
72192 /* most fields are the same, copy all, and then fixup */
72193 *new = *vma;
72194 + INIT_LIST_HEAD(&new->anon_vma_chain);
72195 *region = *vma->vm_region;
72196 new->vm_region = region;
72197
72198 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
72199 index 918330f..ae99ae1 100644
72200 --- a/mm/page_alloc.c
72201 +++ b/mm/page_alloc.c
72202 @@ -335,7 +335,7 @@ out:
72203 * This usage means that zero-order pages may not be compound.
72204 */
72205
72206 -static void free_compound_page(struct page *page)
72207 +void free_compound_page(struct page *page)
72208 {
72209 __free_pages_ok(page, compound_order(page));
72210 }
72211 @@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72212 int i;
72213 int bad = 0;
72214
72215 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72216 + unsigned long index = 1UL << order;
72217 +#endif
72218 +
72219 trace_mm_page_free(page, order);
72220 kmemcheck_free_shadow(page, order);
72221
72222 @@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72223 debug_check_no_obj_freed(page_address(page),
72224 PAGE_SIZE << order);
72225 }
72226 +
72227 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72228 + for (; index; --index)
72229 + sanitize_highpage(page + index - 1);
72230 +#endif
72231 +
72232 arch_free_page(page, order);
72233 kernel_map_pages(page, 1 << order, 0);
72234
72235 @@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
72236 arch_alloc_page(page, order);
72237 kernel_map_pages(page, 1 << order, 1);
72238
72239 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
72240 if (gfp_flags & __GFP_ZERO)
72241 prep_zero_page(page, order, gfp_flags);
72242 +#endif
72243
72244 if (order && (gfp_flags & __GFP_COMP))
72245 prep_compound_page(page, order);
72246 @@ -3523,7 +3535,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
72247 unsigned long pfn;
72248
72249 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
72250 +#ifdef CONFIG_X86_32
72251 + /* boot failures in VMware 8 on 32bit vanilla since
72252 + this change */
72253 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
72254 +#else
72255 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
72256 +#endif
72257 return 1;
72258 }
72259 return 0;
72260 diff --git a/mm/percpu.c b/mm/percpu.c
72261 index bb4be74..a43ea85 100644
72262 --- a/mm/percpu.c
72263 +++ b/mm/percpu.c
72264 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
72265 static unsigned int pcpu_high_unit_cpu __read_mostly;
72266
72267 /* the address of the first chunk which starts with the kernel static area */
72268 -void *pcpu_base_addr __read_mostly;
72269 +void *pcpu_base_addr __read_only;
72270 EXPORT_SYMBOL_GPL(pcpu_base_addr);
72271
72272 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
72273 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
72274 index c20ff48..137702a 100644
72275 --- a/mm/process_vm_access.c
72276 +++ b/mm/process_vm_access.c
72277 @@ -13,6 +13,7 @@
72278 #include <linux/uio.h>
72279 #include <linux/sched.h>
72280 #include <linux/highmem.h>
72281 +#include <linux/security.h>
72282 #include <linux/ptrace.h>
72283 #include <linux/slab.h>
72284 #include <linux/syscalls.h>
72285 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
72286 size_t iov_l_curr_offset = 0;
72287 ssize_t iov_len;
72288
72289 + return -ENOSYS; // PaX: until properly audited
72290 +
72291 /*
72292 * Work out how many pages of struct pages we're going to need
72293 * when eventually calling get_user_pages
72294 */
72295 for (i = 0; i < riovcnt; i++) {
72296 iov_len = rvec[i].iov_len;
72297 - if (iov_len > 0) {
72298 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
72299 - + iov_len)
72300 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
72301 - / PAGE_SIZE + 1;
72302 - nr_pages = max(nr_pages, nr_pages_iov);
72303 - }
72304 + if (iov_len <= 0)
72305 + continue;
72306 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
72307 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
72308 + nr_pages = max(nr_pages, nr_pages_iov);
72309 }
72310
72311 if (nr_pages == 0)
72312 @@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
72313 goto free_proc_pages;
72314 }
72315
72316 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
72317 + rc = -EPERM;
72318 + goto put_task_struct;
72319 + }
72320 +
72321 mm = mm_access(task, PTRACE_MODE_ATTACH);
72322 if (!mm || IS_ERR(mm)) {
72323 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
72324 diff --git a/mm/rmap.c b/mm/rmap.c
72325 index 5b5ad58..0f77903 100644
72326 --- a/mm/rmap.c
72327 +++ b/mm/rmap.c
72328 @@ -167,6 +167,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72329 struct anon_vma *anon_vma = vma->anon_vma;
72330 struct anon_vma_chain *avc;
72331
72332 +#ifdef CONFIG_PAX_SEGMEXEC
72333 + struct anon_vma_chain *avc_m = NULL;
72334 +#endif
72335 +
72336 might_sleep();
72337 if (unlikely(!anon_vma)) {
72338 struct mm_struct *mm = vma->vm_mm;
72339 @@ -176,6 +180,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72340 if (!avc)
72341 goto out_enomem;
72342
72343 +#ifdef CONFIG_PAX_SEGMEXEC
72344 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
72345 + if (!avc_m)
72346 + goto out_enomem_free_avc;
72347 +#endif
72348 +
72349 anon_vma = find_mergeable_anon_vma(vma);
72350 allocated = NULL;
72351 if (!anon_vma) {
72352 @@ -189,6 +199,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72353 /* page_table_lock to protect against threads */
72354 spin_lock(&mm->page_table_lock);
72355 if (likely(!vma->anon_vma)) {
72356 +
72357 +#ifdef CONFIG_PAX_SEGMEXEC
72358 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
72359 +
72360 + if (vma_m) {
72361 + BUG_ON(vma_m->anon_vma);
72362 + vma_m->anon_vma = anon_vma;
72363 + anon_vma_chain_link(vma_m, avc_m, anon_vma);
72364 + avc_m = NULL;
72365 + }
72366 +#endif
72367 +
72368 vma->anon_vma = anon_vma;
72369 anon_vma_chain_link(vma, avc, anon_vma);
72370 allocated = NULL;
72371 @@ -199,12 +221,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72372
72373 if (unlikely(allocated))
72374 put_anon_vma(allocated);
72375 +
72376 +#ifdef CONFIG_PAX_SEGMEXEC
72377 + if (unlikely(avc_m))
72378 + anon_vma_chain_free(avc_m);
72379 +#endif
72380 +
72381 if (unlikely(avc))
72382 anon_vma_chain_free(avc);
72383 }
72384 return 0;
72385
72386 out_enomem_free_avc:
72387 +
72388 +#ifdef CONFIG_PAX_SEGMEXEC
72389 + if (avc_m)
72390 + anon_vma_chain_free(avc_m);
72391 +#endif
72392 +
72393 anon_vma_chain_free(avc);
72394 out_enomem:
72395 return -ENOMEM;
72396 @@ -240,7 +274,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
72397 * Attach the anon_vmas from src to dst.
72398 * Returns 0 on success, -ENOMEM on failure.
72399 */
72400 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
72401 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
72402 {
72403 struct anon_vma_chain *avc, *pavc;
72404 struct anon_vma *root = NULL;
72405 @@ -318,7 +352,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
72406 * the corresponding VMA in the parent process is attached to.
72407 * Returns 0 on success, non-zero on failure.
72408 */
72409 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
72410 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
72411 {
72412 struct anon_vma_chain *avc;
72413 struct anon_vma *anon_vma;
72414 diff --git a/mm/shmem.c b/mm/shmem.c
72415 index f99ff3e..faea8b6 100644
72416 --- a/mm/shmem.c
72417 +++ b/mm/shmem.c
72418 @@ -31,7 +31,7 @@
72419 #include <linux/export.h>
72420 #include <linux/swap.h>
72421
72422 -static struct vfsmount *shm_mnt;
72423 +struct vfsmount *shm_mnt;
72424
72425 #ifdef CONFIG_SHMEM
72426 /*
72427 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
72428 #define BOGO_DIRENT_SIZE 20
72429
72430 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
72431 -#define SHORT_SYMLINK_LEN 128
72432 +#define SHORT_SYMLINK_LEN 64
72433
72434 struct shmem_xattr {
72435 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
72436 @@ -2235,8 +2235,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
72437 int err = -ENOMEM;
72438
72439 /* Round up to L1_CACHE_BYTES to resist false sharing */
72440 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
72441 - L1_CACHE_BYTES), GFP_KERNEL);
72442 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
72443 if (!sbinfo)
72444 return -ENOMEM;
72445
72446 diff --git a/mm/slab.c b/mm/slab.c
72447 index e901a36..ee8fe97 100644
72448 --- a/mm/slab.c
72449 +++ b/mm/slab.c
72450 @@ -153,7 +153,7 @@
72451
72452 /* Legal flag mask for kmem_cache_create(). */
72453 #if DEBUG
72454 -# define CREATE_MASK (SLAB_RED_ZONE | \
72455 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
72456 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
72457 SLAB_CACHE_DMA | \
72458 SLAB_STORE_USER | \
72459 @@ -161,7 +161,7 @@
72460 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72461 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
72462 #else
72463 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
72464 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
72465 SLAB_CACHE_DMA | \
72466 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
72467 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72468 @@ -290,7 +290,7 @@ struct kmem_list3 {
72469 * Need this for bootstrapping a per node allocator.
72470 */
72471 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
72472 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
72473 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
72474 #define CACHE_CACHE 0
72475 #define SIZE_AC MAX_NUMNODES
72476 #define SIZE_L3 (2 * MAX_NUMNODES)
72477 @@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
72478 if ((x)->max_freeable < i) \
72479 (x)->max_freeable = i; \
72480 } while (0)
72481 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
72482 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
72483 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
72484 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
72485 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
72486 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
72487 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
72488 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
72489 #else
72490 #define STATS_INC_ACTIVE(x) do { } while (0)
72491 #define STATS_DEC_ACTIVE(x) do { } while (0)
72492 @@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
72493 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
72494 */
72495 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
72496 - const struct slab *slab, void *obj)
72497 + const struct slab *slab, const void *obj)
72498 {
72499 u32 offset = (obj - slab->s_mem);
72500 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
72501 @@ -568,7 +568,7 @@ struct cache_names {
72502 static struct cache_names __initdata cache_names[] = {
72503 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
72504 #include <linux/kmalloc_sizes.h>
72505 - {NULL,}
72506 + {NULL}
72507 #undef CACHE
72508 };
72509
72510 @@ -1588,7 +1588,7 @@ void __init kmem_cache_init(void)
72511 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
72512 sizes[INDEX_AC].cs_size,
72513 ARCH_KMALLOC_MINALIGN,
72514 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72515 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72516 NULL);
72517
72518 if (INDEX_AC != INDEX_L3) {
72519 @@ -1596,7 +1596,7 @@ void __init kmem_cache_init(void)
72520 kmem_cache_create(names[INDEX_L3].name,
72521 sizes[INDEX_L3].cs_size,
72522 ARCH_KMALLOC_MINALIGN,
72523 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72524 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72525 NULL);
72526 }
72527
72528 @@ -1614,7 +1614,7 @@ void __init kmem_cache_init(void)
72529 sizes->cs_cachep = kmem_cache_create(names->name,
72530 sizes->cs_size,
72531 ARCH_KMALLOC_MINALIGN,
72532 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72533 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72534 NULL);
72535 }
72536 #ifdef CONFIG_ZONE_DMA
72537 @@ -4390,10 +4390,10 @@ static int s_show(struct seq_file *m, void *p)
72538 }
72539 /* cpu stats */
72540 {
72541 - unsigned long allochit = atomic_read(&cachep->allochit);
72542 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
72543 - unsigned long freehit = atomic_read(&cachep->freehit);
72544 - unsigned long freemiss = atomic_read(&cachep->freemiss);
72545 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
72546 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
72547 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
72548 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
72549
72550 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
72551 allochit, allocmiss, freehit, freemiss);
72552 @@ -4652,13 +4652,62 @@ static int __init slab_proc_init(void)
72553 {
72554 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
72555 #ifdef CONFIG_DEBUG_SLAB_LEAK
72556 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
72557 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
72558 #endif
72559 return 0;
72560 }
72561 module_init(slab_proc_init);
72562 #endif
72563
72564 +void check_object_size(const void *ptr, unsigned long n, bool to)
72565 +{
72566 +
72567 +#ifdef CONFIG_PAX_USERCOPY
72568 + struct page *page;
72569 + struct kmem_cache *cachep = NULL;
72570 + struct slab *slabp;
72571 + unsigned int objnr;
72572 + unsigned long offset;
72573 + const char *type;
72574 +
72575 + if (!n)
72576 + return;
72577 +
72578 + type = "<null>";
72579 + if (ZERO_OR_NULL_PTR(ptr))
72580 + goto report;
72581 +
72582 + if (!virt_addr_valid(ptr))
72583 + return;
72584 +
72585 + page = virt_to_head_page(ptr);
72586 +
72587 + type = "<process stack>";
72588 + if (!PageSlab(page)) {
72589 + if (object_is_on_stack(ptr, n) == -1)
72590 + goto report;
72591 + return;
72592 + }
72593 +
72594 + cachep = page_get_cache(page);
72595 + type = cachep->name;
72596 + if (!(cachep->flags & SLAB_USERCOPY))
72597 + goto report;
72598 +
72599 + slabp = page_get_slab(page);
72600 + objnr = obj_to_index(cachep, slabp, ptr);
72601 + BUG_ON(objnr >= cachep->num);
72602 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
72603 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
72604 + return;
72605 +
72606 +report:
72607 + pax_report_usercopy(ptr, n, to, type);
72608 +#endif
72609 +
72610 +}
72611 +EXPORT_SYMBOL(check_object_size);
72612 +
72613 /**
72614 * ksize - get the actual amount of memory allocated for a given object
72615 * @objp: Pointer to the object
72616 diff --git a/mm/slob.c b/mm/slob.c
72617 index 8105be4..e045f96 100644
72618 --- a/mm/slob.c
72619 +++ b/mm/slob.c
72620 @@ -29,7 +29,7 @@
72621 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
72622 * alloc_pages() directly, allocating compound pages so the page order
72623 * does not have to be separately tracked, and also stores the exact
72624 - * allocation size in page->private so that it can be used to accurately
72625 + * allocation size in slob_page->size so that it can be used to accurately
72626 * provide ksize(). These objects are detected in kfree() because slob_page()
72627 * is false for them.
72628 *
72629 @@ -58,6 +58,7 @@
72630 */
72631
72632 #include <linux/kernel.h>
72633 +#include <linux/sched.h>
72634 #include <linux/slab.h>
72635 #include <linux/mm.h>
72636 #include <linux/swap.h> /* struct reclaim_state */
72637 @@ -102,7 +103,8 @@ struct slob_page {
72638 unsigned long flags; /* mandatory */
72639 atomic_t _count; /* mandatory */
72640 slobidx_t units; /* free units left in page */
72641 - unsigned long pad[2];
72642 + unsigned long pad[1];
72643 + unsigned long size; /* size when >=PAGE_SIZE */
72644 slob_t *free; /* first free slob_t in page */
72645 struct list_head list; /* linked list of free pages */
72646 };
72647 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
72648 */
72649 static inline int is_slob_page(struct slob_page *sp)
72650 {
72651 - return PageSlab((struct page *)sp);
72652 + return PageSlab((struct page *)sp) && !sp->size;
72653 }
72654
72655 static inline void set_slob_page(struct slob_page *sp)
72656 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
72657
72658 static inline struct slob_page *slob_page(const void *addr)
72659 {
72660 - return (struct slob_page *)virt_to_page(addr);
72661 + return (struct slob_page *)virt_to_head_page(addr);
72662 }
72663
72664 /*
72665 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
72666 /*
72667 * Return the size of a slob block.
72668 */
72669 -static slobidx_t slob_units(slob_t *s)
72670 +static slobidx_t slob_units(const slob_t *s)
72671 {
72672 if (s->units > 0)
72673 return s->units;
72674 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
72675 /*
72676 * Return the next free slob block pointer after this one.
72677 */
72678 -static slob_t *slob_next(slob_t *s)
72679 +static slob_t *slob_next(const slob_t *s)
72680 {
72681 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
72682 slobidx_t next;
72683 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
72684 /*
72685 * Returns true if s is the last free block in its page.
72686 */
72687 -static int slob_last(slob_t *s)
72688 +static int slob_last(const slob_t *s)
72689 {
72690 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
72691 }
72692 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
72693 if (!page)
72694 return NULL;
72695
72696 + set_slob_page(page);
72697 return page_address(page);
72698 }
72699
72700 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
72701 if (!b)
72702 return NULL;
72703 sp = slob_page(b);
72704 - set_slob_page(sp);
72705
72706 spin_lock_irqsave(&slob_lock, flags);
72707 sp->units = SLOB_UNITS(PAGE_SIZE);
72708 sp->free = b;
72709 + sp->size = 0;
72710 INIT_LIST_HEAD(&sp->list);
72711 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
72712 set_slob_page_free(sp, slob_list);
72713 @@ -476,10 +479,9 @@ out:
72714 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
72715 */
72716
72717 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72718 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
72719 {
72720 - unsigned int *m;
72721 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72722 + slob_t *m;
72723 void *ret;
72724
72725 gfp &= gfp_allowed_mask;
72726 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72727
72728 if (!m)
72729 return NULL;
72730 - *m = size;
72731 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
72732 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
72733 + m[0].units = size;
72734 + m[1].units = align;
72735 ret = (void *)m + align;
72736
72737 trace_kmalloc_node(_RET_IP_, ret,
72738 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72739 gfp |= __GFP_COMP;
72740 ret = slob_new_pages(gfp, order, node);
72741 if (ret) {
72742 - struct page *page;
72743 - page = virt_to_page(ret);
72744 - page->private = size;
72745 + struct slob_page *sp;
72746 + sp = slob_page(ret);
72747 + sp->size = size;
72748 }
72749
72750 trace_kmalloc_node(_RET_IP_, ret,
72751 size, PAGE_SIZE << order, gfp, node);
72752 }
72753
72754 - kmemleak_alloc(ret, size, 1, gfp);
72755 + return ret;
72756 +}
72757 +
72758 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72759 +{
72760 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72761 + void *ret = __kmalloc_node_align(size, gfp, node, align);
72762 +
72763 + if (!ZERO_OR_NULL_PTR(ret))
72764 + kmemleak_alloc(ret, size, 1, gfp);
72765 return ret;
72766 }
72767 EXPORT_SYMBOL(__kmalloc_node);
72768 @@ -533,13 +547,92 @@ void kfree(const void *block)
72769 sp = slob_page(block);
72770 if (is_slob_page(sp)) {
72771 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72772 - unsigned int *m = (unsigned int *)(block - align);
72773 - slob_free(m, *m + align);
72774 - } else
72775 + slob_t *m = (slob_t *)(block - align);
72776 + slob_free(m, m[0].units + align);
72777 + } else {
72778 + clear_slob_page(sp);
72779 + free_slob_page(sp);
72780 + sp->size = 0;
72781 put_page(&sp->page);
72782 + }
72783 }
72784 EXPORT_SYMBOL(kfree);
72785
72786 +void check_object_size(const void *ptr, unsigned long n, bool to)
72787 +{
72788 +
72789 +#ifdef CONFIG_PAX_USERCOPY
72790 + struct slob_page *sp;
72791 + const slob_t *free;
72792 + const void *base;
72793 + unsigned long flags;
72794 + const char *type;
72795 +
72796 + if (!n)
72797 + return;
72798 +
72799 + type = "<null>";
72800 + if (ZERO_OR_NULL_PTR(ptr))
72801 + goto report;
72802 +
72803 + if (!virt_addr_valid(ptr))
72804 + return;
72805 +
72806 + type = "<process stack>";
72807 + sp = slob_page(ptr);
72808 + if (!PageSlab((struct page *)sp)) {
72809 + if (object_is_on_stack(ptr, n) == -1)
72810 + goto report;
72811 + return;
72812 + }
72813 +
72814 + type = "<slob>";
72815 + if (sp->size) {
72816 + base = page_address(&sp->page);
72817 + if (base <= ptr && n <= sp->size - (ptr - base))
72818 + return;
72819 + goto report;
72820 + }
72821 +
72822 + /* some tricky double walking to find the chunk */
72823 + spin_lock_irqsave(&slob_lock, flags);
72824 + base = (void *)((unsigned long)ptr & PAGE_MASK);
72825 + free = sp->free;
72826 +
72827 + while (!slob_last(free) && (void *)free <= ptr) {
72828 + base = free + slob_units(free);
72829 + free = slob_next(free);
72830 + }
72831 +
72832 + while (base < (void *)free) {
72833 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
72834 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
72835 + int offset;
72836 +
72837 + if (ptr < base + align)
72838 + break;
72839 +
72840 + offset = ptr - base - align;
72841 + if (offset >= m) {
72842 + base += size;
72843 + continue;
72844 + }
72845 +
72846 + if (n > m - offset)
72847 + break;
72848 +
72849 + spin_unlock_irqrestore(&slob_lock, flags);
72850 + return;
72851 + }
72852 +
72853 + spin_unlock_irqrestore(&slob_lock, flags);
72854 +report:
72855 + pax_report_usercopy(ptr, n, to, type);
72856 +#endif
72857 +
72858 +}
72859 +EXPORT_SYMBOL(check_object_size);
72860 +
72861 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
72862 size_t ksize(const void *block)
72863 {
72864 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
72865 sp = slob_page(block);
72866 if (is_slob_page(sp)) {
72867 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72868 - unsigned int *m = (unsigned int *)(block - align);
72869 - return SLOB_UNITS(*m) * SLOB_UNIT;
72870 + slob_t *m = (slob_t *)(block - align);
72871 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
72872 } else
72873 - return sp->page.private;
72874 + return sp->size;
72875 }
72876 EXPORT_SYMBOL(ksize);
72877
72878 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
72879 {
72880 struct kmem_cache *c;
72881
72882 +#ifdef CONFIG_PAX_USERCOPY
72883 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
72884 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
72885 +#else
72886 c = slob_alloc(sizeof(struct kmem_cache),
72887 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
72888 +#endif
72889
72890 if (c) {
72891 c->name = name;
72892 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
72893
72894 lockdep_trace_alloc(flags);
72895
72896 +#ifdef CONFIG_PAX_USERCOPY
72897 + b = __kmalloc_node_align(c->size, flags, node, c->align);
72898 +#else
72899 if (c->size < PAGE_SIZE) {
72900 b = slob_alloc(c->size, flags, c->align, node);
72901 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72902 SLOB_UNITS(c->size) * SLOB_UNIT,
72903 flags, node);
72904 } else {
72905 + struct slob_page *sp;
72906 +
72907 b = slob_new_pages(flags, get_order(c->size), node);
72908 + sp = slob_page(b);
72909 + sp->size = c->size;
72910 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72911 PAGE_SIZE << get_order(c->size),
72912 flags, node);
72913 }
72914 +#endif
72915
72916 if (c->ctor)
72917 c->ctor(b);
72918 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
72919
72920 static void __kmem_cache_free(void *b, int size)
72921 {
72922 - if (size < PAGE_SIZE)
72923 + struct slob_page *sp = slob_page(b);
72924 +
72925 + if (is_slob_page(sp))
72926 slob_free(b, size);
72927 - else
72928 + else {
72929 + clear_slob_page(sp);
72930 + free_slob_page(sp);
72931 + sp->size = 0;
72932 slob_free_pages(b, get_order(size));
72933 + }
72934 }
72935
72936 static void kmem_rcu_free(struct rcu_head *head)
72937 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
72938
72939 void kmem_cache_free(struct kmem_cache *c, void *b)
72940 {
72941 + int size = c->size;
72942 +
72943 +#ifdef CONFIG_PAX_USERCOPY
72944 + if (size + c->align < PAGE_SIZE) {
72945 + size += c->align;
72946 + b -= c->align;
72947 + }
72948 +#endif
72949 +
72950 kmemleak_free_recursive(b, c->flags);
72951 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
72952 struct slob_rcu *slob_rcu;
72953 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
72954 - slob_rcu->size = c->size;
72955 + slob_rcu = b + (size - sizeof(struct slob_rcu));
72956 + slob_rcu->size = size;
72957 call_rcu(&slob_rcu->head, kmem_rcu_free);
72958 } else {
72959 - __kmem_cache_free(b, c->size);
72960 + __kmem_cache_free(b, size);
72961 }
72962
72963 +#ifdef CONFIG_PAX_USERCOPY
72964 + trace_kfree(_RET_IP_, b);
72965 +#else
72966 trace_kmem_cache_free(_RET_IP_, b);
72967 +#endif
72968 +
72969 }
72970 EXPORT_SYMBOL(kmem_cache_free);
72971
72972 diff --git a/mm/slub.c b/mm/slub.c
72973 index 71de9b5..dd263c5 100644
72974 --- a/mm/slub.c
72975 +++ b/mm/slub.c
72976 @@ -209,7 +209,7 @@ struct track {
72977
72978 enum track_item { TRACK_ALLOC, TRACK_FREE };
72979
72980 -#ifdef CONFIG_SYSFS
72981 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72982 static int sysfs_slab_add(struct kmem_cache *);
72983 static int sysfs_slab_alias(struct kmem_cache *, const char *);
72984 static void sysfs_slab_remove(struct kmem_cache *);
72985 @@ -538,7 +538,7 @@ static void print_track(const char *s, struct track *t)
72986 if (!t->addr)
72987 return;
72988
72989 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
72990 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
72991 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
72992 #ifdef CONFIG_STACKTRACE
72993 {
72994 @@ -2603,6 +2603,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
72995
72996 page = virt_to_head_page(x);
72997
72998 + BUG_ON(!PageSlab(page));
72999 +
73000 slab_free(s, page, x, _RET_IP_);
73001
73002 trace_kmem_cache_free(_RET_IP_, x);
73003 @@ -2636,7 +2638,7 @@ static int slub_min_objects;
73004 * Merge control. If this is set then no merging of slab caches will occur.
73005 * (Could be removed. This was introduced to pacify the merge skeptics.)
73006 */
73007 -static int slub_nomerge;
73008 +static int slub_nomerge = 1;
73009
73010 /*
73011 * Calculate the order of allocation given an slab object size.
73012 @@ -3089,7 +3091,7 @@ static int kmem_cache_open(struct kmem_cache *s,
73013 else
73014 s->cpu_partial = 30;
73015
73016 - s->refcount = 1;
73017 + atomic_set(&s->refcount, 1);
73018 #ifdef CONFIG_NUMA
73019 s->remote_node_defrag_ratio = 1000;
73020 #endif
73021 @@ -3193,8 +3195,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
73022 void kmem_cache_destroy(struct kmem_cache *s)
73023 {
73024 down_write(&slub_lock);
73025 - s->refcount--;
73026 - if (!s->refcount) {
73027 + if (atomic_dec_and_test(&s->refcount)) {
73028 list_del(&s->list);
73029 up_write(&slub_lock);
73030 if (kmem_cache_close(s)) {
73031 @@ -3405,6 +3406,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
73032 EXPORT_SYMBOL(__kmalloc_node);
73033 #endif
73034
73035 +void check_object_size(const void *ptr, unsigned long n, bool to)
73036 +{
73037 +
73038 +#ifdef CONFIG_PAX_USERCOPY
73039 + struct page *page;
73040 + struct kmem_cache *s = NULL;
73041 + unsigned long offset;
73042 + const char *type;
73043 +
73044 + if (!n)
73045 + return;
73046 +
73047 + type = "<null>";
73048 + if (ZERO_OR_NULL_PTR(ptr))
73049 + goto report;
73050 +
73051 + if (!virt_addr_valid(ptr))
73052 + return;
73053 +
73054 + page = virt_to_head_page(ptr);
73055 +
73056 + type = "<process stack>";
73057 + if (!PageSlab(page)) {
73058 + if (object_is_on_stack(ptr, n) == -1)
73059 + goto report;
73060 + return;
73061 + }
73062 +
73063 + s = page->slab;
73064 + type = s->name;
73065 + if (!(s->flags & SLAB_USERCOPY))
73066 + goto report;
73067 +
73068 + offset = (ptr - page_address(page)) % s->size;
73069 + if (offset <= s->objsize && n <= s->objsize - offset)
73070 + return;
73071 +
73072 +report:
73073 + pax_report_usercopy(ptr, n, to, type);
73074 +#endif
73075 +
73076 +}
73077 +EXPORT_SYMBOL(check_object_size);
73078 +
73079 size_t ksize(const void *object)
73080 {
73081 struct page *page;
73082 @@ -3679,7 +3724,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
73083 int node;
73084
73085 list_add(&s->list, &slab_caches);
73086 - s->refcount = -1;
73087 + atomic_set(&s->refcount, -1);
73088
73089 for_each_node_state(node, N_NORMAL_MEMORY) {
73090 struct kmem_cache_node *n = get_node(s, node);
73091 @@ -3799,17 +3844,17 @@ void __init kmem_cache_init(void)
73092
73093 /* Caches that are not of the two-to-the-power-of size */
73094 if (KMALLOC_MIN_SIZE <= 32) {
73095 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
73096 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
73097 caches++;
73098 }
73099
73100 if (KMALLOC_MIN_SIZE <= 64) {
73101 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
73102 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
73103 caches++;
73104 }
73105
73106 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
73107 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
73108 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
73109 caches++;
73110 }
73111
73112 @@ -3877,7 +3922,7 @@ static int slab_unmergeable(struct kmem_cache *s)
73113 /*
73114 * We may have set a slab to be unmergeable during bootstrap.
73115 */
73116 - if (s->refcount < 0)
73117 + if (atomic_read(&s->refcount) < 0)
73118 return 1;
73119
73120 return 0;
73121 @@ -3936,7 +3981,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73122 down_write(&slub_lock);
73123 s = find_mergeable(size, align, flags, name, ctor);
73124 if (s) {
73125 - s->refcount++;
73126 + atomic_inc(&s->refcount);
73127 /*
73128 * Adjust the object sizes so that we clear
73129 * the complete object on kzalloc.
73130 @@ -3945,7 +3990,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73131 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
73132
73133 if (sysfs_slab_alias(s, name)) {
73134 - s->refcount--;
73135 + atomic_dec(&s->refcount);
73136 goto err;
73137 }
73138 up_write(&slub_lock);
73139 @@ -4074,7 +4119,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
73140 }
73141 #endif
73142
73143 -#ifdef CONFIG_SYSFS
73144 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73145 static int count_inuse(struct page *page)
73146 {
73147 return page->inuse;
73148 @@ -4461,12 +4506,12 @@ static void resiliency_test(void)
73149 validate_slab_cache(kmalloc_caches[9]);
73150 }
73151 #else
73152 -#ifdef CONFIG_SYSFS
73153 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73154 static void resiliency_test(void) {};
73155 #endif
73156 #endif
73157
73158 -#ifdef CONFIG_SYSFS
73159 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73160 enum slab_stat_type {
73161 SL_ALL, /* All slabs */
73162 SL_PARTIAL, /* Only partially allocated slabs */
73163 @@ -4709,7 +4754,7 @@ SLAB_ATTR_RO(ctor);
73164
73165 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
73166 {
73167 - return sprintf(buf, "%d\n", s->refcount - 1);
73168 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
73169 }
73170 SLAB_ATTR_RO(aliases);
73171
73172 @@ -5280,6 +5325,7 @@ static char *create_unique_id(struct kmem_cache *s)
73173 return name;
73174 }
73175
73176 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73177 static int sysfs_slab_add(struct kmem_cache *s)
73178 {
73179 int err;
73180 @@ -5342,6 +5388,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
73181 kobject_del(&s->kobj);
73182 kobject_put(&s->kobj);
73183 }
73184 +#endif
73185
73186 /*
73187 * Need to buffer aliases during bootup until sysfs becomes
73188 @@ -5355,6 +5402,7 @@ struct saved_alias {
73189
73190 static struct saved_alias *alias_list;
73191
73192 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73193 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73194 {
73195 struct saved_alias *al;
73196 @@ -5377,6 +5425,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73197 alias_list = al;
73198 return 0;
73199 }
73200 +#endif
73201
73202 static int __init slab_sysfs_init(void)
73203 {
73204 diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
73205 index 1b7e22a..3fcd4f3 100644
73206 --- a/mm/sparse-vmemmap.c
73207 +++ b/mm/sparse-vmemmap.c
73208 @@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
73209 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
73210 if (!p)
73211 return NULL;
73212 - pud_populate(&init_mm, pud, p);
73213 + pud_populate_kernel(&init_mm, pud, p);
73214 }
73215 return pud;
73216 }
73217 @@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
73218 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
73219 if (!p)
73220 return NULL;
73221 - pgd_populate(&init_mm, pgd, p);
73222 + pgd_populate_kernel(&init_mm, pgd, p);
73223 }
73224 return pgd;
73225 }
73226 diff --git a/mm/swap.c b/mm/swap.c
73227 index 5c13f13..f1cfc13 100644
73228 --- a/mm/swap.c
73229 +++ b/mm/swap.c
73230 @@ -30,6 +30,7 @@
73231 #include <linux/backing-dev.h>
73232 #include <linux/memcontrol.h>
73233 #include <linux/gfp.h>
73234 +#include <linux/hugetlb.h>
73235
73236 #include "internal.h"
73237
73238 @@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
73239
73240 __page_cache_release(page);
73241 dtor = get_compound_page_dtor(page);
73242 + if (!PageHuge(page))
73243 + BUG_ON(dtor != free_compound_page);
73244 (*dtor)(page);
73245 }
73246
73247 diff --git a/mm/swapfile.c b/mm/swapfile.c
73248 index fafc26d..1b7493e 100644
73249 --- a/mm/swapfile.c
73250 +++ b/mm/swapfile.c
73251 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
73252
73253 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
73254 /* Activity counter to indicate that a swapon or swapoff has occurred */
73255 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
73256 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
73257
73258 static inline unsigned char swap_count(unsigned char ent)
73259 {
73260 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
73261 }
73262 filp_close(swap_file, NULL);
73263 err = 0;
73264 - atomic_inc(&proc_poll_event);
73265 + atomic_inc_unchecked(&proc_poll_event);
73266 wake_up_interruptible(&proc_poll_wait);
73267
73268 out_dput:
73269 @@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
73270
73271 poll_wait(file, &proc_poll_wait, wait);
73272
73273 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
73274 - seq->poll_event = atomic_read(&proc_poll_event);
73275 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
73276 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73277 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
73278 }
73279
73280 @@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file)
73281 return ret;
73282
73283 seq = file->private_data;
73284 - seq->poll_event = atomic_read(&proc_poll_event);
73285 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73286 return 0;
73287 }
73288
73289 @@ -2127,7 +2127,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
73290 (p->flags & SWP_DISCARDABLE) ? "D" : "");
73291
73292 mutex_unlock(&swapon_mutex);
73293 - atomic_inc(&proc_poll_event);
73294 + atomic_inc_unchecked(&proc_poll_event);
73295 wake_up_interruptible(&proc_poll_wait);
73296
73297 if (S_ISREG(inode->i_mode))
73298 diff --git a/mm/util.c b/mm/util.c
73299 index ae962b3..0bba886 100644
73300 --- a/mm/util.c
73301 +++ b/mm/util.c
73302 @@ -284,6 +284,12 @@ done:
73303 void arch_pick_mmap_layout(struct mm_struct *mm)
73304 {
73305 mm->mmap_base = TASK_UNMAPPED_BASE;
73306 +
73307 +#ifdef CONFIG_PAX_RANDMMAP
73308 + if (mm->pax_flags & MF_PAX_RANDMMAP)
73309 + mm->mmap_base += mm->delta_mmap;
73310 +#endif
73311 +
73312 mm->get_unmapped_area = arch_get_unmapped_area;
73313 mm->unmap_area = arch_unmap_area;
73314 }
73315 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
73316 index 1196c77..2e608e8 100644
73317 --- a/mm/vmalloc.c
73318 +++ b/mm/vmalloc.c
73319 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
73320
73321 pte = pte_offset_kernel(pmd, addr);
73322 do {
73323 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73324 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73325 +
73326 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73327 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
73328 + BUG_ON(!pte_exec(*pte));
73329 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
73330 + continue;
73331 + }
73332 +#endif
73333 +
73334 + {
73335 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73336 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73337 + }
73338 } while (pte++, addr += PAGE_SIZE, addr != end);
73339 }
73340
73341 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73342 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
73343 {
73344 pte_t *pte;
73345 + int ret = -ENOMEM;
73346
73347 /*
73348 * nr is a running index into the array which helps higher level
73349 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73350 pte = pte_alloc_kernel(pmd, addr);
73351 if (!pte)
73352 return -ENOMEM;
73353 +
73354 + pax_open_kernel();
73355 do {
73356 struct page *page = pages[*nr];
73357
73358 - if (WARN_ON(!pte_none(*pte)))
73359 - return -EBUSY;
73360 - if (WARN_ON(!page))
73361 - return -ENOMEM;
73362 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73363 + if (pgprot_val(prot) & _PAGE_NX)
73364 +#endif
73365 +
73366 + if (WARN_ON(!pte_none(*pte))) {
73367 + ret = -EBUSY;
73368 + goto out;
73369 + }
73370 + if (WARN_ON(!page)) {
73371 + ret = -ENOMEM;
73372 + goto out;
73373 + }
73374 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
73375 (*nr)++;
73376 } while (pte++, addr += PAGE_SIZE, addr != end);
73377 - return 0;
73378 + ret = 0;
73379 +out:
73380 + pax_close_kernel();
73381 + return ret;
73382 }
73383
73384 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
73385 @@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
73386 pmd_t *pmd;
73387 unsigned long next;
73388
73389 - pmd = pmd_alloc(&init_mm, pud, addr);
73390 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
73391 if (!pmd)
73392 return -ENOMEM;
73393 do {
73394 @@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
73395 pud_t *pud;
73396 unsigned long next;
73397
73398 - pud = pud_alloc(&init_mm, pgd, addr);
73399 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
73400 if (!pud)
73401 return -ENOMEM;
73402 do {
73403 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
73404 * and fall back on vmalloc() if that fails. Others
73405 * just put it in the vmalloc space.
73406 */
73407 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
73408 +#ifdef CONFIG_MODULES
73409 +#ifdef MODULES_VADDR
73410 unsigned long addr = (unsigned long)x;
73411 if (addr >= MODULES_VADDR && addr < MODULES_END)
73412 return 1;
73413 #endif
73414 +
73415 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73416 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
73417 + return 1;
73418 +#endif
73419 +
73420 +#endif
73421 +
73422 return is_vmalloc_addr(x);
73423 }
73424
73425 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
73426
73427 if (!pgd_none(*pgd)) {
73428 pud_t *pud = pud_offset(pgd, addr);
73429 +#ifdef CONFIG_X86
73430 + if (!pud_large(*pud))
73431 +#endif
73432 if (!pud_none(*pud)) {
73433 pmd_t *pmd = pmd_offset(pud, addr);
73434 +#ifdef CONFIG_X86
73435 + if (!pmd_large(*pmd))
73436 +#endif
73437 if (!pmd_none(*pmd)) {
73438 pte_t *ptep, pte;
73439
73440 @@ -332,6 +372,10 @@ static void purge_vmap_area_lazy(void);
73441 static struct vmap_area *alloc_vmap_area(unsigned long size,
73442 unsigned long align,
73443 unsigned long vstart, unsigned long vend,
73444 + int node, gfp_t gfp_mask) __size_overflow(1);
73445 +static struct vmap_area *alloc_vmap_area(unsigned long size,
73446 + unsigned long align,
73447 + unsigned long vstart, unsigned long vend,
73448 int node, gfp_t gfp_mask)
73449 {
73450 struct vmap_area *va;
73451 @@ -1320,6 +1364,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
73452 struct vm_struct *area;
73453
73454 BUG_ON(in_interrupt());
73455 +
73456 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73457 + if (flags & VM_KERNEXEC) {
73458 + if (start != VMALLOC_START || end != VMALLOC_END)
73459 + return NULL;
73460 + start = (unsigned long)MODULES_EXEC_VADDR;
73461 + end = (unsigned long)MODULES_EXEC_END;
73462 + }
73463 +#endif
73464 +
73465 if (flags & VM_IOREMAP) {
73466 int bit = fls(size);
73467
73468 @@ -1552,6 +1606,11 @@ void *vmap(struct page **pages, unsigned int count,
73469 if (count > totalram_pages)
73470 return NULL;
73471
73472 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73473 + if (!(pgprot_val(prot) & _PAGE_NX))
73474 + flags |= VM_KERNEXEC;
73475 +#endif
73476 +
73477 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
73478 __builtin_return_address(0));
73479 if (!area)
73480 @@ -1653,6 +1712,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
73481 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
73482 goto fail;
73483
73484 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73485 + if (!(pgprot_val(prot) & _PAGE_NX))
73486 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
73487 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
73488 + else
73489 +#endif
73490 +
73491 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
73492 start, end, node, gfp_mask, caller);
73493 if (!area)
73494 @@ -1826,10 +1892,9 @@ EXPORT_SYMBOL(vzalloc_node);
73495 * For tight control over page level allocator and protection flags
73496 * use __vmalloc() instead.
73497 */
73498 -
73499 void *vmalloc_exec(unsigned long size)
73500 {
73501 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
73502 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
73503 -1, __builtin_return_address(0));
73504 }
73505
73506 @@ -2124,6 +2189,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
73507 unsigned long uaddr = vma->vm_start;
73508 unsigned long usize = vma->vm_end - vma->vm_start;
73509
73510 + BUG_ON(vma->vm_mirror);
73511 +
73512 if ((PAGE_SIZE-1) & (unsigned long)addr)
73513 return -EINVAL;
73514
73515 @@ -2376,8 +2443,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
73516 return NULL;
73517 }
73518
73519 - vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
73520 - vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
73521 + vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
73522 + vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
73523 if (!vas || !vms)
73524 goto err_free2;
73525
73526 diff --git a/mm/vmstat.c b/mm/vmstat.c
73527 index 7db1b9b..e9f6b07 100644
73528 --- a/mm/vmstat.c
73529 +++ b/mm/vmstat.c
73530 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
73531 *
73532 * vm_stat contains the global counters
73533 */
73534 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73535 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73536 EXPORT_SYMBOL(vm_stat);
73537
73538 #ifdef CONFIG_SMP
73539 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
73540 v = p->vm_stat_diff[i];
73541 p->vm_stat_diff[i] = 0;
73542 local_irq_restore(flags);
73543 - atomic_long_add(v, &zone->vm_stat[i]);
73544 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
73545 global_diff[i] += v;
73546 #ifdef CONFIG_NUMA
73547 /* 3 seconds idle till flush */
73548 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
73549
73550 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
73551 if (global_diff[i])
73552 - atomic_long_add(global_diff[i], &vm_stat[i]);
73553 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
73554 }
73555
73556 #endif
73557 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
73558 start_cpu_timer(cpu);
73559 #endif
73560 #ifdef CONFIG_PROC_FS
73561 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
73562 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
73563 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
73564 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
73565 + {
73566 + mode_t gr_mode = S_IRUGO;
73567 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
73568 + gr_mode = S_IRUSR;
73569 +#endif
73570 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
73571 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
73572 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73573 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
73574 +#else
73575 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
73576 +#endif
73577 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
73578 + }
73579 #endif
73580 return 0;
73581 }
73582 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
73583 index efea35b..9c8dd0b 100644
73584 --- a/net/8021q/vlan.c
73585 +++ b/net/8021q/vlan.c
73586 @@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
73587 err = -EPERM;
73588 if (!capable(CAP_NET_ADMIN))
73589 break;
73590 - if ((args.u.name_type >= 0) &&
73591 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
73592 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
73593 struct vlan_net *vn;
73594
73595 vn = net_generic(net, vlan_net_id);
73596 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
73597 index fccae26..e7ece2f 100644
73598 --- a/net/9p/trans_fd.c
73599 +++ b/net/9p/trans_fd.c
73600 @@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
73601 oldfs = get_fs();
73602 set_fs(get_ds());
73603 /* The cast to a user pointer is valid due to the set_fs() */
73604 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
73605 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
73606 set_fs(oldfs);
73607
73608 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
73609 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
73610 index 876fbe8..8bbea9f 100644
73611 --- a/net/atm/atm_misc.c
73612 +++ b/net/atm/atm_misc.c
73613 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
73614 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
73615 return 1;
73616 atm_return(vcc, truesize);
73617 - atomic_inc(&vcc->stats->rx_drop);
73618 + atomic_inc_unchecked(&vcc->stats->rx_drop);
73619 return 0;
73620 }
73621 EXPORT_SYMBOL(atm_charge);
73622 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
73623 }
73624 }
73625 atm_return(vcc, guess);
73626 - atomic_inc(&vcc->stats->rx_drop);
73627 + atomic_inc_unchecked(&vcc->stats->rx_drop);
73628 return NULL;
73629 }
73630 EXPORT_SYMBOL(atm_alloc_charge);
73631 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
73632
73633 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73634 {
73635 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73636 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73637 __SONET_ITEMS
73638 #undef __HANDLE_ITEM
73639 }
73640 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
73641
73642 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73643 {
73644 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73645 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
73646 __SONET_ITEMS
73647 #undef __HANDLE_ITEM
73648 }
73649 diff --git a/net/atm/lec.h b/net/atm/lec.h
73650 index dfc0719..47c5322 100644
73651 --- a/net/atm/lec.h
73652 +++ b/net/atm/lec.h
73653 @@ -48,7 +48,7 @@ struct lane2_ops {
73654 const u8 *tlvs, u32 sizeoftlvs);
73655 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
73656 const u8 *tlvs, u32 sizeoftlvs);
73657 -};
73658 +} __no_const;
73659
73660 /*
73661 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
73662 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
73663 index 0919a88..a23d54e 100644
73664 --- a/net/atm/mpc.h
73665 +++ b/net/atm/mpc.h
73666 @@ -33,7 +33,7 @@ struct mpoa_client {
73667 struct mpc_parameters parameters; /* parameters for this client */
73668
73669 const struct net_device_ops *old_ops;
73670 - struct net_device_ops new_ops;
73671 + net_device_ops_no_const new_ops;
73672 };
73673
73674
73675 diff --git a/net/atm/proc.c b/net/atm/proc.c
73676 index 0d020de..011c7bb 100644
73677 --- a/net/atm/proc.c
73678 +++ b/net/atm/proc.c
73679 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
73680 const struct k_atm_aal_stats *stats)
73681 {
73682 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
73683 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
73684 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
73685 - atomic_read(&stats->rx_drop));
73686 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
73687 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
73688 + atomic_read_unchecked(&stats->rx_drop));
73689 }
73690
73691 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
73692 diff --git a/net/atm/resources.c b/net/atm/resources.c
73693 index 23f45ce..c748f1a 100644
73694 --- a/net/atm/resources.c
73695 +++ b/net/atm/resources.c
73696 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
73697 static void copy_aal_stats(struct k_atm_aal_stats *from,
73698 struct atm_aal_stats *to)
73699 {
73700 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73701 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73702 __AAL_STAT_ITEMS
73703 #undef __HANDLE_ITEM
73704 }
73705 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
73706 static void subtract_aal_stats(struct k_atm_aal_stats *from,
73707 struct atm_aal_stats *to)
73708 {
73709 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73710 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
73711 __AAL_STAT_ITEMS
73712 #undef __HANDLE_ITEM
73713 }
73714 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
73715 index a6d5d63..1cc6c2b 100644
73716 --- a/net/batman-adv/bat_iv_ogm.c
73717 +++ b/net/batman-adv/bat_iv_ogm.c
73718 @@ -539,7 +539,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
73719
73720 /* change sequence number to network order */
73721 batman_ogm_packet->seqno =
73722 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
73723 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
73724
73725 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
73726 batman_ogm_packet->tt_crc = htons((uint16_t)
73727 @@ -559,7 +559,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
73728 else
73729 batman_ogm_packet->gw_flags = NO_FLAGS;
73730
73731 - atomic_inc(&hard_iface->seqno);
73732 + atomic_inc_unchecked(&hard_iface->seqno);
73733
73734 slide_own_bcast_window(hard_iface);
73735 bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
73736 @@ -917,7 +917,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
73737 return;
73738
73739 /* could be changed by schedule_own_packet() */
73740 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
73741 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
73742
73743 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
73744
73745 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
73746 index 3778977..f6a9450 100644
73747 --- a/net/batman-adv/hard-interface.c
73748 +++ b/net/batman-adv/hard-interface.c
73749 @@ -328,8 +328,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
73750 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
73751 dev_add_pack(&hard_iface->batman_adv_ptype);
73752
73753 - atomic_set(&hard_iface->seqno, 1);
73754 - atomic_set(&hard_iface->frag_seqno, 1);
73755 + atomic_set_unchecked(&hard_iface->seqno, 1);
73756 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
73757 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
73758 hard_iface->net_dev->name);
73759
73760 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
73761 index a5590f4..8d31969 100644
73762 --- a/net/batman-adv/soft-interface.c
73763 +++ b/net/batman-adv/soft-interface.c
73764 @@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
73765
73766 /* set broadcast sequence number */
73767 bcast_packet->seqno =
73768 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
73769 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
73770
73771 add_bcast_packet_to_list(bat_priv, skb, 1);
73772
73773 @@ -841,7 +841,7 @@ struct net_device *softif_create(const char *name)
73774 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
73775
73776 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
73777 - atomic_set(&bat_priv->bcast_seqno, 1);
73778 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
73779 atomic_set(&bat_priv->ttvn, 0);
73780 atomic_set(&bat_priv->tt_local_changes, 0);
73781 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
73782 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
73783 index 302efb5..1590365 100644
73784 --- a/net/batman-adv/types.h
73785 +++ b/net/batman-adv/types.h
73786 @@ -38,8 +38,8 @@ struct hard_iface {
73787 int16_t if_num;
73788 char if_status;
73789 struct net_device *net_dev;
73790 - atomic_t seqno;
73791 - atomic_t frag_seqno;
73792 + atomic_unchecked_t seqno;
73793 + atomic_unchecked_t frag_seqno;
73794 unsigned char *packet_buff;
73795 int packet_len;
73796 struct kobject *hardif_obj;
73797 @@ -155,7 +155,7 @@ struct bat_priv {
73798 atomic_t orig_interval; /* uint */
73799 atomic_t hop_penalty; /* uint */
73800 atomic_t log_level; /* uint */
73801 - atomic_t bcast_seqno;
73802 + atomic_unchecked_t bcast_seqno;
73803 atomic_t bcast_queue_left;
73804 atomic_t batman_queue_left;
73805 atomic_t ttvn; /* translation table version number */
73806 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
73807 index 676f6a6..3b4e668 100644
73808 --- a/net/batman-adv/unicast.c
73809 +++ b/net/batman-adv/unicast.c
73810 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
73811 frag1->flags = UNI_FRAG_HEAD | large_tail;
73812 frag2->flags = large_tail;
73813
73814 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
73815 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
73816 frag1->seqno = htons(seqno - 1);
73817 frag2->seqno = htons(seqno);
73818
73819 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
73820 index 5238b6b..c9798ce 100644
73821 --- a/net/bluetooth/hci_conn.c
73822 +++ b/net/bluetooth/hci_conn.c
73823 @@ -233,7 +233,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
73824 memset(&cp, 0, sizeof(cp));
73825
73826 cp.handle = cpu_to_le16(conn->handle);
73827 - memcpy(cp.ltk, ltk, sizeof(ltk));
73828 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
73829
73830 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
73831 }
73832 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
73833 index 6f9c25b..d19fd66 100644
73834 --- a/net/bluetooth/l2cap_core.c
73835 +++ b/net/bluetooth/l2cap_core.c
73836 @@ -2466,8 +2466,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
73837 break;
73838
73839 case L2CAP_CONF_RFC:
73840 - if (olen == sizeof(rfc))
73841 - memcpy(&rfc, (void *)val, olen);
73842 + if (olen != sizeof(rfc))
73843 + break;
73844 +
73845 + memcpy(&rfc, (void *)val, olen);
73846
73847 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
73848 rfc.mode != chan->mode)
73849 @@ -2585,8 +2587,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
73850
73851 switch (type) {
73852 case L2CAP_CONF_RFC:
73853 - if (olen == sizeof(rfc))
73854 - memcpy(&rfc, (void *)val, olen);
73855 + if (olen != sizeof(rfc))
73856 + break;
73857 +
73858 + memcpy(&rfc, (void *)val, olen);
73859 goto done;
73860 }
73861 }
73862 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
73863 index 5fe2ff3..10968b5 100644
73864 --- a/net/bridge/netfilter/ebtables.c
73865 +++ b/net/bridge/netfilter/ebtables.c
73866 @@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
73867 tmp.valid_hooks = t->table->valid_hooks;
73868 }
73869 mutex_unlock(&ebt_mutex);
73870 - if (copy_to_user(user, &tmp, *len) != 0){
73871 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
73872 BUGPRINT("c2u Didn't work\n");
73873 ret = -EFAULT;
73874 break;
73875 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
73876 index 5cf5222..6f704ad 100644
73877 --- a/net/caif/cfctrl.c
73878 +++ b/net/caif/cfctrl.c
73879 @@ -9,6 +9,7 @@
73880 #include <linux/stddef.h>
73881 #include <linux/spinlock.h>
73882 #include <linux/slab.h>
73883 +#include <linux/sched.h>
73884 #include <net/caif/caif_layer.h>
73885 #include <net/caif/cfpkt.h>
73886 #include <net/caif/cfctrl.h>
73887 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
73888 memset(&dev_info, 0, sizeof(dev_info));
73889 dev_info.id = 0xff;
73890 cfsrvl_init(&this->serv, 0, &dev_info, false);
73891 - atomic_set(&this->req_seq_no, 1);
73892 - atomic_set(&this->rsp_seq_no, 1);
73893 + atomic_set_unchecked(&this->req_seq_no, 1);
73894 + atomic_set_unchecked(&this->rsp_seq_no, 1);
73895 this->serv.layer.receive = cfctrl_recv;
73896 sprintf(this->serv.layer.name, "ctrl");
73897 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
73898 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
73899 struct cfctrl_request_info *req)
73900 {
73901 spin_lock_bh(&ctrl->info_list_lock);
73902 - atomic_inc(&ctrl->req_seq_no);
73903 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
73904 + atomic_inc_unchecked(&ctrl->req_seq_no);
73905 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
73906 list_add_tail(&req->list, &ctrl->list);
73907 spin_unlock_bh(&ctrl->info_list_lock);
73908 }
73909 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
73910 if (p != first)
73911 pr_warn("Requests are not received in order\n");
73912
73913 - atomic_set(&ctrl->rsp_seq_no,
73914 + atomic_set_unchecked(&ctrl->rsp_seq_no,
73915 p->sequence_no);
73916 list_del(&p->list);
73917 goto out;
73918 diff --git a/net/can/gw.c b/net/can/gw.c
73919 index 3d79b12..8de85fa 100644
73920 --- a/net/can/gw.c
73921 +++ b/net/can/gw.c
73922 @@ -96,7 +96,7 @@ struct cf_mod {
73923 struct {
73924 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
73925 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
73926 - } csumfunc;
73927 + } __no_const csumfunc;
73928 };
73929
73930
73931 diff --git a/net/compat.c b/net/compat.c
73932 index e055708..3f80795 100644
73933 --- a/net/compat.c
73934 +++ b/net/compat.c
73935 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
73936 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
73937 __get_user(kmsg->msg_flags, &umsg->msg_flags))
73938 return -EFAULT;
73939 - kmsg->msg_name = compat_ptr(tmp1);
73940 - kmsg->msg_iov = compat_ptr(tmp2);
73941 - kmsg->msg_control = compat_ptr(tmp3);
73942 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
73943 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
73944 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
73945 return 0;
73946 }
73947
73948 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73949
73950 if (kern_msg->msg_namelen) {
73951 if (mode == VERIFY_READ) {
73952 - int err = move_addr_to_kernel(kern_msg->msg_name,
73953 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
73954 kern_msg->msg_namelen,
73955 kern_address);
73956 if (err < 0)
73957 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73958 kern_msg->msg_name = NULL;
73959
73960 tot_len = iov_from_user_compat_to_kern(kern_iov,
73961 - (struct compat_iovec __user *)kern_msg->msg_iov,
73962 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
73963 kern_msg->msg_iovlen);
73964 if (tot_len >= 0)
73965 kern_msg->msg_iov = kern_iov;
73966 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73967
73968 #define CMSG_COMPAT_FIRSTHDR(msg) \
73969 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
73970 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
73971 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
73972 (struct compat_cmsghdr __user *)NULL)
73973
73974 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
73975 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
73976 (ucmlen) <= (unsigned long) \
73977 ((mhdr)->msg_controllen - \
73978 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
73979 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
73980
73981 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
73982 struct compat_cmsghdr __user *cmsg, int cmsg_len)
73983 {
73984 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
73985 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
73986 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
73987 msg->msg_controllen)
73988 return NULL;
73989 return (struct compat_cmsghdr __user *)ptr;
73990 @@ -219,7 +219,7 @@ Efault:
73991
73992 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
73993 {
73994 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
73995 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
73996 struct compat_cmsghdr cmhdr;
73997 int cmlen;
73998
73999 @@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74000
74001 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
74002 {
74003 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74004 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74005 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
74006 int fdnum = scm->fp->count;
74007 struct file **fp = scm->fp->fp;
74008 @@ -372,7 +372,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
74009 return -EFAULT;
74010 old_fs = get_fs();
74011 set_fs(KERNEL_DS);
74012 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
74013 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
74014 set_fs(old_fs);
74015
74016 return err;
74017 @@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
74018 len = sizeof(ktime);
74019 old_fs = get_fs();
74020 set_fs(KERNEL_DS);
74021 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
74022 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
74023 set_fs(old_fs);
74024
74025 if (!err) {
74026 @@ -576,7 +576,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74027 case MCAST_JOIN_GROUP:
74028 case MCAST_LEAVE_GROUP:
74029 {
74030 - struct compat_group_req __user *gr32 = (void *)optval;
74031 + struct compat_group_req __user *gr32 = (void __user *)optval;
74032 struct group_req __user *kgr =
74033 compat_alloc_user_space(sizeof(struct group_req));
74034 u32 interface;
74035 @@ -597,7 +597,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74036 case MCAST_BLOCK_SOURCE:
74037 case MCAST_UNBLOCK_SOURCE:
74038 {
74039 - struct compat_group_source_req __user *gsr32 = (void *)optval;
74040 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
74041 struct group_source_req __user *kgsr = compat_alloc_user_space(
74042 sizeof(struct group_source_req));
74043 u32 interface;
74044 @@ -618,7 +618,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74045 }
74046 case MCAST_MSFILTER:
74047 {
74048 - struct compat_group_filter __user *gf32 = (void *)optval;
74049 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74050 struct group_filter __user *kgf;
74051 u32 interface, fmode, numsrc;
74052
74053 @@ -656,7 +656,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
74054 char __user *optval, int __user *optlen,
74055 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
74056 {
74057 - struct compat_group_filter __user *gf32 = (void *)optval;
74058 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74059 struct group_filter __user *kgf;
74060 int __user *koptlen;
74061 u32 interface, fmode, numsrc;
74062 diff --git a/net/core/datagram.c b/net/core/datagram.c
74063 index e4fbfd6..6a6ac94 100644
74064 --- a/net/core/datagram.c
74065 +++ b/net/core/datagram.c
74066 @@ -290,7 +290,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
74067 }
74068
74069 kfree_skb(skb);
74070 - atomic_inc(&sk->sk_drops);
74071 + atomic_inc_unchecked(&sk->sk_drops);
74072 sk_mem_reclaim_partial(sk);
74073
74074 return err;
74075 diff --git a/net/core/dev.c b/net/core/dev.c
74076 index 99e1d75..adf968a 100644
74077 --- a/net/core/dev.c
74078 +++ b/net/core/dev.c
74079 @@ -1136,9 +1136,13 @@ void dev_load(struct net *net, const char *name)
74080 if (no_module && capable(CAP_NET_ADMIN))
74081 no_module = request_module("netdev-%s", name);
74082 if (no_module && capable(CAP_SYS_MODULE)) {
74083 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
74084 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
74085 +#else
74086 if (!request_module("%s", name))
74087 pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
74088 name);
74089 +#endif
74090 }
74091 }
74092 EXPORT_SYMBOL(dev_load);
74093 @@ -1602,7 +1606,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74094 {
74095 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
74096 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
74097 - atomic_long_inc(&dev->rx_dropped);
74098 + atomic_long_inc_unchecked(&dev->rx_dropped);
74099 kfree_skb(skb);
74100 return NET_RX_DROP;
74101 }
74102 @@ -1612,7 +1616,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74103 nf_reset(skb);
74104
74105 if (unlikely(!is_skb_forwardable(dev, skb))) {
74106 - atomic_long_inc(&dev->rx_dropped);
74107 + atomic_long_inc_unchecked(&dev->rx_dropped);
74108 kfree_skb(skb);
74109 return NET_RX_DROP;
74110 }
74111 @@ -2042,7 +2046,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
74112
74113 struct dev_gso_cb {
74114 void (*destructor)(struct sk_buff *skb);
74115 -};
74116 +} __no_const;
74117
74118 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
74119
74120 @@ -2898,7 +2902,7 @@ enqueue:
74121
74122 local_irq_restore(flags);
74123
74124 - atomic_long_inc(&skb->dev->rx_dropped);
74125 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74126 kfree_skb(skb);
74127 return NET_RX_DROP;
74128 }
74129 @@ -2970,7 +2974,7 @@ int netif_rx_ni(struct sk_buff *skb)
74130 }
74131 EXPORT_SYMBOL(netif_rx_ni);
74132
74133 -static void net_tx_action(struct softirq_action *h)
74134 +static void net_tx_action(void)
74135 {
74136 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74137
74138 @@ -3258,7 +3262,7 @@ ncls:
74139 if (pt_prev) {
74140 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
74141 } else {
74142 - atomic_long_inc(&skb->dev->rx_dropped);
74143 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74144 kfree_skb(skb);
74145 /* Jamal, now you will not able to escape explaining
74146 * me how you were going to use this. :-)
74147 @@ -3818,7 +3822,7 @@ void netif_napi_del(struct napi_struct *napi)
74148 }
74149 EXPORT_SYMBOL(netif_napi_del);
74150
74151 -static void net_rx_action(struct softirq_action *h)
74152 +static void net_rx_action(void)
74153 {
74154 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74155 unsigned long time_limit = jiffies + 2;
74156 @@ -4288,8 +4292,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
74157 else
74158 seq_printf(seq, "%04x", ntohs(pt->type));
74159
74160 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74161 + seq_printf(seq, " %-8s %p\n",
74162 + pt->dev ? pt->dev->name : "", NULL);
74163 +#else
74164 seq_printf(seq, " %-8s %pF\n",
74165 pt->dev ? pt->dev->name : "", pt->func);
74166 +#endif
74167 }
74168
74169 return 0;
74170 @@ -5839,7 +5848,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
74171 } else {
74172 netdev_stats_to_stats64(storage, &dev->stats);
74173 }
74174 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
74175 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
74176 return storage;
74177 }
74178 EXPORT_SYMBOL(dev_get_stats);
74179 diff --git a/net/core/flow.c b/net/core/flow.c
74180 index e318c7e..168b1d0 100644
74181 --- a/net/core/flow.c
74182 +++ b/net/core/flow.c
74183 @@ -61,7 +61,7 @@ struct flow_cache {
74184 struct timer_list rnd_timer;
74185 };
74186
74187 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
74188 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
74189 EXPORT_SYMBOL(flow_cache_genid);
74190 static struct flow_cache flow_cache_global;
74191 static struct kmem_cache *flow_cachep __read_mostly;
74192 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
74193
74194 static int flow_entry_valid(struct flow_cache_entry *fle)
74195 {
74196 - if (atomic_read(&flow_cache_genid) != fle->genid)
74197 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
74198 return 0;
74199 if (fle->object && !fle->object->ops->check(fle->object))
74200 return 0;
74201 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
74202 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
74203 fcp->hash_count++;
74204 }
74205 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
74206 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
74207 flo = fle->object;
74208 if (!flo)
74209 goto ret_object;
74210 @@ -280,7 +280,7 @@ nocache:
74211 }
74212 flo = resolver(net, key, family, dir, flo, ctx);
74213 if (fle) {
74214 - fle->genid = atomic_read(&flow_cache_genid);
74215 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
74216 if (!IS_ERR(flo))
74217 fle->object = flo;
74218 else
74219 diff --git a/net/core/iovec.c b/net/core/iovec.c
74220 index 7e7aeb0..2a998cb 100644
74221 --- a/net/core/iovec.c
74222 +++ b/net/core/iovec.c
74223 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
74224 if (m->msg_namelen) {
74225 if (mode == VERIFY_READ) {
74226 void __user *namep;
74227 - namep = (void __user __force *) m->msg_name;
74228 + namep = (void __force_user *) m->msg_name;
74229 err = move_addr_to_kernel(namep, m->msg_namelen,
74230 address);
74231 if (err < 0)
74232 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
74233 }
74234
74235 size = m->msg_iovlen * sizeof(struct iovec);
74236 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
74237 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
74238 return -EFAULT;
74239
74240 m->msg_iov = iov;
74241 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
74242 index 90430b7..0032ec0 100644
74243 --- a/net/core/rtnetlink.c
74244 +++ b/net/core/rtnetlink.c
74245 @@ -56,7 +56,7 @@ struct rtnl_link {
74246 rtnl_doit_func doit;
74247 rtnl_dumpit_func dumpit;
74248 rtnl_calcit_func calcit;
74249 -};
74250 +} __no_const;
74251
74252 static DEFINE_MUTEX(rtnl_mutex);
74253
74254 diff --git a/net/core/scm.c b/net/core/scm.c
74255 index 611c5ef..88f6d6d 100644
74256 --- a/net/core/scm.c
74257 +++ b/net/core/scm.c
74258 @@ -219,7 +219,7 @@ EXPORT_SYMBOL(__scm_send);
74259 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74260 {
74261 struct cmsghdr __user *cm
74262 - = (__force struct cmsghdr __user *)msg->msg_control;
74263 + = (struct cmsghdr __force_user *)msg->msg_control;
74264 struct cmsghdr cmhdr;
74265 int cmlen = CMSG_LEN(len);
74266 int err;
74267 @@ -242,7 +242,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74268 err = -EFAULT;
74269 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
74270 goto out;
74271 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
74272 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
74273 goto out;
74274 cmlen = CMSG_SPACE(len);
74275 if (msg->msg_controllen < cmlen)
74276 @@ -258,7 +258,7 @@ EXPORT_SYMBOL(put_cmsg);
74277 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74278 {
74279 struct cmsghdr __user *cm
74280 - = (__force struct cmsghdr __user*)msg->msg_control;
74281 + = (struct cmsghdr __force_user *)msg->msg_control;
74282
74283 int fdmax = 0;
74284 int fdnum = scm->fp->count;
74285 @@ -278,7 +278,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74286 if (fdnum < fdmax)
74287 fdmax = fdnum;
74288
74289 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
74290 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
74291 i++, cmfptr++)
74292 {
74293 int new_fd;
74294 diff --git a/net/core/sock.c b/net/core/sock.c
74295 index b2e14c0..6651b32 100644
74296 --- a/net/core/sock.c
74297 +++ b/net/core/sock.c
74298 @@ -340,7 +340,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74299 struct sk_buff_head *list = &sk->sk_receive_queue;
74300
74301 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
74302 - atomic_inc(&sk->sk_drops);
74303 + atomic_inc_unchecked(&sk->sk_drops);
74304 trace_sock_rcvqueue_full(sk, skb);
74305 return -ENOMEM;
74306 }
74307 @@ -350,7 +350,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74308 return err;
74309
74310 if (!sk_rmem_schedule(sk, skb->truesize)) {
74311 - atomic_inc(&sk->sk_drops);
74312 + atomic_inc_unchecked(&sk->sk_drops);
74313 return -ENOBUFS;
74314 }
74315
74316 @@ -370,7 +370,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74317 skb_dst_force(skb);
74318
74319 spin_lock_irqsave(&list->lock, flags);
74320 - skb->dropcount = atomic_read(&sk->sk_drops);
74321 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74322 __skb_queue_tail(list, skb);
74323 spin_unlock_irqrestore(&list->lock, flags);
74324
74325 @@ -390,7 +390,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74326 skb->dev = NULL;
74327
74328 if (sk_rcvqueues_full(sk, skb)) {
74329 - atomic_inc(&sk->sk_drops);
74330 + atomic_inc_unchecked(&sk->sk_drops);
74331 goto discard_and_relse;
74332 }
74333 if (nested)
74334 @@ -408,7 +408,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74335 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
74336 } else if (sk_add_backlog(sk, skb)) {
74337 bh_unlock_sock(sk);
74338 - atomic_inc(&sk->sk_drops);
74339 + atomic_inc_unchecked(&sk->sk_drops);
74340 goto discard_and_relse;
74341 }
74342
74343 @@ -984,7 +984,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74344 if (len > sizeof(peercred))
74345 len = sizeof(peercred);
74346 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
74347 - if (copy_to_user(optval, &peercred, len))
74348 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
74349 return -EFAULT;
74350 goto lenout;
74351 }
74352 @@ -997,7 +997,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74353 return -ENOTCONN;
74354 if (lv < len)
74355 return -EINVAL;
74356 - if (copy_to_user(optval, address, len))
74357 + if (len > sizeof(address) || copy_to_user(optval, address, len))
74358 return -EFAULT;
74359 goto lenout;
74360 }
74361 @@ -1043,7 +1043,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74362
74363 if (len > lv)
74364 len = lv;
74365 - if (copy_to_user(optval, &v, len))
74366 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
74367 return -EFAULT;
74368 lenout:
74369 if (put_user(len, optlen))
74370 @@ -2128,7 +2128,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
74371 */
74372 smp_wmb();
74373 atomic_set(&sk->sk_refcnt, 1);
74374 - atomic_set(&sk->sk_drops, 0);
74375 + atomic_set_unchecked(&sk->sk_drops, 0);
74376 }
74377 EXPORT_SYMBOL(sock_init_data);
74378
74379 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
74380 index b9868e1..849f809 100644
74381 --- a/net/core/sock_diag.c
74382 +++ b/net/core/sock_diag.c
74383 @@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
74384
74385 int sock_diag_check_cookie(void *sk, __u32 *cookie)
74386 {
74387 +#ifndef CONFIG_GRKERNSEC_HIDESYM
74388 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
74389 cookie[1] != INET_DIAG_NOCOOKIE) &&
74390 ((u32)(unsigned long)sk != cookie[0] ||
74391 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
74392 return -ESTALE;
74393 else
74394 +#endif
74395 return 0;
74396 }
74397 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
74398
74399 void sock_diag_save_cookie(void *sk, __u32 *cookie)
74400 {
74401 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74402 + cookie[0] = 0;
74403 + cookie[1] = 0;
74404 +#else
74405 cookie[0] = (u32)(unsigned long)sk;
74406 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
74407 +#endif
74408 }
74409 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
74410
74411 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
74412 index 02e75d1..9a57a7c 100644
74413 --- a/net/decnet/sysctl_net_decnet.c
74414 +++ b/net/decnet/sysctl_net_decnet.c
74415 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
74416
74417 if (len > *lenp) len = *lenp;
74418
74419 - if (copy_to_user(buffer, addr, len))
74420 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
74421 return -EFAULT;
74422
74423 *lenp = len;
74424 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
74425
74426 if (len > *lenp) len = *lenp;
74427
74428 - if (copy_to_user(buffer, devname, len))
74429 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
74430 return -EFAULT;
74431
74432 *lenp = len;
74433 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
74434 index 39a2d29..f39c0fe 100644
74435 --- a/net/econet/Kconfig
74436 +++ b/net/econet/Kconfig
74437 @@ -4,7 +4,7 @@
74438
74439 config ECONET
74440 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
74441 - depends on EXPERIMENTAL && INET
74442 + depends on EXPERIMENTAL && INET && BROKEN
74443 ---help---
74444 Econet is a fairly old and slow networking protocol mainly used by
74445 Acorn computers to access file and print servers. It uses native
74446 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
74447 index cbe3a68..a879b75 100644
74448 --- a/net/ipv4/fib_frontend.c
74449 +++ b/net/ipv4/fib_frontend.c
74450 @@ -969,12 +969,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
74451 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74452 fib_sync_up(dev);
74453 #endif
74454 - atomic_inc(&net->ipv4.dev_addr_genid);
74455 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74456 rt_cache_flush(dev_net(dev), -1);
74457 break;
74458 case NETDEV_DOWN:
74459 fib_del_ifaddr(ifa, NULL);
74460 - atomic_inc(&net->ipv4.dev_addr_genid);
74461 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74462 if (ifa->ifa_dev->ifa_list == NULL) {
74463 /* Last address was deleted from this interface.
74464 * Disable IP.
74465 @@ -1010,7 +1010,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
74466 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74467 fib_sync_up(dev);
74468 #endif
74469 - atomic_inc(&net->ipv4.dev_addr_genid);
74470 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74471 rt_cache_flush(dev_net(dev), -1);
74472 break;
74473 case NETDEV_DOWN:
74474 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
74475 index 8861f91..ab1e3c1 100644
74476 --- a/net/ipv4/fib_semantics.c
74477 +++ b/net/ipv4/fib_semantics.c
74478 @@ -698,7 +698,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
74479 nh->nh_saddr = inet_select_addr(nh->nh_dev,
74480 nh->nh_gw,
74481 nh->nh_parent->fib_scope);
74482 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
74483 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
74484
74485 return nh->nh_saddr;
74486 }
74487 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
74488 index 984ec65..97ac518 100644
74489 --- a/net/ipv4/inet_hashtables.c
74490 +++ b/net/ipv4/inet_hashtables.c
74491 @@ -18,12 +18,15 @@
74492 #include <linux/sched.h>
74493 #include <linux/slab.h>
74494 #include <linux/wait.h>
74495 +#include <linux/security.h>
74496
74497 #include <net/inet_connection_sock.h>
74498 #include <net/inet_hashtables.h>
74499 #include <net/secure_seq.h>
74500 #include <net/ip.h>
74501
74502 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
74503 +
74504 /*
74505 * Allocate and initialize a new local port bind bucket.
74506 * The bindhash mutex for snum's hash chain must be held here.
74507 @@ -530,6 +533,8 @@ ok:
74508 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
74509 spin_unlock(&head->lock);
74510
74511 + gr_update_task_in_ip_table(current, inet_sk(sk));
74512 +
74513 if (tw) {
74514 inet_twsk_deschedule(tw, death_row);
74515 while (twrefcnt) {
74516 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
74517 index d4d61b6..b81aec8 100644
74518 --- a/net/ipv4/inetpeer.c
74519 +++ b/net/ipv4/inetpeer.c
74520 @@ -487,8 +487,8 @@ relookup:
74521 if (p) {
74522 p->daddr = *daddr;
74523 atomic_set(&p->refcnt, 1);
74524 - atomic_set(&p->rid, 0);
74525 - atomic_set(&p->ip_id_count,
74526 + atomic_set_unchecked(&p->rid, 0);
74527 + atomic_set_unchecked(&p->ip_id_count,
74528 (daddr->family == AF_INET) ?
74529 secure_ip_id(daddr->addr.a4) :
74530 secure_ipv6_id(daddr->addr.a6));
74531 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
74532 index 3727e23..517f5df 100644
74533 --- a/net/ipv4/ip_fragment.c
74534 +++ b/net/ipv4/ip_fragment.c
74535 @@ -318,7 +318,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
74536 return 0;
74537
74538 start = qp->rid;
74539 - end = atomic_inc_return(&peer->rid);
74540 + end = atomic_inc_return_unchecked(&peer->rid);
74541 qp->rid = end;
74542
74543 rc = qp->q.fragments && (end - start) > max;
74544 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
74545 index 2fd0fba..83fac99 100644
74546 --- a/net/ipv4/ip_sockglue.c
74547 +++ b/net/ipv4/ip_sockglue.c
74548 @@ -1137,7 +1137,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74549 len = min_t(unsigned int, len, opt->optlen);
74550 if (put_user(len, optlen))
74551 return -EFAULT;
74552 - if (copy_to_user(optval, opt->__data, len))
74553 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
74554 + copy_to_user(optval, opt->__data, len))
74555 return -EFAULT;
74556 return 0;
74557 }
74558 @@ -1268,7 +1269,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74559 if (sk->sk_type != SOCK_STREAM)
74560 return -ENOPROTOOPT;
74561
74562 - msg.msg_control = optval;
74563 + msg.msg_control = (void __force_kernel *)optval;
74564 msg.msg_controllen = len;
74565 msg.msg_flags = flags;
74566
74567 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
74568 index 92ac7e7..13f93d9 100644
74569 --- a/net/ipv4/ipconfig.c
74570 +++ b/net/ipv4/ipconfig.c
74571 @@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
74572
74573 mm_segment_t oldfs = get_fs();
74574 set_fs(get_ds());
74575 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74576 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74577 set_fs(oldfs);
74578 return res;
74579 }
74580 @@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
74581
74582 mm_segment_t oldfs = get_fs();
74583 set_fs(get_ds());
74584 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74585 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74586 set_fs(oldfs);
74587 return res;
74588 }
74589 @@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
74590
74591 mm_segment_t oldfs = get_fs();
74592 set_fs(get_ds());
74593 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
74594 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
74595 set_fs(oldfs);
74596 return res;
74597 }
74598 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
74599 index 50009c7..5996a9f 100644
74600 --- a/net/ipv4/ping.c
74601 +++ b/net/ipv4/ping.c
74602 @@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
74603 sk_rmem_alloc_get(sp),
74604 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74605 atomic_read(&sp->sk_refcnt), sp,
74606 - atomic_read(&sp->sk_drops), len);
74607 + atomic_read_unchecked(&sp->sk_drops), len);
74608 }
74609
74610 static int ping_seq_show(struct seq_file *seq, void *v)
74611 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
74612 index bbd604c..4d5469c 100644
74613 --- a/net/ipv4/raw.c
74614 +++ b/net/ipv4/raw.c
74615 @@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
74616 int raw_rcv(struct sock *sk, struct sk_buff *skb)
74617 {
74618 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
74619 - atomic_inc(&sk->sk_drops);
74620 + atomic_inc_unchecked(&sk->sk_drops);
74621 kfree_skb(skb);
74622 return NET_RX_DROP;
74623 }
74624 @@ -740,16 +740,20 @@ static int raw_init(struct sock *sk)
74625
74626 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
74627 {
74628 + struct icmp_filter filter;
74629 +
74630 if (optlen > sizeof(struct icmp_filter))
74631 optlen = sizeof(struct icmp_filter);
74632 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
74633 + if (copy_from_user(&filter, optval, optlen))
74634 return -EFAULT;
74635 + raw_sk(sk)->filter = filter;
74636 return 0;
74637 }
74638
74639 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
74640 {
74641 int len, ret = -EFAULT;
74642 + struct icmp_filter filter;
74643
74644 if (get_user(len, optlen))
74645 goto out;
74646 @@ -759,8 +763,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
74647 if (len > sizeof(struct icmp_filter))
74648 len = sizeof(struct icmp_filter);
74649 ret = -EFAULT;
74650 - if (put_user(len, optlen) ||
74651 - copy_to_user(optval, &raw_sk(sk)->filter, len))
74652 + filter = raw_sk(sk)->filter;
74653 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
74654 goto out;
74655 ret = 0;
74656 out: return ret;
74657 @@ -988,7 +992,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
74658 sk_wmem_alloc_get(sp),
74659 sk_rmem_alloc_get(sp),
74660 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74661 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
74662 + atomic_read(&sp->sk_refcnt),
74663 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74664 + NULL,
74665 +#else
74666 + sp,
74667 +#endif
74668 + atomic_read_unchecked(&sp->sk_drops));
74669 }
74670
74671 static int raw_seq_show(struct seq_file *seq, void *v)
74672 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
74673 index 167ea10..4b15883 100644
74674 --- a/net/ipv4/route.c
74675 +++ b/net/ipv4/route.c
74676 @@ -312,7 +312,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
74677
74678 static inline int rt_genid(struct net *net)
74679 {
74680 - return atomic_read(&net->ipv4.rt_genid);
74681 + return atomic_read_unchecked(&net->ipv4.rt_genid);
74682 }
74683
74684 #ifdef CONFIG_PROC_FS
74685 @@ -936,7 +936,7 @@ static void rt_cache_invalidate(struct net *net)
74686 unsigned char shuffle;
74687
74688 get_random_bytes(&shuffle, sizeof(shuffle));
74689 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
74690 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
74691 inetpeer_invalidate_tree(AF_INET);
74692 }
74693
74694 @@ -3009,7 +3009,7 @@ static int rt_fill_info(struct net *net,
74695 error = rt->dst.error;
74696 if (peer) {
74697 inet_peer_refcheck(rt->peer);
74698 - id = atomic_read(&peer->ip_id_count) & 0xffff;
74699 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
74700 if (peer->tcp_ts_stamp) {
74701 ts = peer->tcp_ts;
74702 tsage = get_seconds() - peer->tcp_ts_stamp;
74703 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
74704 index 0cb86ce..8e7fda8 100644
74705 --- a/net/ipv4/tcp_ipv4.c
74706 +++ b/net/ipv4/tcp_ipv4.c
74707 @@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
74708 EXPORT_SYMBOL(sysctl_tcp_low_latency);
74709
74710
74711 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74712 +extern int grsec_enable_blackhole;
74713 +#endif
74714 +
74715 #ifdef CONFIG_TCP_MD5SIG
74716 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
74717 __be32 daddr, __be32 saddr, const struct tcphdr *th);
74718 @@ -1641,6 +1645,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
74719 return 0;
74720
74721 reset:
74722 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74723 + if (!grsec_enable_blackhole)
74724 +#endif
74725 tcp_v4_send_reset(rsk, skb);
74726 discard:
74727 kfree_skb(skb);
74728 @@ -1703,12 +1710,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
74729 TCP_SKB_CB(skb)->sacked = 0;
74730
74731 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
74732 - if (!sk)
74733 + if (!sk) {
74734 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74735 + ret = 1;
74736 +#endif
74737 goto no_tcp_socket;
74738 -
74739 + }
74740 process:
74741 - if (sk->sk_state == TCP_TIME_WAIT)
74742 + if (sk->sk_state == TCP_TIME_WAIT) {
74743 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74744 + ret = 2;
74745 +#endif
74746 goto do_time_wait;
74747 + }
74748
74749 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
74750 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
74751 @@ -1758,6 +1772,10 @@ no_tcp_socket:
74752 bad_packet:
74753 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
74754 } else {
74755 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74756 + if (!grsec_enable_blackhole || (ret == 1 &&
74757 + (skb->dev->flags & IFF_LOOPBACK)))
74758 +#endif
74759 tcp_v4_send_reset(NULL, skb);
74760 }
74761
74762 @@ -2419,7 +2437,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
74763 0, /* non standard timer */
74764 0, /* open_requests have no inode */
74765 atomic_read(&sk->sk_refcnt),
74766 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74767 + NULL,
74768 +#else
74769 req,
74770 +#endif
74771 len);
74772 }
74773
74774 @@ -2469,7 +2491,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
74775 sock_i_uid(sk),
74776 icsk->icsk_probes_out,
74777 sock_i_ino(sk),
74778 - atomic_read(&sk->sk_refcnt), sk,
74779 + atomic_read(&sk->sk_refcnt),
74780 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74781 + NULL,
74782 +#else
74783 + sk,
74784 +#endif
74785 jiffies_to_clock_t(icsk->icsk_rto),
74786 jiffies_to_clock_t(icsk->icsk_ack.ato),
74787 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
74788 @@ -2497,7 +2524,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
74789 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
74790 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
74791 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
74792 - atomic_read(&tw->tw_refcnt), tw, len);
74793 + atomic_read(&tw->tw_refcnt),
74794 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74795 + NULL,
74796 +#else
74797 + tw,
74798 +#endif
74799 + len);
74800 }
74801
74802 #define TMPSZ 150
74803 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
74804 index 3cabafb..640525b 100644
74805 --- a/net/ipv4/tcp_minisocks.c
74806 +++ b/net/ipv4/tcp_minisocks.c
74807 @@ -27,6 +27,10 @@
74808 #include <net/inet_common.h>
74809 #include <net/xfrm.h>
74810
74811 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74812 +extern int grsec_enable_blackhole;
74813 +#endif
74814 +
74815 int sysctl_tcp_syncookies __read_mostly = 1;
74816 EXPORT_SYMBOL(sysctl_tcp_syncookies);
74817
74818 @@ -753,6 +757,10 @@ listen_overflow:
74819
74820 embryonic_reset:
74821 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
74822 +
74823 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74824 + if (!grsec_enable_blackhole)
74825 +#endif
74826 if (!(flg & TCP_FLAG_RST))
74827 req->rsk_ops->send_reset(sk, skb);
74828
74829 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
74830 index a981cdc..48f4c3a 100644
74831 --- a/net/ipv4/tcp_probe.c
74832 +++ b/net/ipv4/tcp_probe.c
74833 @@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
74834 if (cnt + width >= len)
74835 break;
74836
74837 - if (copy_to_user(buf + cnt, tbuf, width))
74838 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
74839 return -EFAULT;
74840 cnt += width;
74841 }
74842 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
74843 index 34d4a02..3b57f86 100644
74844 --- a/net/ipv4/tcp_timer.c
74845 +++ b/net/ipv4/tcp_timer.c
74846 @@ -22,6 +22,10 @@
74847 #include <linux/gfp.h>
74848 #include <net/tcp.h>
74849
74850 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74851 +extern int grsec_lastack_retries;
74852 +#endif
74853 +
74854 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
74855 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
74856 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
74857 @@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
74858 }
74859 }
74860
74861 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74862 + if ((sk->sk_state == TCP_LAST_ACK) &&
74863 + (grsec_lastack_retries > 0) &&
74864 + (grsec_lastack_retries < retry_until))
74865 + retry_until = grsec_lastack_retries;
74866 +#endif
74867 +
74868 if (retransmits_timed_out(sk, retry_until,
74869 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
74870 /* Has it gone just too far? */
74871 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
74872 index fe14105..0618260 100644
74873 --- a/net/ipv4/udp.c
74874 +++ b/net/ipv4/udp.c
74875 @@ -87,6 +87,7 @@
74876 #include <linux/types.h>
74877 #include <linux/fcntl.h>
74878 #include <linux/module.h>
74879 +#include <linux/security.h>
74880 #include <linux/socket.h>
74881 #include <linux/sockios.h>
74882 #include <linux/igmp.h>
74883 @@ -109,6 +110,10 @@
74884 #include <trace/events/udp.h>
74885 #include "udp_impl.h"
74886
74887 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74888 +extern int grsec_enable_blackhole;
74889 +#endif
74890 +
74891 struct udp_table udp_table __read_mostly;
74892 EXPORT_SYMBOL(udp_table);
74893
74894 @@ -567,6 +572,9 @@ found:
74895 return s;
74896 }
74897
74898 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
74899 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
74900 +
74901 /*
74902 * This routine is called by the ICMP module when it gets some
74903 * sort of error condition. If err < 0 then the socket should
74904 @@ -858,9 +866,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
74905 dport = usin->sin_port;
74906 if (dport == 0)
74907 return -EINVAL;
74908 +
74909 + err = gr_search_udp_sendmsg(sk, usin);
74910 + if (err)
74911 + return err;
74912 } else {
74913 if (sk->sk_state != TCP_ESTABLISHED)
74914 return -EDESTADDRREQ;
74915 +
74916 + err = gr_search_udp_sendmsg(sk, NULL);
74917 + if (err)
74918 + return err;
74919 +
74920 daddr = inet->inet_daddr;
74921 dport = inet->inet_dport;
74922 /* Open fast path for connected socket.
74923 @@ -1102,7 +1119,7 @@ static unsigned int first_packet_length(struct sock *sk)
74924 udp_lib_checksum_complete(skb)) {
74925 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
74926 IS_UDPLITE(sk));
74927 - atomic_inc(&sk->sk_drops);
74928 + atomic_inc_unchecked(&sk->sk_drops);
74929 __skb_unlink(skb, rcvq);
74930 __skb_queue_tail(&list_kill, skb);
74931 }
74932 @@ -1188,6 +1205,10 @@ try_again:
74933 if (!skb)
74934 goto out;
74935
74936 + err = gr_search_udp_recvmsg(sk, skb);
74937 + if (err)
74938 + goto out_free;
74939 +
74940 ulen = skb->len - sizeof(struct udphdr);
74941 copied = len;
74942 if (copied > ulen)
74943 @@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74944
74945 drop:
74946 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
74947 - atomic_inc(&sk->sk_drops);
74948 + atomic_inc_unchecked(&sk->sk_drops);
74949 kfree_skb(skb);
74950 return -1;
74951 }
74952 @@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
74953 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
74954
74955 if (!skb1) {
74956 - atomic_inc(&sk->sk_drops);
74957 + atomic_inc_unchecked(&sk->sk_drops);
74958 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
74959 IS_UDPLITE(sk));
74960 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
74961 @@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
74962 goto csum_error;
74963
74964 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
74965 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74966 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
74967 +#endif
74968 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
74969
74970 /*
74971 @@ -2094,8 +2118,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
74972 sk_wmem_alloc_get(sp),
74973 sk_rmem_alloc_get(sp),
74974 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74975 - atomic_read(&sp->sk_refcnt), sp,
74976 - atomic_read(&sp->sk_drops), len);
74977 + atomic_read(&sp->sk_refcnt),
74978 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74979 + NULL,
74980 +#else
74981 + sp,
74982 +#endif
74983 + atomic_read_unchecked(&sp->sk_drops), len);
74984 }
74985
74986 int udp4_seq_show(struct seq_file *seq, void *v)
74987 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
74988 index 7d5cb97..c56564f 100644
74989 --- a/net/ipv6/addrconf.c
74990 +++ b/net/ipv6/addrconf.c
74991 @@ -2142,7 +2142,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
74992 p.iph.ihl = 5;
74993 p.iph.protocol = IPPROTO_IPV6;
74994 p.iph.ttl = 64;
74995 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
74996 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
74997
74998 if (ops->ndo_do_ioctl) {
74999 mm_segment_t oldfs = get_fs();
75000 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
75001 index 02dd203..e03fcc9 100644
75002 --- a/net/ipv6/inet6_connection_sock.c
75003 +++ b/net/ipv6/inet6_connection_sock.c
75004 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
75005 #ifdef CONFIG_XFRM
75006 {
75007 struct rt6_info *rt = (struct rt6_info *)dst;
75008 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
75009 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
75010 }
75011 #endif
75012 }
75013 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
75014 #ifdef CONFIG_XFRM
75015 if (dst) {
75016 struct rt6_info *rt = (struct rt6_info *)dst;
75017 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
75018 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
75019 __sk_dst_reset(sk);
75020 dst = NULL;
75021 }
75022 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
75023 index 63dd1f8..e7f53ca 100644
75024 --- a/net/ipv6/ipv6_sockglue.c
75025 +++ b/net/ipv6/ipv6_sockglue.c
75026 @@ -990,7 +990,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
75027 if (sk->sk_type != SOCK_STREAM)
75028 return -ENOPROTOOPT;
75029
75030 - msg.msg_control = optval;
75031 + msg.msg_control = (void __force_kernel *)optval;
75032 msg.msg_controllen = len;
75033 msg.msg_flags = flags;
75034
75035 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
75036 index 5bddea7..82d9d67 100644
75037 --- a/net/ipv6/raw.c
75038 +++ b/net/ipv6/raw.c
75039 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
75040 {
75041 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
75042 skb_checksum_complete(skb)) {
75043 - atomic_inc(&sk->sk_drops);
75044 + atomic_inc_unchecked(&sk->sk_drops);
75045 kfree_skb(skb);
75046 return NET_RX_DROP;
75047 }
75048 @@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75049 struct raw6_sock *rp = raw6_sk(sk);
75050
75051 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
75052 - atomic_inc(&sk->sk_drops);
75053 + atomic_inc_unchecked(&sk->sk_drops);
75054 kfree_skb(skb);
75055 return NET_RX_DROP;
75056 }
75057 @@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75058
75059 if (inet->hdrincl) {
75060 if (skb_checksum_complete(skb)) {
75061 - atomic_inc(&sk->sk_drops);
75062 + atomic_inc_unchecked(&sk->sk_drops);
75063 kfree_skb(skb);
75064 return NET_RX_DROP;
75065 }
75066 @@ -602,7 +602,7 @@ out:
75067 return err;
75068 }
75069
75070 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
75071 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
75072 struct flowi6 *fl6, struct dst_entry **dstp,
75073 unsigned int flags)
75074 {
75075 @@ -914,12 +914,15 @@ do_confirm:
75076 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
75077 char __user *optval, int optlen)
75078 {
75079 + struct icmp6_filter filter;
75080 +
75081 switch (optname) {
75082 case ICMPV6_FILTER:
75083 if (optlen > sizeof(struct icmp6_filter))
75084 optlen = sizeof(struct icmp6_filter);
75085 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
75086 + if (copy_from_user(&filter, optval, optlen))
75087 return -EFAULT;
75088 + raw6_sk(sk)->filter = filter;
75089 return 0;
75090 default:
75091 return -ENOPROTOOPT;
75092 @@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75093 char __user *optval, int __user *optlen)
75094 {
75095 int len;
75096 + struct icmp6_filter filter;
75097
75098 switch (optname) {
75099 case ICMPV6_FILTER:
75100 @@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75101 len = sizeof(struct icmp6_filter);
75102 if (put_user(len, optlen))
75103 return -EFAULT;
75104 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
75105 + filter = raw6_sk(sk)->filter;
75106 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
75107 return -EFAULT;
75108 return 0;
75109 default:
75110 @@ -1250,7 +1255,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75111 0, 0L, 0,
75112 sock_i_uid(sp), 0,
75113 sock_i_ino(sp),
75114 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75115 + atomic_read(&sp->sk_refcnt),
75116 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75117 + NULL,
75118 +#else
75119 + sp,
75120 +#endif
75121 + atomic_read_unchecked(&sp->sk_drops));
75122 }
75123
75124 static int raw6_seq_show(struct seq_file *seq, void *v)
75125 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
75126 index 98256cf..7f16dbd 100644
75127 --- a/net/ipv6/tcp_ipv6.c
75128 +++ b/net/ipv6/tcp_ipv6.c
75129 @@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
75130 }
75131 #endif
75132
75133 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75134 +extern int grsec_enable_blackhole;
75135 +#endif
75136 +
75137 static void tcp_v6_hash(struct sock *sk)
75138 {
75139 if (sk->sk_state != TCP_CLOSE) {
75140 @@ -1542,6 +1546,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
75141 return 0;
75142
75143 reset:
75144 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75145 + if (!grsec_enable_blackhole)
75146 +#endif
75147 tcp_v6_send_reset(sk, skb);
75148 discard:
75149 if (opt_skb)
75150 @@ -1623,12 +1630,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
75151 TCP_SKB_CB(skb)->sacked = 0;
75152
75153 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75154 - if (!sk)
75155 + if (!sk) {
75156 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75157 + ret = 1;
75158 +#endif
75159 goto no_tcp_socket;
75160 + }
75161
75162 process:
75163 - if (sk->sk_state == TCP_TIME_WAIT)
75164 + if (sk->sk_state == TCP_TIME_WAIT) {
75165 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75166 + ret = 2;
75167 +#endif
75168 goto do_time_wait;
75169 + }
75170
75171 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
75172 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75173 @@ -1676,6 +1691,10 @@ no_tcp_socket:
75174 bad_packet:
75175 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75176 } else {
75177 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75178 + if (!grsec_enable_blackhole || (ret == 1 &&
75179 + (skb->dev->flags & IFF_LOOPBACK)))
75180 +#endif
75181 tcp_v6_send_reset(NULL, skb);
75182 }
75183
75184 @@ -1930,7 +1949,13 @@ static void get_openreq6(struct seq_file *seq,
75185 uid,
75186 0, /* non standard timer */
75187 0, /* open_requests have no inode */
75188 - 0, req);
75189 + 0,
75190 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75191 + NULL
75192 +#else
75193 + req
75194 +#endif
75195 + );
75196 }
75197
75198 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75199 @@ -1980,7 +2005,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75200 sock_i_uid(sp),
75201 icsk->icsk_probes_out,
75202 sock_i_ino(sp),
75203 - atomic_read(&sp->sk_refcnt), sp,
75204 + atomic_read(&sp->sk_refcnt),
75205 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75206 + NULL,
75207 +#else
75208 + sp,
75209 +#endif
75210 jiffies_to_clock_t(icsk->icsk_rto),
75211 jiffies_to_clock_t(icsk->icsk_ack.ato),
75212 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
75213 @@ -2015,7 +2045,13 @@ static void get_timewait6_sock(struct seq_file *seq,
75214 dest->s6_addr32[2], dest->s6_addr32[3], destp,
75215 tw->tw_substate, 0, 0,
75216 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75217 - atomic_read(&tw->tw_refcnt), tw);
75218 + atomic_read(&tw->tw_refcnt),
75219 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75220 + NULL
75221 +#else
75222 + tw
75223 +#endif
75224 + );
75225 }
75226
75227 static int tcp6_seq_show(struct seq_file *seq, void *v)
75228 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
75229 index 37b0699..d323408 100644
75230 --- a/net/ipv6/udp.c
75231 +++ b/net/ipv6/udp.c
75232 @@ -50,6 +50,10 @@
75233 #include <linux/seq_file.h>
75234 #include "udp_impl.h"
75235
75236 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75237 +extern int grsec_enable_blackhole;
75238 +#endif
75239 +
75240 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
75241 {
75242 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
75243 @@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
75244
75245 return 0;
75246 drop:
75247 - atomic_inc(&sk->sk_drops);
75248 + atomic_inc_unchecked(&sk->sk_drops);
75249 drop_no_sk_drops_inc:
75250 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75251 kfree_skb(skb);
75252 @@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75253 continue;
75254 }
75255 drop:
75256 - atomic_inc(&sk->sk_drops);
75257 + atomic_inc_unchecked(&sk->sk_drops);
75258 UDP6_INC_STATS_BH(sock_net(sk),
75259 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
75260 UDP6_INC_STATS_BH(sock_net(sk),
75261 @@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75262 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
75263 proto == IPPROTO_UDPLITE);
75264
75265 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75266 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75267 +#endif
75268 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
75269
75270 kfree_skb(skb);
75271 @@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75272 if (!sock_owned_by_user(sk))
75273 udpv6_queue_rcv_skb(sk, skb);
75274 else if (sk_add_backlog(sk, skb)) {
75275 - atomic_inc(&sk->sk_drops);
75276 + atomic_inc_unchecked(&sk->sk_drops);
75277 bh_unlock_sock(sk);
75278 sock_put(sk);
75279 goto discard;
75280 @@ -1411,8 +1418,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
75281 0, 0L, 0,
75282 sock_i_uid(sp), 0,
75283 sock_i_ino(sp),
75284 - atomic_read(&sp->sk_refcnt), sp,
75285 - atomic_read(&sp->sk_drops));
75286 + atomic_read(&sp->sk_refcnt),
75287 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75288 + NULL,
75289 +#else
75290 + sp,
75291 +#endif
75292 + atomic_read_unchecked(&sp->sk_drops));
75293 }
75294
75295 int udp6_seq_show(struct seq_file *seq, void *v)
75296 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
75297 index 6b9d5a0..4dffaf1 100644
75298 --- a/net/irda/ircomm/ircomm_tty.c
75299 +++ b/net/irda/ircomm/ircomm_tty.c
75300 @@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75301 add_wait_queue(&self->open_wait, &wait);
75302
75303 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
75304 - __FILE__,__LINE__, tty->driver->name, self->open_count );
75305 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75306
75307 /* As far as I can see, we protect open_count - Jean II */
75308 spin_lock_irqsave(&self->spinlock, flags);
75309 if (!tty_hung_up_p(filp)) {
75310 extra_count = 1;
75311 - self->open_count--;
75312 + local_dec(&self->open_count);
75313 }
75314 spin_unlock_irqrestore(&self->spinlock, flags);
75315 - self->blocked_open++;
75316 + local_inc(&self->blocked_open);
75317
75318 while (1) {
75319 if (tty->termios->c_cflag & CBAUD) {
75320 @@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75321 }
75322
75323 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
75324 - __FILE__,__LINE__, tty->driver->name, self->open_count );
75325 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75326
75327 schedule();
75328 }
75329 @@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75330 if (extra_count) {
75331 /* ++ is not atomic, so this should be protected - Jean II */
75332 spin_lock_irqsave(&self->spinlock, flags);
75333 - self->open_count++;
75334 + local_inc(&self->open_count);
75335 spin_unlock_irqrestore(&self->spinlock, flags);
75336 }
75337 - self->blocked_open--;
75338 + local_dec(&self->blocked_open);
75339
75340 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
75341 - __FILE__,__LINE__, tty->driver->name, self->open_count);
75342 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
75343
75344 if (!retval)
75345 self->flags |= ASYNC_NORMAL_ACTIVE;
75346 @@ -412,14 +412,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
75347 }
75348 /* ++ is not atomic, so this should be protected - Jean II */
75349 spin_lock_irqsave(&self->spinlock, flags);
75350 - self->open_count++;
75351 + local_inc(&self->open_count);
75352
75353 tty->driver_data = self;
75354 self->tty = tty;
75355 spin_unlock_irqrestore(&self->spinlock, flags);
75356
75357 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
75358 - self->line, self->open_count);
75359 + self->line, local_read(&self->open_count));
75360
75361 /* Not really used by us, but lets do it anyway */
75362 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
75363 @@ -505,7 +505,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75364 return;
75365 }
75366
75367 - if ((tty->count == 1) && (self->open_count != 1)) {
75368 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
75369 /*
75370 * Uh, oh. tty->count is 1, which means that the tty
75371 * structure will be freed. state->count should always
75372 @@ -515,16 +515,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75373 */
75374 IRDA_DEBUG(0, "%s(), bad serial port count; "
75375 "tty->count is 1, state->count is %d\n", __func__ ,
75376 - self->open_count);
75377 - self->open_count = 1;
75378 + local_read(&self->open_count));
75379 + local_set(&self->open_count, 1);
75380 }
75381
75382 - if (--self->open_count < 0) {
75383 + if (local_dec_return(&self->open_count) < 0) {
75384 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
75385 - __func__, self->line, self->open_count);
75386 - self->open_count = 0;
75387 + __func__, self->line, local_read(&self->open_count));
75388 + local_set(&self->open_count, 0);
75389 }
75390 - if (self->open_count) {
75391 + if (local_read(&self->open_count)) {
75392 spin_unlock_irqrestore(&self->spinlock, flags);
75393
75394 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
75395 @@ -556,7 +556,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75396 tty->closing = 0;
75397 self->tty = NULL;
75398
75399 - if (self->blocked_open) {
75400 + if (local_read(&self->blocked_open)) {
75401 if (self->close_delay)
75402 schedule_timeout_interruptible(self->close_delay);
75403 wake_up_interruptible(&self->open_wait);
75404 @@ -1008,7 +1008,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
75405 spin_lock_irqsave(&self->spinlock, flags);
75406 self->flags &= ~ASYNC_NORMAL_ACTIVE;
75407 self->tty = NULL;
75408 - self->open_count = 0;
75409 + local_set(&self->open_count, 0);
75410 spin_unlock_irqrestore(&self->spinlock, flags);
75411
75412 wake_up_interruptible(&self->open_wait);
75413 @@ -1355,7 +1355,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
75414 seq_putc(m, '\n');
75415
75416 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
75417 - seq_printf(m, "Open count: %d\n", self->open_count);
75418 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
75419 seq_printf(m, "Max data size: %d\n", self->max_data_size);
75420 seq_printf(m, "Max header size: %d\n", self->max_header_size);
75421
75422 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
75423 index 07d7d55..541de95 100644
75424 --- a/net/iucv/af_iucv.c
75425 +++ b/net/iucv/af_iucv.c
75426 @@ -783,10 +783,10 @@ static int iucv_sock_autobind(struct sock *sk)
75427
75428 write_lock_bh(&iucv_sk_list.lock);
75429
75430 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
75431 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75432 while (__iucv_get_sock_by_name(name)) {
75433 sprintf(name, "%08x",
75434 - atomic_inc_return(&iucv_sk_list.autobind_name));
75435 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75436 }
75437
75438 write_unlock_bh(&iucv_sk_list.lock);
75439 diff --git a/net/key/af_key.c b/net/key/af_key.c
75440 index 7e5d927..cdbb54e 100644
75441 --- a/net/key/af_key.c
75442 +++ b/net/key/af_key.c
75443 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
75444 static u32 get_acqseq(void)
75445 {
75446 u32 res;
75447 - static atomic_t acqseq;
75448 + static atomic_unchecked_t acqseq;
75449
75450 do {
75451 - res = atomic_inc_return(&acqseq);
75452 + res = atomic_inc_return_unchecked(&acqseq);
75453 } while (!res);
75454 return res;
75455 }
75456 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
75457 index db8fae5..ff070cd 100644
75458 --- a/net/mac80211/ieee80211_i.h
75459 +++ b/net/mac80211/ieee80211_i.h
75460 @@ -28,6 +28,7 @@
75461 #include <net/ieee80211_radiotap.h>
75462 #include <net/cfg80211.h>
75463 #include <net/mac80211.h>
75464 +#include <asm/local.h>
75465 #include "key.h"
75466 #include "sta_info.h"
75467
75468 @@ -842,7 +843,7 @@ struct ieee80211_local {
75469 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
75470 spinlock_t queue_stop_reason_lock;
75471
75472 - int open_count;
75473 + local_t open_count;
75474 int monitors, cooked_mntrs;
75475 /* number of interfaces with corresponding FIF_ flags */
75476 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
75477 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
75478 index c20051b..2accbc4 100644
75479 --- a/net/mac80211/iface.c
75480 +++ b/net/mac80211/iface.c
75481 @@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75482 break;
75483 }
75484
75485 - if (local->open_count == 0) {
75486 + if (local_read(&local->open_count) == 0) {
75487 res = drv_start(local);
75488 if (res)
75489 goto err_del_bss;
75490 @@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75491 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
75492
75493 if (!is_valid_ether_addr(dev->dev_addr)) {
75494 - if (!local->open_count)
75495 + if (!local_read(&local->open_count))
75496 drv_stop(local);
75497 return -EADDRNOTAVAIL;
75498 }
75499 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75500 mutex_unlock(&local->mtx);
75501
75502 if (coming_up)
75503 - local->open_count++;
75504 + local_inc(&local->open_count);
75505
75506 if (hw_reconf_flags)
75507 ieee80211_hw_config(local, hw_reconf_flags);
75508 @@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75509 err_del_interface:
75510 drv_remove_interface(local, sdata);
75511 err_stop:
75512 - if (!local->open_count)
75513 + if (!local_read(&local->open_count))
75514 drv_stop(local);
75515 err_del_bss:
75516 sdata->bss = NULL;
75517 @@ -491,7 +491,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75518 }
75519
75520 if (going_down)
75521 - local->open_count--;
75522 + local_dec(&local->open_count);
75523
75524 switch (sdata->vif.type) {
75525 case NL80211_IFTYPE_AP_VLAN:
75526 @@ -550,7 +550,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75527
75528 ieee80211_recalc_ps(local, -1);
75529
75530 - if (local->open_count == 0) {
75531 + if (local_read(&local->open_count) == 0) {
75532 if (local->ops->napi_poll)
75533 napi_disable(&local->napi);
75534 ieee80211_clear_tx_pending(local);
75535 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
75536 index 1633648..d45ebfa 100644
75537 --- a/net/mac80211/main.c
75538 +++ b/net/mac80211/main.c
75539 @@ -164,7 +164,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
75540 local->hw.conf.power_level = power;
75541 }
75542
75543 - if (changed && local->open_count) {
75544 + if (changed && local_read(&local->open_count)) {
75545 ret = drv_config(local, changed);
75546 /*
75547 * Goal:
75548 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
75549 index ef8eba1..5c63952 100644
75550 --- a/net/mac80211/pm.c
75551 +++ b/net/mac80211/pm.c
75552 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75553 struct ieee80211_sub_if_data *sdata;
75554 struct sta_info *sta;
75555
75556 - if (!local->open_count)
75557 + if (!local_read(&local->open_count))
75558 goto suspend;
75559
75560 ieee80211_scan_cancel(local);
75561 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75562 cancel_work_sync(&local->dynamic_ps_enable_work);
75563 del_timer_sync(&local->dynamic_ps_timer);
75564
75565 - local->wowlan = wowlan && local->open_count;
75566 + local->wowlan = wowlan && local_read(&local->open_count);
75567 if (local->wowlan) {
75568 int err = drv_suspend(local, wowlan);
75569 if (err < 0) {
75570 @@ -128,7 +128,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75571 }
75572
75573 /* stop hardware - this must stop RX */
75574 - if (local->open_count)
75575 + if (local_read(&local->open_count))
75576 ieee80211_stop_device(local);
75577
75578 suspend:
75579 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
75580 index 3313c11..bec9f17 100644
75581 --- a/net/mac80211/rate.c
75582 +++ b/net/mac80211/rate.c
75583 @@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
75584
75585 ASSERT_RTNL();
75586
75587 - if (local->open_count)
75588 + if (local_read(&local->open_count))
75589 return -EBUSY;
75590
75591 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
75592 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
75593 index c97a065..ff61928 100644
75594 --- a/net/mac80211/rc80211_pid_debugfs.c
75595 +++ b/net/mac80211/rc80211_pid_debugfs.c
75596 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
75597
75598 spin_unlock_irqrestore(&events->lock, status);
75599
75600 - if (copy_to_user(buf, pb, p))
75601 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
75602 return -EFAULT;
75603
75604 return p;
75605 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
75606 index 3862c96..3258ddc 100644
75607 --- a/net/mac80211/util.c
75608 +++ b/net/mac80211/util.c
75609 @@ -1179,7 +1179,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
75610 }
75611 #endif
75612 /* everything else happens only if HW was up & running */
75613 - if (!local->open_count)
75614 + if (!local_read(&local->open_count))
75615 goto wake_up;
75616
75617 /*
75618 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
75619 index 0c6f67e..d02cdfc 100644
75620 --- a/net/netfilter/Kconfig
75621 +++ b/net/netfilter/Kconfig
75622 @@ -836,6 +836,16 @@ config NETFILTER_XT_MATCH_ESP
75623
75624 To compile it as a module, choose M here. If unsure, say N.
75625
75626 +config NETFILTER_XT_MATCH_GRADM
75627 + tristate '"gradm" match support'
75628 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
75629 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
75630 + ---help---
75631 + The gradm match allows to match on grsecurity RBAC being enabled.
75632 + It is useful when iptables rules are applied early on bootup to
75633 + prevent connections to the machine (except from a trusted host)
75634 + while the RBAC system is disabled.
75635 +
75636 config NETFILTER_XT_MATCH_HASHLIMIT
75637 tristate '"hashlimit" match support'
75638 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
75639 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
75640 index ca36765..0882e7c 100644
75641 --- a/net/netfilter/Makefile
75642 +++ b/net/netfilter/Makefile
75643 @@ -86,6 +86,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
75644 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
75645 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
75646 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
75647 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
75648 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
75649 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
75650 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
75651 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
75652 index 29fa5ba..8debc79 100644
75653 --- a/net/netfilter/ipvs/ip_vs_conn.c
75654 +++ b/net/netfilter/ipvs/ip_vs_conn.c
75655 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
75656 /* Increase the refcnt counter of the dest */
75657 atomic_inc(&dest->refcnt);
75658
75659 - conn_flags = atomic_read(&dest->conn_flags);
75660 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
75661 if (cp->protocol != IPPROTO_UDP)
75662 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
75663 /* Bind with the destination and its corresponding transmitter */
75664 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
75665 atomic_set(&cp->refcnt, 1);
75666
75667 atomic_set(&cp->n_control, 0);
75668 - atomic_set(&cp->in_pkts, 0);
75669 + atomic_set_unchecked(&cp->in_pkts, 0);
75670
75671 atomic_inc(&ipvs->conn_count);
75672 if (flags & IP_VS_CONN_F_NO_CPORT)
75673 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
75674
75675 /* Don't drop the entry if its number of incoming packets is not
75676 located in [0, 8] */
75677 - i = atomic_read(&cp->in_pkts);
75678 + i = atomic_read_unchecked(&cp->in_pkts);
75679 if (i > 8 || i < 0) return 0;
75680
75681 if (!todrop_rate[i]) return 0;
75682 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
75683 index 00bdb1d..6725a48 100644
75684 --- a/net/netfilter/ipvs/ip_vs_core.c
75685 +++ b/net/netfilter/ipvs/ip_vs_core.c
75686 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
75687 ret = cp->packet_xmit(skb, cp, pd->pp);
75688 /* do not touch skb anymore */
75689
75690 - atomic_inc(&cp->in_pkts);
75691 + atomic_inc_unchecked(&cp->in_pkts);
75692 ip_vs_conn_put(cp);
75693 return ret;
75694 }
75695 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
75696 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
75697 pkts = sysctl_sync_threshold(ipvs);
75698 else
75699 - pkts = atomic_add_return(1, &cp->in_pkts);
75700 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75701
75702 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
75703 cp->protocol == IPPROTO_SCTP) {
75704 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
75705 index f558998..9cdff60 100644
75706 --- a/net/netfilter/ipvs/ip_vs_ctl.c
75707 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
75708 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
75709 ip_vs_rs_hash(ipvs, dest);
75710 write_unlock_bh(&ipvs->rs_lock);
75711 }
75712 - atomic_set(&dest->conn_flags, conn_flags);
75713 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
75714
75715 /* bind the service */
75716 if (!dest->svc) {
75717 @@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75718 " %-7s %-6d %-10d %-10d\n",
75719 &dest->addr.in6,
75720 ntohs(dest->port),
75721 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75722 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75723 atomic_read(&dest->weight),
75724 atomic_read(&dest->activeconns),
75725 atomic_read(&dest->inactconns));
75726 @@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75727 "%-7s %-6d %-10d %-10d\n",
75728 ntohl(dest->addr.ip),
75729 ntohs(dest->port),
75730 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75731 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75732 atomic_read(&dest->weight),
75733 atomic_read(&dest->activeconns),
75734 atomic_read(&dest->inactconns));
75735 @@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
75736
75737 entry.addr = dest->addr.ip;
75738 entry.port = dest->port;
75739 - entry.conn_flags = atomic_read(&dest->conn_flags);
75740 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
75741 entry.weight = atomic_read(&dest->weight);
75742 entry.u_threshold = dest->u_threshold;
75743 entry.l_threshold = dest->l_threshold;
75744 @@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
75745 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
75746
75747 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
75748 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75749 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75750 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
75751 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
75752 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
75753 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
75754 index 8a0d6d6..90ec197 100644
75755 --- a/net/netfilter/ipvs/ip_vs_sync.c
75756 +++ b/net/netfilter/ipvs/ip_vs_sync.c
75757 @@ -649,7 +649,7 @@ control:
75758 * i.e only increment in_pkts for Templates.
75759 */
75760 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
75761 - int pkts = atomic_add_return(1, &cp->in_pkts);
75762 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75763
75764 if (pkts % sysctl_sync_period(ipvs) != 1)
75765 return;
75766 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
75767
75768 if (opt)
75769 memcpy(&cp->in_seq, opt, sizeof(*opt));
75770 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75771 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75772 cp->state = state;
75773 cp->old_state = cp->state;
75774 /*
75775 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
75776 index 7fd66de..e6fb361 100644
75777 --- a/net/netfilter/ipvs/ip_vs_xmit.c
75778 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
75779 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
75780 else
75781 rc = NF_ACCEPT;
75782 /* do not touch skb anymore */
75783 - atomic_inc(&cp->in_pkts);
75784 + atomic_inc_unchecked(&cp->in_pkts);
75785 goto out;
75786 }
75787
75788 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
75789 else
75790 rc = NF_ACCEPT;
75791 /* do not touch skb anymore */
75792 - atomic_inc(&cp->in_pkts);
75793 + atomic_inc_unchecked(&cp->in_pkts);
75794 goto out;
75795 }
75796
75797 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
75798 index 66b2c54..c7884e3 100644
75799 --- a/net/netfilter/nfnetlink_log.c
75800 +++ b/net/netfilter/nfnetlink_log.c
75801 @@ -70,7 +70,7 @@ struct nfulnl_instance {
75802 };
75803
75804 static DEFINE_SPINLOCK(instances_lock);
75805 -static atomic_t global_seq;
75806 +static atomic_unchecked_t global_seq;
75807
75808 #define INSTANCE_BUCKETS 16
75809 static struct hlist_head instance_table[INSTANCE_BUCKETS];
75810 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
75811 /* global sequence number */
75812 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
75813 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
75814 - htonl(atomic_inc_return(&global_seq)));
75815 + htonl(atomic_inc_return_unchecked(&global_seq)));
75816
75817 if (data_len) {
75818 struct nlattr *nla;
75819 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
75820 new file mode 100644
75821 index 0000000..6905327
75822 --- /dev/null
75823 +++ b/net/netfilter/xt_gradm.c
75824 @@ -0,0 +1,51 @@
75825 +/*
75826 + * gradm match for netfilter
75827 + * Copyright © Zbigniew Krzystolik, 2010
75828 + *
75829 + * This program is free software; you can redistribute it and/or modify
75830 + * it under the terms of the GNU General Public License; either version
75831 + * 2 or 3 as published by the Free Software Foundation.
75832 + */
75833 +#include <linux/module.h>
75834 +#include <linux/moduleparam.h>
75835 +#include <linux/skbuff.h>
75836 +#include <linux/netfilter/x_tables.h>
75837 +#include <linux/grsecurity.h>
75838 +#include <linux/netfilter/xt_gradm.h>
75839 +
75840 +static bool
75841 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
75842 +{
75843 + const struct xt_gradm_mtinfo *info = par->matchinfo;
75844 + bool retval = false;
75845 + if (gr_acl_is_enabled())
75846 + retval = true;
75847 + return retval ^ info->invflags;
75848 +}
75849 +
75850 +static struct xt_match gradm_mt_reg __read_mostly = {
75851 + .name = "gradm",
75852 + .revision = 0,
75853 + .family = NFPROTO_UNSPEC,
75854 + .match = gradm_mt,
75855 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
75856 + .me = THIS_MODULE,
75857 +};
75858 +
75859 +static int __init gradm_mt_init(void)
75860 +{
75861 + return xt_register_match(&gradm_mt_reg);
75862 +}
75863 +
75864 +static void __exit gradm_mt_exit(void)
75865 +{
75866 + xt_unregister_match(&gradm_mt_reg);
75867 +}
75868 +
75869 +module_init(gradm_mt_init);
75870 +module_exit(gradm_mt_exit);
75871 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
75872 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
75873 +MODULE_LICENSE("GPL");
75874 +MODULE_ALIAS("ipt_gradm");
75875 +MODULE_ALIAS("ip6t_gradm");
75876 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
75877 index 4fe4fb4..87a89e5 100644
75878 --- a/net/netfilter/xt_statistic.c
75879 +++ b/net/netfilter/xt_statistic.c
75880 @@ -19,7 +19,7 @@
75881 #include <linux/module.h>
75882
75883 struct xt_statistic_priv {
75884 - atomic_t count;
75885 + atomic_unchecked_t count;
75886 } ____cacheline_aligned_in_smp;
75887
75888 MODULE_LICENSE("GPL");
75889 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
75890 break;
75891 case XT_STATISTIC_MODE_NTH:
75892 do {
75893 - oval = atomic_read(&info->master->count);
75894 + oval = atomic_read_unchecked(&info->master->count);
75895 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
75896 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
75897 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
75898 if (nval == 0)
75899 ret = !ret;
75900 break;
75901 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
75902 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
75903 if (info->master == NULL)
75904 return -ENOMEM;
75905 - atomic_set(&info->master->count, info->u.nth.count);
75906 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
75907
75908 return 0;
75909 }
75910 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
75911 index faa48f7..65f7f54 100644
75912 --- a/net/netlink/af_netlink.c
75913 +++ b/net/netlink/af_netlink.c
75914 @@ -741,7 +741,7 @@ static void netlink_overrun(struct sock *sk)
75915 sk->sk_error_report(sk);
75916 }
75917 }
75918 - atomic_inc(&sk->sk_drops);
75919 + atomic_inc_unchecked(&sk->sk_drops);
75920 }
75921
75922 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
75923 @@ -2013,7 +2013,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
75924 sk_wmem_alloc_get(s),
75925 nlk->cb,
75926 atomic_read(&s->sk_refcnt),
75927 - atomic_read(&s->sk_drops),
75928 + atomic_read_unchecked(&s->sk_drops),
75929 sock_i_ino(s)
75930 );
75931
75932 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
75933 index 06592d8..64860f6 100644
75934 --- a/net/netrom/af_netrom.c
75935 +++ b/net/netrom/af_netrom.c
75936 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
75937 struct sock *sk = sock->sk;
75938 struct nr_sock *nr = nr_sk(sk);
75939
75940 + memset(sax, 0, sizeof(*sax));
75941 lock_sock(sk);
75942 if (peer != 0) {
75943 if (sk->sk_state != TCP_ESTABLISHED) {
75944 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
75945 *uaddr_len = sizeof(struct full_sockaddr_ax25);
75946 } else {
75947 sax->fsa_ax25.sax25_family = AF_NETROM;
75948 - sax->fsa_ax25.sax25_ndigis = 0;
75949 sax->fsa_ax25.sax25_call = nr->source_addr;
75950 *uaddr_len = sizeof(struct sockaddr_ax25);
75951 }
75952 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
75953 index 4f2c0df..f0ff342 100644
75954 --- a/net/packet/af_packet.c
75955 +++ b/net/packet/af_packet.c
75956 @@ -1687,7 +1687,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
75957
75958 spin_lock(&sk->sk_receive_queue.lock);
75959 po->stats.tp_packets++;
75960 - skb->dropcount = atomic_read(&sk->sk_drops);
75961 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
75962 __skb_queue_tail(&sk->sk_receive_queue, skb);
75963 spin_unlock(&sk->sk_receive_queue.lock);
75964 sk->sk_data_ready(sk, skb->len);
75965 @@ -1696,7 +1696,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
75966 drop_n_acct:
75967 spin_lock(&sk->sk_receive_queue.lock);
75968 po->stats.tp_drops++;
75969 - atomic_inc(&sk->sk_drops);
75970 + atomic_inc_unchecked(&sk->sk_drops);
75971 spin_unlock(&sk->sk_receive_queue.lock);
75972
75973 drop_n_restore:
75974 @@ -3294,7 +3294,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
75975 case PACKET_HDRLEN:
75976 if (len > sizeof(int))
75977 len = sizeof(int);
75978 - if (copy_from_user(&val, optval, len))
75979 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
75980 return -EFAULT;
75981 switch (val) {
75982 case TPACKET_V1:
75983 @@ -3344,7 +3344,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
75984
75985 if (put_user(len, optlen))
75986 return -EFAULT;
75987 - if (copy_to_user(optval, data, len))
75988 + if (len > sizeof(st) || copy_to_user(optval, data, len))
75989 return -EFAULT;
75990 return 0;
75991 }
75992 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
75993 index d65f699..05aa6ce 100644
75994 --- a/net/phonet/af_phonet.c
75995 +++ b/net/phonet/af_phonet.c
75996 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
75997 {
75998 struct phonet_protocol *pp;
75999
76000 - if (protocol >= PHONET_NPROTO)
76001 + if (protocol < 0 || protocol >= PHONET_NPROTO)
76002 return NULL;
76003
76004 rcu_read_lock();
76005 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
76006 {
76007 int err = 0;
76008
76009 - if (protocol >= PHONET_NPROTO)
76010 + if (protocol < 0 || protocol >= PHONET_NPROTO)
76011 return -EINVAL;
76012
76013 err = proto_register(pp->prot, 1);
76014 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
76015 index 9726fe6..fc4e3a4 100644
76016 --- a/net/phonet/pep.c
76017 +++ b/net/phonet/pep.c
76018 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
76019
76020 case PNS_PEP_CTRL_REQ:
76021 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
76022 - atomic_inc(&sk->sk_drops);
76023 + atomic_inc_unchecked(&sk->sk_drops);
76024 break;
76025 }
76026 __skb_pull(skb, 4);
76027 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
76028 }
76029
76030 if (pn->rx_credits == 0) {
76031 - atomic_inc(&sk->sk_drops);
76032 + atomic_inc_unchecked(&sk->sk_drops);
76033 err = -ENOBUFS;
76034 break;
76035 }
76036 @@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
76037 }
76038
76039 if (pn->rx_credits == 0) {
76040 - atomic_inc(&sk->sk_drops);
76041 + atomic_inc_unchecked(&sk->sk_drops);
76042 err = NET_RX_DROP;
76043 break;
76044 }
76045 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
76046 index 4c7eff3..59c727f 100644
76047 --- a/net/phonet/socket.c
76048 +++ b/net/phonet/socket.c
76049 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
76050 pn->resource, sk->sk_state,
76051 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
76052 sock_i_uid(sk), sock_i_ino(sk),
76053 - atomic_read(&sk->sk_refcnt), sk,
76054 - atomic_read(&sk->sk_drops), &len);
76055 + atomic_read(&sk->sk_refcnt),
76056 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76057 + NULL,
76058 +#else
76059 + sk,
76060 +#endif
76061 + atomic_read_unchecked(&sk->sk_drops), &len);
76062 }
76063 seq_printf(seq, "%*s\n", 127 - len, "");
76064 return 0;
76065 diff --git a/net/rds/cong.c b/net/rds/cong.c
76066 index e5b65ac..f3b6fb7 100644
76067 --- a/net/rds/cong.c
76068 +++ b/net/rds/cong.c
76069 @@ -78,7 +78,7 @@
76070 * finds that the saved generation number is smaller than the global generation
76071 * number, it wakes up the process.
76072 */
76073 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
76074 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
76075
76076 /*
76077 * Congestion monitoring
76078 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
76079 rdsdebug("waking map %p for %pI4\n",
76080 map, &map->m_addr);
76081 rds_stats_inc(s_cong_update_received);
76082 - atomic_inc(&rds_cong_generation);
76083 + atomic_inc_unchecked(&rds_cong_generation);
76084 if (waitqueue_active(&map->m_waitq))
76085 wake_up(&map->m_waitq);
76086 if (waitqueue_active(&rds_poll_waitq))
76087 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
76088
76089 int rds_cong_updated_since(unsigned long *recent)
76090 {
76091 - unsigned long gen = atomic_read(&rds_cong_generation);
76092 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
76093
76094 if (likely(*recent == gen))
76095 return 0;
76096 diff --git a/net/rds/ib.h b/net/rds/ib.h
76097 index edfaaaf..8c89879 100644
76098 --- a/net/rds/ib.h
76099 +++ b/net/rds/ib.h
76100 @@ -128,7 +128,7 @@ struct rds_ib_connection {
76101 /* sending acks */
76102 unsigned long i_ack_flags;
76103 #ifdef KERNEL_HAS_ATOMIC64
76104 - atomic64_t i_ack_next; /* next ACK to send */
76105 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
76106 #else
76107 spinlock_t i_ack_lock; /* protect i_ack_next */
76108 u64 i_ack_next; /* next ACK to send */
76109 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
76110 index a1e1162..265e129 100644
76111 --- a/net/rds/ib_cm.c
76112 +++ b/net/rds/ib_cm.c
76113 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
76114 /* Clear the ACK state */
76115 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
76116 #ifdef KERNEL_HAS_ATOMIC64
76117 - atomic64_set(&ic->i_ack_next, 0);
76118 + atomic64_set_unchecked(&ic->i_ack_next, 0);
76119 #else
76120 ic->i_ack_next = 0;
76121 #endif
76122 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
76123 index 8d19491..05a3e65 100644
76124 --- a/net/rds/ib_recv.c
76125 +++ b/net/rds/ib_recv.c
76126 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
76127 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
76128 int ack_required)
76129 {
76130 - atomic64_set(&ic->i_ack_next, seq);
76131 + atomic64_set_unchecked(&ic->i_ack_next, seq);
76132 if (ack_required) {
76133 smp_mb__before_clear_bit();
76134 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76135 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
76136 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76137 smp_mb__after_clear_bit();
76138
76139 - return atomic64_read(&ic->i_ack_next);
76140 + return atomic64_read_unchecked(&ic->i_ack_next);
76141 }
76142 #endif
76143
76144 diff --git a/net/rds/iw.h b/net/rds/iw.h
76145 index 04ce3b1..48119a6 100644
76146 --- a/net/rds/iw.h
76147 +++ b/net/rds/iw.h
76148 @@ -134,7 +134,7 @@ struct rds_iw_connection {
76149 /* sending acks */
76150 unsigned long i_ack_flags;
76151 #ifdef KERNEL_HAS_ATOMIC64
76152 - atomic64_t i_ack_next; /* next ACK to send */
76153 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
76154 #else
76155 spinlock_t i_ack_lock; /* protect i_ack_next */
76156 u64 i_ack_next; /* next ACK to send */
76157 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
76158 index a91e1db..cf3053f 100644
76159 --- a/net/rds/iw_cm.c
76160 +++ b/net/rds/iw_cm.c
76161 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
76162 /* Clear the ACK state */
76163 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
76164 #ifdef KERNEL_HAS_ATOMIC64
76165 - atomic64_set(&ic->i_ack_next, 0);
76166 + atomic64_set_unchecked(&ic->i_ack_next, 0);
76167 #else
76168 ic->i_ack_next = 0;
76169 #endif
76170 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
76171 index 4503335..db566b4 100644
76172 --- a/net/rds/iw_recv.c
76173 +++ b/net/rds/iw_recv.c
76174 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
76175 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
76176 int ack_required)
76177 {
76178 - atomic64_set(&ic->i_ack_next, seq);
76179 + atomic64_set_unchecked(&ic->i_ack_next, seq);
76180 if (ack_required) {
76181 smp_mb__before_clear_bit();
76182 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76183 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
76184 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76185 smp_mb__after_clear_bit();
76186
76187 - return atomic64_read(&ic->i_ack_next);
76188 + return atomic64_read_unchecked(&ic->i_ack_next);
76189 }
76190 #endif
76191
76192 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
76193 index edac9ef..16bcb98 100644
76194 --- a/net/rds/tcp.c
76195 +++ b/net/rds/tcp.c
76196 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
76197 int val = 1;
76198
76199 set_fs(KERNEL_DS);
76200 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
76201 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
76202 sizeof(val));
76203 set_fs(oldfs);
76204 }
76205 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
76206 index 1b4fd68..2234175 100644
76207 --- a/net/rds/tcp_send.c
76208 +++ b/net/rds/tcp_send.c
76209 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
76210
76211 oldfs = get_fs();
76212 set_fs(KERNEL_DS);
76213 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
76214 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
76215 sizeof(val));
76216 set_fs(oldfs);
76217 }
76218 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
76219 index 74c064c..fdec26f 100644
76220 --- a/net/rxrpc/af_rxrpc.c
76221 +++ b/net/rxrpc/af_rxrpc.c
76222 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
76223 __be32 rxrpc_epoch;
76224
76225 /* current debugging ID */
76226 -atomic_t rxrpc_debug_id;
76227 +atomic_unchecked_t rxrpc_debug_id;
76228
76229 /* count of skbs currently in use */
76230 atomic_t rxrpc_n_skbs;
76231 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
76232 index c3126e8..21facc7 100644
76233 --- a/net/rxrpc/ar-ack.c
76234 +++ b/net/rxrpc/ar-ack.c
76235 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
76236
76237 _enter("{%d,%d,%d,%d},",
76238 call->acks_hard, call->acks_unacked,
76239 - atomic_read(&call->sequence),
76240 + atomic_read_unchecked(&call->sequence),
76241 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
76242
76243 stop = 0;
76244 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
76245
76246 /* each Tx packet has a new serial number */
76247 sp->hdr.serial =
76248 - htonl(atomic_inc_return(&call->conn->serial));
76249 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
76250
76251 hdr = (struct rxrpc_header *) txb->head;
76252 hdr->serial = sp->hdr.serial;
76253 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
76254 */
76255 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
76256 {
76257 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
76258 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
76259 }
76260
76261 /*
76262 @@ -629,7 +629,7 @@ process_further:
76263
76264 latest = ntohl(sp->hdr.serial);
76265 hard = ntohl(ack.firstPacket);
76266 - tx = atomic_read(&call->sequence);
76267 + tx = atomic_read_unchecked(&call->sequence);
76268
76269 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
76270 latest,
76271 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
76272 goto maybe_reschedule;
76273
76274 send_ACK_with_skew:
76275 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
76276 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
76277 ntohl(ack.serial));
76278 send_ACK:
76279 mtu = call->conn->trans->peer->if_mtu;
76280 @@ -1173,7 +1173,7 @@ send_ACK:
76281 ackinfo.rxMTU = htonl(5692);
76282 ackinfo.jumbo_max = htonl(4);
76283
76284 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
76285 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
76286 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
76287 ntohl(hdr.serial),
76288 ntohs(ack.maxSkew),
76289 @@ -1191,7 +1191,7 @@ send_ACK:
76290 send_message:
76291 _debug("send message");
76292
76293 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
76294 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
76295 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
76296 send_message_2:
76297
76298 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
76299 index bf656c2..48f9d27 100644
76300 --- a/net/rxrpc/ar-call.c
76301 +++ b/net/rxrpc/ar-call.c
76302 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
76303 spin_lock_init(&call->lock);
76304 rwlock_init(&call->state_lock);
76305 atomic_set(&call->usage, 1);
76306 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
76307 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76308 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
76309
76310 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
76311 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
76312 index 4106ca9..a338d7a 100644
76313 --- a/net/rxrpc/ar-connection.c
76314 +++ b/net/rxrpc/ar-connection.c
76315 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
76316 rwlock_init(&conn->lock);
76317 spin_lock_init(&conn->state_lock);
76318 atomic_set(&conn->usage, 1);
76319 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
76320 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76321 conn->avail_calls = RXRPC_MAXCALLS;
76322 conn->size_align = 4;
76323 conn->header_size = sizeof(struct rxrpc_header);
76324 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
76325 index e7ed43a..6afa140 100644
76326 --- a/net/rxrpc/ar-connevent.c
76327 +++ b/net/rxrpc/ar-connevent.c
76328 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
76329
76330 len = iov[0].iov_len + iov[1].iov_len;
76331
76332 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
76333 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76334 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
76335
76336 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
76337 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
76338 index 1a2b0633..e8d1382 100644
76339 --- a/net/rxrpc/ar-input.c
76340 +++ b/net/rxrpc/ar-input.c
76341 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
76342 /* track the latest serial number on this connection for ACK packet
76343 * information */
76344 serial = ntohl(sp->hdr.serial);
76345 - hi_serial = atomic_read(&call->conn->hi_serial);
76346 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
76347 while (serial > hi_serial)
76348 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
76349 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
76350 serial);
76351
76352 /* request ACK generation for any ACK or DATA packet that requests
76353 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
76354 index 8e22bd3..f66d1c0 100644
76355 --- a/net/rxrpc/ar-internal.h
76356 +++ b/net/rxrpc/ar-internal.h
76357 @@ -272,8 +272,8 @@ struct rxrpc_connection {
76358 int error; /* error code for local abort */
76359 int debug_id; /* debug ID for printks */
76360 unsigned call_counter; /* call ID counter */
76361 - atomic_t serial; /* packet serial number counter */
76362 - atomic_t hi_serial; /* highest serial number received */
76363 + atomic_unchecked_t serial; /* packet serial number counter */
76364 + atomic_unchecked_t hi_serial; /* highest serial number received */
76365 u8 avail_calls; /* number of calls available */
76366 u8 size_align; /* data size alignment (for security) */
76367 u8 header_size; /* rxrpc + security header size */
76368 @@ -346,7 +346,7 @@ struct rxrpc_call {
76369 spinlock_t lock;
76370 rwlock_t state_lock; /* lock for state transition */
76371 atomic_t usage;
76372 - atomic_t sequence; /* Tx data packet sequence counter */
76373 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
76374 u32 abort_code; /* local/remote abort code */
76375 enum { /* current state of call */
76376 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
76377 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
76378 */
76379 extern atomic_t rxrpc_n_skbs;
76380 extern __be32 rxrpc_epoch;
76381 -extern atomic_t rxrpc_debug_id;
76382 +extern atomic_unchecked_t rxrpc_debug_id;
76383 extern struct workqueue_struct *rxrpc_workqueue;
76384
76385 /*
76386 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
76387 index 87f7135..74d3703 100644
76388 --- a/net/rxrpc/ar-local.c
76389 +++ b/net/rxrpc/ar-local.c
76390 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
76391 spin_lock_init(&local->lock);
76392 rwlock_init(&local->services_lock);
76393 atomic_set(&local->usage, 1);
76394 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
76395 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76396 memcpy(&local->srx, srx, sizeof(*srx));
76397 }
76398
76399 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
76400 index 16ae887..d24f12b 100644
76401 --- a/net/rxrpc/ar-output.c
76402 +++ b/net/rxrpc/ar-output.c
76403 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
76404 sp->hdr.cid = call->cid;
76405 sp->hdr.callNumber = call->call_id;
76406 sp->hdr.seq =
76407 - htonl(atomic_inc_return(&call->sequence));
76408 + htonl(atomic_inc_return_unchecked(&call->sequence));
76409 sp->hdr.serial =
76410 - htonl(atomic_inc_return(&conn->serial));
76411 + htonl(atomic_inc_return_unchecked(&conn->serial));
76412 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
76413 sp->hdr.userStatus = 0;
76414 sp->hdr.securityIndex = conn->security_ix;
76415 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
76416 index 2754f09..b20e38f 100644
76417 --- a/net/rxrpc/ar-peer.c
76418 +++ b/net/rxrpc/ar-peer.c
76419 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
76420 INIT_LIST_HEAD(&peer->error_targets);
76421 spin_lock_init(&peer->lock);
76422 atomic_set(&peer->usage, 1);
76423 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
76424 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76425 memcpy(&peer->srx, srx, sizeof(*srx));
76426
76427 rxrpc_assess_MTU_size(peer);
76428 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
76429 index 38047f7..9f48511 100644
76430 --- a/net/rxrpc/ar-proc.c
76431 +++ b/net/rxrpc/ar-proc.c
76432 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
76433 atomic_read(&conn->usage),
76434 rxrpc_conn_states[conn->state],
76435 key_serial(conn->key),
76436 - atomic_read(&conn->serial),
76437 - atomic_read(&conn->hi_serial));
76438 + atomic_read_unchecked(&conn->serial),
76439 + atomic_read_unchecked(&conn->hi_serial));
76440
76441 return 0;
76442 }
76443 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
76444 index 92df566..87ec1bf 100644
76445 --- a/net/rxrpc/ar-transport.c
76446 +++ b/net/rxrpc/ar-transport.c
76447 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
76448 spin_lock_init(&trans->client_lock);
76449 rwlock_init(&trans->conn_lock);
76450 atomic_set(&trans->usage, 1);
76451 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
76452 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76453
76454 if (peer->srx.transport.family == AF_INET) {
76455 switch (peer->srx.transport_type) {
76456 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
76457 index 7635107..4670276 100644
76458 --- a/net/rxrpc/rxkad.c
76459 +++ b/net/rxrpc/rxkad.c
76460 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
76461
76462 len = iov[0].iov_len + iov[1].iov_len;
76463
76464 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
76465 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76466 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
76467
76468 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
76469 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
76470
76471 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
76472
76473 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
76474 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76475 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
76476
76477 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
76478 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
76479 index 1e2eee8..ce3967e 100644
76480 --- a/net/sctp/proc.c
76481 +++ b/net/sctp/proc.c
76482 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
76483 seq_printf(seq,
76484 "%8pK %8pK %-3d %-3d %-2d %-4d "
76485 "%4d %8d %8d %7d %5lu %-5d %5d ",
76486 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
76487 + assoc, sk,
76488 + sctp_sk(sk)->type, sk->sk_state,
76489 assoc->state, hash,
76490 assoc->assoc_id,
76491 assoc->sndbuf_used,
76492 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
76493 index 92ba71d..9a97902 100644
76494 --- a/net/sctp/socket.c
76495 +++ b/net/sctp/socket.c
76496 @@ -4569,7 +4569,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
76497 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
76498 if (space_left < addrlen)
76499 return -ENOMEM;
76500 - if (copy_to_user(to, &temp, addrlen))
76501 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
76502 return -EFAULT;
76503 to += addrlen;
76504 cnt++;
76505 diff --git a/net/socket.c b/net/socket.c
76506 index 851edcd..b786851 100644
76507 --- a/net/socket.c
76508 +++ b/net/socket.c
76509 @@ -88,6 +88,7 @@
76510 #include <linux/nsproxy.h>
76511 #include <linux/magic.h>
76512 #include <linux/slab.h>
76513 +#include <linux/in.h>
76514
76515 #include <asm/uaccess.h>
76516 #include <asm/unistd.h>
76517 @@ -105,6 +106,8 @@
76518 #include <linux/sockios.h>
76519 #include <linux/atalk.h>
76520
76521 +#include <linux/grsock.h>
76522 +
76523 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
76524 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
76525 unsigned long nr_segs, loff_t pos);
76526 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
76527 &sockfs_dentry_operations, SOCKFS_MAGIC);
76528 }
76529
76530 -static struct vfsmount *sock_mnt __read_mostly;
76531 +struct vfsmount *sock_mnt __read_mostly;
76532
76533 static struct file_system_type sock_fs_type = {
76534 .name = "sockfs",
76535 @@ -1207,6 +1210,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
76536 return -EAFNOSUPPORT;
76537 if (type < 0 || type >= SOCK_MAX)
76538 return -EINVAL;
76539 + if (protocol < 0)
76540 + return -EINVAL;
76541
76542 /* Compatibility.
76543
76544 @@ -1339,6 +1344,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
76545 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
76546 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
76547
76548 + if(!gr_search_socket(family, type, protocol)) {
76549 + retval = -EACCES;
76550 + goto out;
76551 + }
76552 +
76553 + if (gr_handle_sock_all(family, type, protocol)) {
76554 + retval = -EACCES;
76555 + goto out;
76556 + }
76557 +
76558 retval = sock_create(family, type, protocol, &sock);
76559 if (retval < 0)
76560 goto out;
76561 @@ -1451,6 +1466,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
76562 if (sock) {
76563 err = move_addr_to_kernel(umyaddr, addrlen, &address);
76564 if (err >= 0) {
76565 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
76566 + err = -EACCES;
76567 + goto error;
76568 + }
76569 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
76570 + if (err)
76571 + goto error;
76572 +
76573 err = security_socket_bind(sock,
76574 (struct sockaddr *)&address,
76575 addrlen);
76576 @@ -1459,6 +1482,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
76577 (struct sockaddr *)
76578 &address, addrlen);
76579 }
76580 +error:
76581 fput_light(sock->file, fput_needed);
76582 }
76583 return err;
76584 @@ -1482,10 +1506,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
76585 if ((unsigned)backlog > somaxconn)
76586 backlog = somaxconn;
76587
76588 + if (gr_handle_sock_server_other(sock->sk)) {
76589 + err = -EPERM;
76590 + goto error;
76591 + }
76592 +
76593 + err = gr_search_listen(sock);
76594 + if (err)
76595 + goto error;
76596 +
76597 err = security_socket_listen(sock, backlog);
76598 if (!err)
76599 err = sock->ops->listen(sock, backlog);
76600
76601 +error:
76602 fput_light(sock->file, fput_needed);
76603 }
76604 return err;
76605 @@ -1529,6 +1563,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
76606 newsock->type = sock->type;
76607 newsock->ops = sock->ops;
76608
76609 + if (gr_handle_sock_server_other(sock->sk)) {
76610 + err = -EPERM;
76611 + sock_release(newsock);
76612 + goto out_put;
76613 + }
76614 +
76615 + err = gr_search_accept(sock);
76616 + if (err) {
76617 + sock_release(newsock);
76618 + goto out_put;
76619 + }
76620 +
76621 /*
76622 * We don't need try_module_get here, as the listening socket (sock)
76623 * has the protocol module (sock->ops->owner) held.
76624 @@ -1567,6 +1613,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
76625 fd_install(newfd, newfile);
76626 err = newfd;
76627
76628 + gr_attach_curr_ip(newsock->sk);
76629 +
76630 out_put:
76631 fput_light(sock->file, fput_needed);
76632 out:
76633 @@ -1599,6 +1647,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
76634 int, addrlen)
76635 {
76636 struct socket *sock;
76637 + struct sockaddr *sck;
76638 struct sockaddr_storage address;
76639 int err, fput_needed;
76640
76641 @@ -1609,6 +1658,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
76642 if (err < 0)
76643 goto out_put;
76644
76645 + sck = (struct sockaddr *)&address;
76646 +
76647 + if (gr_handle_sock_client(sck)) {
76648 + err = -EACCES;
76649 + goto out_put;
76650 + }
76651 +
76652 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
76653 + if (err)
76654 + goto out_put;
76655 +
76656 err =
76657 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
76658 if (err)
76659 @@ -1966,7 +2026,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
76660 * checking falls down on this.
76661 */
76662 if (copy_from_user(ctl_buf,
76663 - (void __user __force *)msg_sys->msg_control,
76664 + (void __force_user *)msg_sys->msg_control,
76665 ctl_len))
76666 goto out_freectl;
76667 msg_sys->msg_control = ctl_buf;
76668 @@ -2136,7 +2196,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
76669 * kernel msghdr to use the kernel address space)
76670 */
76671
76672 - uaddr = (__force void __user *)msg_sys->msg_name;
76673 + uaddr = (void __force_user *)msg_sys->msg_name;
76674 uaddr_len = COMPAT_NAMELEN(msg);
76675 if (MSG_CMSG_COMPAT & flags) {
76676 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
76677 @@ -2758,7 +2818,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76678 }
76679
76680 ifr = compat_alloc_user_space(buf_size);
76681 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
76682 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
76683
76684 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
76685 return -EFAULT;
76686 @@ -2782,12 +2842,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76687 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
76688
76689 if (copy_in_user(rxnfc, compat_rxnfc,
76690 - (void *)(&rxnfc->fs.m_ext + 1) -
76691 - (void *)rxnfc) ||
76692 + (void __user *)(&rxnfc->fs.m_ext + 1) -
76693 + (void __user *)rxnfc) ||
76694 copy_in_user(&rxnfc->fs.ring_cookie,
76695 &compat_rxnfc->fs.ring_cookie,
76696 - (void *)(&rxnfc->fs.location + 1) -
76697 - (void *)&rxnfc->fs.ring_cookie) ||
76698 + (void __user *)(&rxnfc->fs.location + 1) -
76699 + (void __user *)&rxnfc->fs.ring_cookie) ||
76700 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
76701 sizeof(rxnfc->rule_cnt)))
76702 return -EFAULT;
76703 @@ -2799,12 +2859,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76704
76705 if (convert_out) {
76706 if (copy_in_user(compat_rxnfc, rxnfc,
76707 - (const void *)(&rxnfc->fs.m_ext + 1) -
76708 - (const void *)rxnfc) ||
76709 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
76710 + (const void __user *)rxnfc) ||
76711 copy_in_user(&compat_rxnfc->fs.ring_cookie,
76712 &rxnfc->fs.ring_cookie,
76713 - (const void *)(&rxnfc->fs.location + 1) -
76714 - (const void *)&rxnfc->fs.ring_cookie) ||
76715 + (const void __user *)(&rxnfc->fs.location + 1) -
76716 + (const void __user *)&rxnfc->fs.ring_cookie) ||
76717 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
76718 sizeof(rxnfc->rule_cnt)))
76719 return -EFAULT;
76720 @@ -2874,7 +2934,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
76721 old_fs = get_fs();
76722 set_fs(KERNEL_DS);
76723 err = dev_ioctl(net, cmd,
76724 - (struct ifreq __user __force *) &kifr);
76725 + (struct ifreq __force_user *) &kifr);
76726 set_fs(old_fs);
76727
76728 return err;
76729 @@ -2983,7 +3043,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
76730
76731 old_fs = get_fs();
76732 set_fs(KERNEL_DS);
76733 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
76734 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
76735 set_fs(old_fs);
76736
76737 if (cmd == SIOCGIFMAP && !err) {
76738 @@ -3088,7 +3148,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
76739 ret |= __get_user(rtdev, &(ur4->rt_dev));
76740 if (rtdev) {
76741 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
76742 - r4.rt_dev = (char __user __force *)devname;
76743 + r4.rt_dev = (char __force_user *)devname;
76744 devname[15] = 0;
76745 } else
76746 r4.rt_dev = NULL;
76747 @@ -3314,8 +3374,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
76748 int __user *uoptlen;
76749 int err;
76750
76751 - uoptval = (char __user __force *) optval;
76752 - uoptlen = (int __user __force *) optlen;
76753 + uoptval = (char __force_user *) optval;
76754 + uoptlen = (int __force_user *) optlen;
76755
76756 set_fs(KERNEL_DS);
76757 if (level == SOL_SOCKET)
76758 @@ -3335,7 +3395,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
76759 char __user *uoptval;
76760 int err;
76761
76762 - uoptval = (char __user __force *) optval;
76763 + uoptval = (char __force_user *) optval;
76764
76765 set_fs(KERNEL_DS);
76766 if (level == SOL_SOCKET)
76767 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
76768 index 994cfea..5343b6b 100644
76769 --- a/net/sunrpc/sched.c
76770 +++ b/net/sunrpc/sched.c
76771 @@ -240,9 +240,9 @@ static int rpc_wait_bit_killable(void *word)
76772 #ifdef RPC_DEBUG
76773 static void rpc_task_set_debuginfo(struct rpc_task *task)
76774 {
76775 - static atomic_t rpc_pid;
76776 + static atomic_unchecked_t rpc_pid;
76777
76778 - task->tk_pid = atomic_inc_return(&rpc_pid);
76779 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
76780 }
76781 #else
76782 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
76783 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
76784 index 8343737..677025e 100644
76785 --- a/net/sunrpc/xprtrdma/svc_rdma.c
76786 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
76787 @@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
76788 static unsigned int min_max_inline = 4096;
76789 static unsigned int max_max_inline = 65536;
76790
76791 -atomic_t rdma_stat_recv;
76792 -atomic_t rdma_stat_read;
76793 -atomic_t rdma_stat_write;
76794 -atomic_t rdma_stat_sq_starve;
76795 -atomic_t rdma_stat_rq_starve;
76796 -atomic_t rdma_stat_rq_poll;
76797 -atomic_t rdma_stat_rq_prod;
76798 -atomic_t rdma_stat_sq_poll;
76799 -atomic_t rdma_stat_sq_prod;
76800 +atomic_unchecked_t rdma_stat_recv;
76801 +atomic_unchecked_t rdma_stat_read;
76802 +atomic_unchecked_t rdma_stat_write;
76803 +atomic_unchecked_t rdma_stat_sq_starve;
76804 +atomic_unchecked_t rdma_stat_rq_starve;
76805 +atomic_unchecked_t rdma_stat_rq_poll;
76806 +atomic_unchecked_t rdma_stat_rq_prod;
76807 +atomic_unchecked_t rdma_stat_sq_poll;
76808 +atomic_unchecked_t rdma_stat_sq_prod;
76809
76810 /* Temporary NFS request map and context caches */
76811 struct kmem_cache *svc_rdma_map_cachep;
76812 @@ -110,7 +110,7 @@ static int read_reset_stat(ctl_table *table, int write,
76813 len -= *ppos;
76814 if (len > *lenp)
76815 len = *lenp;
76816 - if (len && copy_to_user(buffer, str_buf, len))
76817 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
76818 return -EFAULT;
76819 *lenp = len;
76820 *ppos += len;
76821 @@ -151,63 +151,63 @@ static ctl_table svcrdma_parm_table[] = {
76822 {
76823 .procname = "rdma_stat_read",
76824 .data = &rdma_stat_read,
76825 - .maxlen = sizeof(atomic_t),
76826 + .maxlen = sizeof(atomic_unchecked_t),
76827 .mode = 0644,
76828 .proc_handler = read_reset_stat,
76829 },
76830 {
76831 .procname = "rdma_stat_recv",
76832 .data = &rdma_stat_recv,
76833 - .maxlen = sizeof(atomic_t),
76834 + .maxlen = sizeof(atomic_unchecked_t),
76835 .mode = 0644,
76836 .proc_handler = read_reset_stat,
76837 },
76838 {
76839 .procname = "rdma_stat_write",
76840 .data = &rdma_stat_write,
76841 - .maxlen = sizeof(atomic_t),
76842 + .maxlen = sizeof(atomic_unchecked_t),
76843 .mode = 0644,
76844 .proc_handler = read_reset_stat,
76845 },
76846 {
76847 .procname = "rdma_stat_sq_starve",
76848 .data = &rdma_stat_sq_starve,
76849 - .maxlen = sizeof(atomic_t),
76850 + .maxlen = sizeof(atomic_unchecked_t),
76851 .mode = 0644,
76852 .proc_handler = read_reset_stat,
76853 },
76854 {
76855 .procname = "rdma_stat_rq_starve",
76856 .data = &rdma_stat_rq_starve,
76857 - .maxlen = sizeof(atomic_t),
76858 + .maxlen = sizeof(atomic_unchecked_t),
76859 .mode = 0644,
76860 .proc_handler = read_reset_stat,
76861 },
76862 {
76863 .procname = "rdma_stat_rq_poll",
76864 .data = &rdma_stat_rq_poll,
76865 - .maxlen = sizeof(atomic_t),
76866 + .maxlen = sizeof(atomic_unchecked_t),
76867 .mode = 0644,
76868 .proc_handler = read_reset_stat,
76869 },
76870 {
76871 .procname = "rdma_stat_rq_prod",
76872 .data = &rdma_stat_rq_prod,
76873 - .maxlen = sizeof(atomic_t),
76874 + .maxlen = sizeof(atomic_unchecked_t),
76875 .mode = 0644,
76876 .proc_handler = read_reset_stat,
76877 },
76878 {
76879 .procname = "rdma_stat_sq_poll",
76880 .data = &rdma_stat_sq_poll,
76881 - .maxlen = sizeof(atomic_t),
76882 + .maxlen = sizeof(atomic_unchecked_t),
76883 .mode = 0644,
76884 .proc_handler = read_reset_stat,
76885 },
76886 {
76887 .procname = "rdma_stat_sq_prod",
76888 .data = &rdma_stat_sq_prod,
76889 - .maxlen = sizeof(atomic_t),
76890 + .maxlen = sizeof(atomic_unchecked_t),
76891 .mode = 0644,
76892 .proc_handler = read_reset_stat,
76893 },
76894 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
76895 index 41cb63b..c4a1489 100644
76896 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
76897 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
76898 @@ -501,7 +501,7 @@ next_sge:
76899 svc_rdma_put_context(ctxt, 0);
76900 goto out;
76901 }
76902 - atomic_inc(&rdma_stat_read);
76903 + atomic_inc_unchecked(&rdma_stat_read);
76904
76905 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
76906 chl_map->ch[ch_no].count -= read_wr.num_sge;
76907 @@ -611,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
76908 dto_q);
76909 list_del_init(&ctxt->dto_q);
76910 } else {
76911 - atomic_inc(&rdma_stat_rq_starve);
76912 + atomic_inc_unchecked(&rdma_stat_rq_starve);
76913 clear_bit(XPT_DATA, &xprt->xpt_flags);
76914 ctxt = NULL;
76915 }
76916 @@ -631,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
76917 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
76918 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
76919 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
76920 - atomic_inc(&rdma_stat_recv);
76921 + atomic_inc_unchecked(&rdma_stat_recv);
76922
76923 /* Build up the XDR from the receive buffers. */
76924 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
76925 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
76926 index 42eb7ba..c887c45 100644
76927 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
76928 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
76929 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
76930 write_wr.wr.rdma.remote_addr = to;
76931
76932 /* Post It */
76933 - atomic_inc(&rdma_stat_write);
76934 + atomic_inc_unchecked(&rdma_stat_write);
76935 if (svc_rdma_send(xprt, &write_wr))
76936 goto err;
76937 return 0;
76938 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
76939 index 73b428b..5f3f8f3 100644
76940 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
76941 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
76942 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
76943 return;
76944
76945 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
76946 - atomic_inc(&rdma_stat_rq_poll);
76947 + atomic_inc_unchecked(&rdma_stat_rq_poll);
76948
76949 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
76950 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
76951 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
76952 }
76953
76954 if (ctxt)
76955 - atomic_inc(&rdma_stat_rq_prod);
76956 + atomic_inc_unchecked(&rdma_stat_rq_prod);
76957
76958 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
76959 /*
76960 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
76961 return;
76962
76963 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
76964 - atomic_inc(&rdma_stat_sq_poll);
76965 + atomic_inc_unchecked(&rdma_stat_sq_poll);
76966 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
76967 if (wc.status != IB_WC_SUCCESS)
76968 /* Close the transport */
76969 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
76970 }
76971
76972 if (ctxt)
76973 - atomic_inc(&rdma_stat_sq_prod);
76974 + atomic_inc_unchecked(&rdma_stat_sq_prod);
76975 }
76976
76977 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
76978 @@ -1266,7 +1266,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
76979 spin_lock_bh(&xprt->sc_lock);
76980 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
76981 spin_unlock_bh(&xprt->sc_lock);
76982 - atomic_inc(&rdma_stat_sq_starve);
76983 + atomic_inc_unchecked(&rdma_stat_sq_starve);
76984
76985 /* See if we can opportunistically reap SQ WR to make room */
76986 sq_cq_reap(xprt);
76987 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
76988 index c3e65ae..f512a2b 100644
76989 --- a/net/sysctl_net.c
76990 +++ b/net/sysctl_net.c
76991 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
76992 struct ctl_table *table)
76993 {
76994 /* Allow network administrator to have same access as root. */
76995 - if (capable(CAP_NET_ADMIN)) {
76996 + if (capable_nolog(CAP_NET_ADMIN)) {
76997 int mode = (table->mode >> 6) & 7;
76998 return (mode << 6) | (mode << 3) | mode;
76999 }
77000 diff --git a/net/tipc/link.c b/net/tipc/link.c
77001 index b4b9b30..5b62131 100644
77002 --- a/net/tipc/link.c
77003 +++ b/net/tipc/link.c
77004 @@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender,
77005 struct tipc_msg fragm_hdr;
77006 struct sk_buff *buf, *buf_chain, *prev;
77007 u32 fragm_crs, fragm_rest, hsz, sect_rest;
77008 - const unchar *sect_crs;
77009 + const unchar __user *sect_crs;
77010 int curr_sect;
77011 u32 fragm_no;
77012
77013 @@ -1247,7 +1247,7 @@ again:
77014
77015 if (!sect_rest) {
77016 sect_rest = msg_sect[++curr_sect].iov_len;
77017 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
77018 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
77019 }
77020
77021 if (sect_rest < fragm_rest)
77022 @@ -1266,7 +1266,7 @@ error:
77023 }
77024 } else
77025 skb_copy_to_linear_data_offset(buf, fragm_crs,
77026 - sect_crs, sz);
77027 + (const void __force_kernel *)sect_crs, sz);
77028 sect_crs += sz;
77029 sect_rest -= sz;
77030 fragm_crs += sz;
77031 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
77032 index e3afe16..333ea83 100644
77033 --- a/net/tipc/msg.c
77034 +++ b/net/tipc/msg.c
77035 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
77036 msg_sect[cnt].iov_len);
77037 else
77038 skb_copy_to_linear_data_offset(*buf, pos,
77039 - msg_sect[cnt].iov_base,
77040 + (const void __force_kernel *)msg_sect[cnt].iov_base,
77041 msg_sect[cnt].iov_len);
77042 pos += msg_sect[cnt].iov_len;
77043 }
77044 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
77045 index b2964e9..fdf2e27 100644
77046 --- a/net/tipc/subscr.c
77047 +++ b/net/tipc/subscr.c
77048 @@ -101,7 +101,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
77049 {
77050 struct iovec msg_sect;
77051
77052 - msg_sect.iov_base = (void *)&sub->evt;
77053 + msg_sect.iov_base = (void __force_user *)&sub->evt;
77054 msg_sect.iov_len = sizeof(struct tipc_event);
77055
77056 sub->evt.event = htohl(event, sub->swap);
77057 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
77058 index d510353..26c8a32 100644
77059 --- a/net/unix/af_unix.c
77060 +++ b/net/unix/af_unix.c
77061 @@ -779,6 +779,12 @@ static struct sock *unix_find_other(struct net *net,
77062 err = -ECONNREFUSED;
77063 if (!S_ISSOCK(inode->i_mode))
77064 goto put_fail;
77065 +
77066 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
77067 + err = -EACCES;
77068 + goto put_fail;
77069 + }
77070 +
77071 u = unix_find_socket_byinode(inode);
77072 if (!u)
77073 goto put_fail;
77074 @@ -799,6 +805,13 @@ static struct sock *unix_find_other(struct net *net,
77075 if (u) {
77076 struct dentry *dentry;
77077 dentry = unix_sk(u)->path.dentry;
77078 +
77079 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
77080 + err = -EPERM;
77081 + sock_put(u);
77082 + goto fail;
77083 + }
77084 +
77085 if (dentry)
77086 touch_atime(&unix_sk(u)->path);
77087 } else
77088 @@ -881,11 +894,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
77089 err = security_path_mknod(&path, dentry, mode, 0);
77090 if (err)
77091 goto out_mknod_drop_write;
77092 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
77093 + err = -EACCES;
77094 + goto out_mknod_drop_write;
77095 + }
77096 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
77097 out_mknod_drop_write:
77098 mnt_drop_write(path.mnt);
77099 if (err)
77100 goto out_mknod_dput;
77101 +
77102 + gr_handle_create(dentry, path.mnt);
77103 +
77104 mutex_unlock(&path.dentry->d_inode->i_mutex);
77105 dput(path.dentry);
77106 path.dentry = dentry;
77107 diff --git a/net/wireless/core.h b/net/wireless/core.h
77108 index 3ac2dd0..fbe533e 100644
77109 --- a/net/wireless/core.h
77110 +++ b/net/wireless/core.h
77111 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
77112 struct mutex mtx;
77113
77114 /* rfkill support */
77115 - struct rfkill_ops rfkill_ops;
77116 + rfkill_ops_no_const rfkill_ops;
77117 struct rfkill *rfkill;
77118 struct work_struct rfkill_sync;
77119
77120 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
77121 index af648e0..6185d3a 100644
77122 --- a/net/wireless/wext-core.c
77123 +++ b/net/wireless/wext-core.c
77124 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
77125 */
77126
77127 /* Support for very large requests */
77128 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
77129 - (user_length > descr->max_tokens)) {
77130 + if (user_length > descr->max_tokens) {
77131 /* Allow userspace to GET more than max so
77132 * we can support any size GET requests.
77133 * There is still a limit : -ENOMEM.
77134 @@ -787,22 +786,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
77135 }
77136 }
77137
77138 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
77139 - /*
77140 - * If this is a GET, but not NOMAX, it means that the extra
77141 - * data is not bounded by userspace, but by max_tokens. Thus
77142 - * set the length to max_tokens. This matches the extra data
77143 - * allocation.
77144 - * The driver should fill it with the number of tokens it
77145 - * provided, and it may check iwp->length rather than having
77146 - * knowledge of max_tokens. If the driver doesn't change the
77147 - * iwp->length, this ioctl just copies back max_token tokens
77148 - * filled with zeroes. Hopefully the driver isn't claiming
77149 - * them to be valid data.
77150 - */
77151 - iwp->length = descr->max_tokens;
77152 - }
77153 -
77154 err = handler(dev, info, (union iwreq_data *) iwp, extra);
77155
77156 iwp->length += essid_compat;
77157 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
77158 index a15d2a0..12142af 100644
77159 --- a/net/xfrm/xfrm_policy.c
77160 +++ b/net/xfrm/xfrm_policy.c
77161 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
77162 {
77163 policy->walk.dead = 1;
77164
77165 - atomic_inc(&policy->genid);
77166 + atomic_inc_unchecked(&policy->genid);
77167
77168 if (del_timer(&policy->timer))
77169 xfrm_pol_put(policy);
77170 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
77171 hlist_add_head(&policy->bydst, chain);
77172 xfrm_pol_hold(policy);
77173 net->xfrm.policy_count[dir]++;
77174 - atomic_inc(&flow_cache_genid);
77175 + atomic_inc_unchecked(&flow_cache_genid);
77176 if (delpol)
77177 __xfrm_policy_unlink(delpol, dir);
77178 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
77179 @@ -1530,7 +1530,7 @@ free_dst:
77180 goto out;
77181 }
77182
77183 -static int inline
77184 +static inline int
77185 xfrm_dst_alloc_copy(void **target, const void *src, int size)
77186 {
77187 if (!*target) {
77188 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
77189 return 0;
77190 }
77191
77192 -static int inline
77193 +static inline int
77194 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
77195 {
77196 #ifdef CONFIG_XFRM_SUB_POLICY
77197 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
77198 #endif
77199 }
77200
77201 -static int inline
77202 +static inline int
77203 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
77204 {
77205 #ifdef CONFIG_XFRM_SUB_POLICY
77206 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
77207
77208 xdst->num_pols = num_pols;
77209 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
77210 - xdst->policy_genid = atomic_read(&pols[0]->genid);
77211 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
77212
77213 return xdst;
77214 }
77215 @@ -2348,7 +2348,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
77216 if (xdst->xfrm_genid != dst->xfrm->genid)
77217 return 0;
77218 if (xdst->num_pols > 0 &&
77219 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
77220 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
77221 return 0;
77222
77223 mtu = dst_mtu(dst->child);
77224 @@ -2885,7 +2885,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
77225 sizeof(pol->xfrm_vec[i].saddr));
77226 pol->xfrm_vec[i].encap_family = mp->new_family;
77227 /* flush bundles */
77228 - atomic_inc(&pol->genid);
77229 + atomic_inc_unchecked(&pol->genid);
77230 }
77231 }
77232
77233 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
77234 index ff1720d..d428ee7 100644
77235 --- a/scripts/Makefile.build
77236 +++ b/scripts/Makefile.build
77237 @@ -111,7 +111,7 @@ endif
77238 endif
77239
77240 # Do not include host rules unless needed
77241 -ifneq ($(hostprogs-y)$(hostprogs-m),)
77242 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
77243 include scripts/Makefile.host
77244 endif
77245
77246 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
77247 index 686cb0d..9d653bf 100644
77248 --- a/scripts/Makefile.clean
77249 +++ b/scripts/Makefile.clean
77250 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
77251 __clean-files := $(extra-y) $(always) \
77252 $(targets) $(clean-files) \
77253 $(host-progs) \
77254 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
77255 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
77256 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
77257
77258 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
77259
77260 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
77261 index 1ac414f..a1c1451 100644
77262 --- a/scripts/Makefile.host
77263 +++ b/scripts/Makefile.host
77264 @@ -31,6 +31,7 @@
77265 # Note: Shared libraries consisting of C++ files are not supported
77266
77267 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
77268 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
77269
77270 # C code
77271 # Executables compiled from a single .c file
77272 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
77273 # Shared libaries (only .c supported)
77274 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
77275 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
77276 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
77277 # Remove .so files from "xxx-objs"
77278 host-cobjs := $(filter-out %.so,$(host-cobjs))
77279
77280 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
77281 index cb1f50c..cef2a7c 100644
77282 --- a/scripts/basic/fixdep.c
77283 +++ b/scripts/basic/fixdep.c
77284 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
77285 /*
77286 * Lookup a value in the configuration string.
77287 */
77288 -static int is_defined_config(const char *name, int len, unsigned int hash)
77289 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
77290 {
77291 struct item *aux;
77292
77293 @@ -211,10 +211,10 @@ static void clear_config(void)
77294 /*
77295 * Record the use of a CONFIG_* word.
77296 */
77297 -static void use_config(const char *m, int slen)
77298 +static void use_config(const char *m, unsigned int slen)
77299 {
77300 unsigned int hash = strhash(m, slen);
77301 - int c, i;
77302 + unsigned int c, i;
77303
77304 if (is_defined_config(m, slen, hash))
77305 return;
77306 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
77307
77308 static void parse_config_file(const char *map, size_t len)
77309 {
77310 - const int *end = (const int *) (map + len);
77311 + const unsigned int *end = (const unsigned int *) (map + len);
77312 /* start at +1, so that p can never be < map */
77313 - const int *m = (const int *) map + 1;
77314 + const unsigned int *m = (const unsigned int *) map + 1;
77315 const char *p, *q;
77316
77317 for (; m < end; m++) {
77318 @@ -406,7 +406,7 @@ static void print_deps(void)
77319 static void traps(void)
77320 {
77321 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
77322 - int *p = (int *)test;
77323 + unsigned int *p = (unsigned int *)test;
77324
77325 if (*p != INT_CONF) {
77326 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
77327 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
77328 new file mode 100644
77329 index 0000000..8729101
77330 --- /dev/null
77331 +++ b/scripts/gcc-plugin.sh
77332 @@ -0,0 +1,2 @@
77333 +#!/bin/sh
77334 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
77335 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
77336 index 44ddaa5..a3119bd 100644
77337 --- a/scripts/mod/file2alias.c
77338 +++ b/scripts/mod/file2alias.c
77339 @@ -128,7 +128,7 @@ static void device_id_check(const char *modname, const char *device_id,
77340 unsigned long size, unsigned long id_size,
77341 void *symval)
77342 {
77343 - int i;
77344 + unsigned int i;
77345
77346 if (size % id_size || size < id_size) {
77347 if (cross_build != 0)
77348 @@ -158,7 +158,7 @@ static void device_id_check(const char *modname, const char *device_id,
77349 /* USB is special because the bcdDevice can be matched against a numeric range */
77350 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
77351 static void do_usb_entry(struct usb_device_id *id,
77352 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
77353 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
77354 unsigned char range_lo, unsigned char range_hi,
77355 unsigned char max, struct module *mod)
77356 {
77357 @@ -259,7 +259,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
77358 {
77359 unsigned int devlo, devhi;
77360 unsigned char chi, clo, max;
77361 - int ndigits;
77362 + unsigned int ndigits;
77363
77364 id->match_flags = TO_NATIVE(id->match_flags);
77365 id->idVendor = TO_NATIVE(id->idVendor);
77366 @@ -501,7 +501,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
77367 for (i = 0; i < count; i++) {
77368 const char *id = (char *)devs[i].id;
77369 char acpi_id[sizeof(devs[0].id)];
77370 - int j;
77371 + unsigned int j;
77372
77373 buf_printf(&mod->dev_table_buf,
77374 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
77375 @@ -531,7 +531,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
77376
77377 for (j = 0; j < PNP_MAX_DEVICES; j++) {
77378 const char *id = (char *)card->devs[j].id;
77379 - int i2, j2;
77380 + unsigned int i2, j2;
77381 int dup = 0;
77382
77383 if (!id[0])
77384 @@ -557,7 +557,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
77385 /* add an individual alias for every device entry */
77386 if (!dup) {
77387 char acpi_id[sizeof(card->devs[0].id)];
77388 - int k;
77389 + unsigned int k;
77390
77391 buf_printf(&mod->dev_table_buf,
77392 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
77393 @@ -882,7 +882,7 @@ static void dmi_ascii_filter(char *d, const char *s)
77394 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
77395 char *alias)
77396 {
77397 - int i, j;
77398 + unsigned int i, j;
77399
77400 sprintf(alias, "dmi*");
77401
77402 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
77403 index c4e7d15..4241aef 100644
77404 --- a/scripts/mod/modpost.c
77405 +++ b/scripts/mod/modpost.c
77406 @@ -922,6 +922,7 @@ enum mismatch {
77407 ANY_INIT_TO_ANY_EXIT,
77408 ANY_EXIT_TO_ANY_INIT,
77409 EXPORT_TO_INIT_EXIT,
77410 + DATA_TO_TEXT
77411 };
77412
77413 struct sectioncheck {
77414 @@ -1030,6 +1031,12 @@ const struct sectioncheck sectioncheck[] = {
77415 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
77416 .mismatch = EXPORT_TO_INIT_EXIT,
77417 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
77418 +},
77419 +/* Do not reference code from writable data */
77420 +{
77421 + .fromsec = { DATA_SECTIONS, NULL },
77422 + .tosec = { TEXT_SECTIONS, NULL },
77423 + .mismatch = DATA_TO_TEXT
77424 }
77425 };
77426
77427 @@ -1152,10 +1159,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
77428 continue;
77429 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
77430 continue;
77431 - if (sym->st_value == addr)
77432 - return sym;
77433 /* Find a symbol nearby - addr are maybe negative */
77434 d = sym->st_value - addr;
77435 + if (d == 0)
77436 + return sym;
77437 if (d < 0)
77438 d = addr - sym->st_value;
77439 if (d < distance) {
77440 @@ -1434,6 +1441,14 @@ static void report_sec_mismatch(const char *modname,
77441 tosym, prl_to, prl_to, tosym);
77442 free(prl_to);
77443 break;
77444 + case DATA_TO_TEXT:
77445 +/*
77446 + fprintf(stderr,
77447 + "The variable %s references\n"
77448 + "the %s %s%s%s\n",
77449 + fromsym, to, sec2annotation(tosec), tosym, to_p);
77450 +*/
77451 + break;
77452 }
77453 fprintf(stderr, "\n");
77454 }
77455 @@ -1668,7 +1683,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
77456 static void check_sec_ref(struct module *mod, const char *modname,
77457 struct elf_info *elf)
77458 {
77459 - int i;
77460 + unsigned int i;
77461 Elf_Shdr *sechdrs = elf->sechdrs;
77462
77463 /* Walk through all sections */
77464 @@ -1766,7 +1781,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
77465 va_end(ap);
77466 }
77467
77468 -void buf_write(struct buffer *buf, const char *s, int len)
77469 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
77470 {
77471 if (buf->size - buf->pos < len) {
77472 buf->size += len + SZ;
77473 @@ -1984,7 +1999,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
77474 if (fstat(fileno(file), &st) < 0)
77475 goto close_write;
77476
77477 - if (st.st_size != b->pos)
77478 + if (st.st_size != (off_t)b->pos)
77479 goto close_write;
77480
77481 tmp = NOFAIL(malloc(b->pos));
77482 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
77483 index 51207e4..f7d603d 100644
77484 --- a/scripts/mod/modpost.h
77485 +++ b/scripts/mod/modpost.h
77486 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
77487
77488 struct buffer {
77489 char *p;
77490 - int pos;
77491 - int size;
77492 + unsigned int pos;
77493 + unsigned int size;
77494 };
77495
77496 void __attribute__((format(printf, 2, 3)))
77497 buf_printf(struct buffer *buf, const char *fmt, ...);
77498
77499 void
77500 -buf_write(struct buffer *buf, const char *s, int len);
77501 +buf_write(struct buffer *buf, const char *s, unsigned int len);
77502
77503 struct module {
77504 struct module *next;
77505 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
77506 index 9dfcd6d..099068e 100644
77507 --- a/scripts/mod/sumversion.c
77508 +++ b/scripts/mod/sumversion.c
77509 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
77510 goto out;
77511 }
77512
77513 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
77514 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
77515 warn("writing sum in %s failed: %s\n",
77516 filename, strerror(errno));
77517 goto out;
77518 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
77519 index 5c11312..72742b5 100644
77520 --- a/scripts/pnmtologo.c
77521 +++ b/scripts/pnmtologo.c
77522 @@ -237,14 +237,14 @@ static void write_header(void)
77523 fprintf(out, " * Linux logo %s\n", logoname);
77524 fputs(" */\n\n", out);
77525 fputs("#include <linux/linux_logo.h>\n\n", out);
77526 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
77527 + fprintf(out, "static unsigned char %s_data[] = {\n",
77528 logoname);
77529 }
77530
77531 static void write_footer(void)
77532 {
77533 fputs("\n};\n\n", out);
77534 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
77535 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
77536 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
77537 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
77538 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
77539 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
77540 fputs("\n};\n\n", out);
77541
77542 /* write logo clut */
77543 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
77544 + fprintf(out, "static unsigned char %s_clut[] = {\n",
77545 logoname);
77546 write_hex_cnt = 0;
77547 for (i = 0; i < logo_clutsize; i++) {
77548 diff --git a/security/Kconfig b/security/Kconfig
77549 index ccc61f8..5effdb4 100644
77550 --- a/security/Kconfig
77551 +++ b/security/Kconfig
77552 @@ -4,6 +4,640 @@
77553
77554 menu "Security options"
77555
77556 +source grsecurity/Kconfig
77557 +
77558 +menu "PaX"
77559 +
77560 + config ARCH_TRACK_EXEC_LIMIT
77561 + bool
77562 +
77563 + config PAX_KERNEXEC_PLUGIN
77564 + bool
77565 +
77566 + config PAX_PER_CPU_PGD
77567 + bool
77568 +
77569 + config TASK_SIZE_MAX_SHIFT
77570 + int
77571 + depends on X86_64
77572 + default 47 if !PAX_PER_CPU_PGD
77573 + default 42 if PAX_PER_CPU_PGD
77574 +
77575 + config PAX_ENABLE_PAE
77576 + bool
77577 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
77578 +
77579 +config PAX
77580 + bool "Enable various PaX features"
77581 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
77582 + help
77583 + This allows you to enable various PaX features. PaX adds
77584 + intrusion prevention mechanisms to the kernel that reduce
77585 + the risks posed by exploitable memory corruption bugs.
77586 +
77587 +menu "PaX Control"
77588 + depends on PAX
77589 +
77590 +config PAX_SOFTMODE
77591 + bool 'Support soft mode'
77592 + help
77593 + Enabling this option will allow you to run PaX in soft mode, that
77594 + is, PaX features will not be enforced by default, only on executables
77595 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
77596 + support as they are the only way to mark executables for soft mode use.
77597 +
77598 + Soft mode can be activated by using the "pax_softmode=1" kernel command
77599 + line option on boot. Furthermore you can control various PaX features
77600 + at runtime via the entries in /proc/sys/kernel/pax.
77601 +
77602 +config PAX_EI_PAX
77603 + bool 'Use legacy ELF header marking'
77604 + help
77605 + Enabling this option will allow you to control PaX features on
77606 + a per executable basis via the 'chpax' utility available at
77607 + http://pax.grsecurity.net/. The control flags will be read from
77608 + an otherwise reserved part of the ELF header. This marking has
77609 + numerous drawbacks (no support for soft-mode, toolchain does not
77610 + know about the non-standard use of the ELF header) therefore it
77611 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
77612 + support.
77613 +
77614 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77615 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
77616 + option otherwise they will not get any protection.
77617 +
77618 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
77619 + support as well, they will override the legacy EI_PAX marks.
77620 +
77621 +config PAX_PT_PAX_FLAGS
77622 + bool 'Use ELF program header marking'
77623 + help
77624 + Enabling this option will allow you to control PaX features on
77625 + a per executable basis via the 'paxctl' utility available at
77626 + http://pax.grsecurity.net/. The control flags will be read from
77627 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
77628 + has the benefits of supporting both soft mode and being fully
77629 + integrated into the toolchain (the binutils patch is available
77630 + from http://pax.grsecurity.net).
77631 +
77632 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77633 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
77634 + support otherwise they will not get any protection.
77635 +
77636 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
77637 + must make sure that the marks are the same if a binary has both marks.
77638 +
77639 + Note that if you enable the legacy EI_PAX marking support as well,
77640 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
77641 +
77642 +config PAX_XATTR_PAX_FLAGS
77643 + bool 'Use filesystem extended attributes marking'
77644 + select CIFS_XATTR if CIFS
77645 + select EXT2_FS_XATTR if EXT2_FS
77646 + select EXT3_FS_XATTR if EXT3_FS
77647 + select EXT4_FS_XATTR if EXT4_FS
77648 + select JFFS2_FS_XATTR if JFFS2_FS
77649 + select REISERFS_FS_XATTR if REISERFS_FS
77650 + select SQUASHFS_XATTR if SQUASHFS
77651 + select TMPFS_XATTR if TMPFS
77652 + select UBIFS_FS_XATTR if UBIFS_FS
77653 + help
77654 + Enabling this option will allow you to control PaX features on
77655 + a per executable basis via the 'setfattr' utility. The control
77656 + flags will be read from the user.pax.flags extended attribute of
77657 + the file. This marking has the benefit of supporting binary-only
77658 + applications that self-check themselves (e.g., skype) and would
77659 + not tolerate chpax/paxctl changes. The main drawback is that
77660 + extended attributes are not supported by some filesystems (e.g.,
77661 + isofs, udf, vfat) so copying files through such filesystems will
77662 + lose the extended attributes and these PaX markings.
77663 +
77664 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77665 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
77666 + support otherwise they will not get any protection.
77667 +
77668 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
77669 + must make sure that the marks are the same if a binary has both marks.
77670 +
77671 + Note that if you enable the legacy EI_PAX marking support as well,
77672 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
77673 +
77674 +choice
77675 + prompt 'MAC system integration'
77676 + default PAX_HAVE_ACL_FLAGS
77677 + help
77678 + Mandatory Access Control systems have the option of controlling
77679 + PaX flags on a per executable basis, choose the method supported
77680 + by your particular system.
77681 +
77682 + - "none": if your MAC system does not interact with PaX,
77683 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
77684 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
77685 +
77686 + NOTE: this option is for developers/integrators only.
77687 +
77688 + config PAX_NO_ACL_FLAGS
77689 + bool 'none'
77690 +
77691 + config PAX_HAVE_ACL_FLAGS
77692 + bool 'direct'
77693 +
77694 + config PAX_HOOK_ACL_FLAGS
77695 + bool 'hook'
77696 +endchoice
77697 +
77698 +endmenu
77699 +
77700 +menu "Non-executable pages"
77701 + depends on PAX
77702 +
77703 +config PAX_NOEXEC
77704 + bool "Enforce non-executable pages"
77705 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
77706 + help
77707 + By design some architectures do not allow for protecting memory
77708 + pages against execution or even if they do, Linux does not make
77709 + use of this feature. In practice this means that if a page is
77710 + readable (such as the stack or heap) it is also executable.
77711 +
77712 + There is a well known exploit technique that makes use of this
77713 + fact and a common programming mistake where an attacker can
77714 + introduce code of his choice somewhere in the attacked program's
77715 + memory (typically the stack or the heap) and then execute it.
77716 +
77717 + If the attacked program was running with different (typically
77718 + higher) privileges than that of the attacker, then he can elevate
77719 + his own privilege level (e.g. get a root shell, write to files for
77720 + which he does not have write access to, etc).
77721 +
77722 + Enabling this option will let you choose from various features
77723 + that prevent the injection and execution of 'foreign' code in
77724 + a program.
77725 +
77726 + This will also break programs that rely on the old behaviour and
77727 + expect that dynamically allocated memory via the malloc() family
77728 + of functions is executable (which it is not). Notable examples
77729 + are the XFree86 4.x server, the java runtime and wine.
77730 +
77731 +config PAX_PAGEEXEC
77732 + bool "Paging based non-executable pages"
77733 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
77734 + select S390_SWITCH_AMODE if S390
77735 + select S390_EXEC_PROTECT if S390
77736 + select ARCH_TRACK_EXEC_LIMIT if X86_32
77737 + help
77738 + This implementation is based on the paging feature of the CPU.
77739 + On i386 without hardware non-executable bit support there is a
77740 + variable but usually low performance impact, however on Intel's
77741 + P4 core based CPUs it is very high so you should not enable this
77742 + for kernels meant to be used on such CPUs.
77743 +
77744 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
77745 + with hardware non-executable bit support there is no performance
77746 + impact, on ppc the impact is negligible.
77747 +
77748 + Note that several architectures require various emulations due to
77749 + badly designed userland ABIs, this will cause a performance impact
77750 + but will disappear as soon as userland is fixed. For example, ppc
77751 + userland MUST have been built with secure-plt by a recent toolchain.
77752 +
77753 +config PAX_SEGMEXEC
77754 + bool "Segmentation based non-executable pages"
77755 + depends on PAX_NOEXEC && X86_32
77756 + help
77757 + This implementation is based on the segmentation feature of the
77758 + CPU and has a very small performance impact, however applications
77759 + will be limited to a 1.5 GB address space instead of the normal
77760 + 3 GB.
77761 +
77762 +config PAX_EMUTRAMP
77763 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
77764 + default y if PARISC
77765 + help
77766 + There are some programs and libraries that for one reason or
77767 + another attempt to execute special small code snippets from
77768 + non-executable memory pages. Most notable examples are the
77769 + signal handler return code generated by the kernel itself and
77770 + the GCC trampolines.
77771 +
77772 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
77773 + such programs will no longer work under your kernel.
77774 +
77775 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
77776 + utilities to enable trampoline emulation for the affected programs
77777 + yet still have the protection provided by the non-executable pages.
77778 +
77779 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
77780 + your system will not even boot.
77781 +
77782 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
77783 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
77784 + for the affected files.
77785 +
77786 + NOTE: enabling this feature *may* open up a loophole in the
77787 + protection provided by non-executable pages that an attacker
77788 + could abuse. Therefore the best solution is to not have any
77789 + files on your system that would require this option. This can
77790 + be achieved by not using libc5 (which relies on the kernel
77791 + signal handler return code) and not using or rewriting programs
77792 + that make use of the nested function implementation of GCC.
77793 + Skilled users can just fix GCC itself so that it implements
77794 + nested function calls in a way that does not interfere with PaX.
77795 +
77796 +config PAX_EMUSIGRT
77797 + bool "Automatically emulate sigreturn trampolines"
77798 + depends on PAX_EMUTRAMP && PARISC
77799 + default y
77800 + help
77801 + Enabling this option will have the kernel automatically detect
77802 + and emulate signal return trampolines executing on the stack
77803 + that would otherwise lead to task termination.
77804 +
77805 + This solution is intended as a temporary one for users with
77806 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
77807 + Modula-3 runtime, etc) or executables linked to such, basically
77808 + everything that does not specify its own SA_RESTORER function in
77809 + normal executable memory like glibc 2.1+ does.
77810 +
77811 + On parisc you MUST enable this option, otherwise your system will
77812 + not even boot.
77813 +
77814 + NOTE: this feature cannot be disabled on a per executable basis
77815 + and since it *does* open up a loophole in the protection provided
77816 + by non-executable pages, the best solution is to not have any
77817 + files on your system that would require this option.
77818 +
77819 +config PAX_MPROTECT
77820 + bool "Restrict mprotect()"
77821 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
77822 + help
77823 + Enabling this option will prevent programs from
77824 + - changing the executable status of memory pages that were
77825 + not originally created as executable,
77826 + - making read-only executable pages writable again,
77827 + - creating executable pages from anonymous memory,
77828 + - making read-only-after-relocations (RELRO) data pages writable again.
77829 +
77830 + You should say Y here to complete the protection provided by
77831 + the enforcement of non-executable pages.
77832 +
77833 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
77834 + this feature on a per file basis.
77835 +
77836 +config PAX_MPROTECT_COMPAT
77837 + bool "Use legacy/compat protection demoting (read help)"
77838 + depends on PAX_MPROTECT
77839 + default n
77840 + help
77841 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
77842 + by sending the proper error code to the application. For some broken
77843 + userland, this can cause problems with Python or other applications. The
77844 + current implementation however allows for applications like clamav to
77845 + detect if JIT compilation/execution is allowed and to fall back gracefully
77846 + to an interpreter-based mode if it does not. While we encourage everyone
77847 + to use the current implementation as-is and push upstream to fix broken
77848 + userland (note that the RWX logging option can assist with this), in some
77849 + environments this may not be possible. Having to disable MPROTECT
77850 + completely on certain binaries reduces the security benefit of PaX,
77851 + so this option is provided for those environments to revert to the old
77852 + behavior.
77853 +
77854 +config PAX_ELFRELOCS
77855 + bool "Allow ELF text relocations (read help)"
77856 + depends on PAX_MPROTECT
77857 + default n
77858 + help
77859 + Non-executable pages and mprotect() restrictions are effective
77860 + in preventing the introduction of new executable code into an
77861 + attacked task's address space. There remain only two venues
77862 + for this kind of attack: if the attacker can execute already
77863 + existing code in the attacked task then he can either have it
77864 + create and mmap() a file containing his code or have it mmap()
77865 + an already existing ELF library that does not have position
77866 + independent code in it and use mprotect() on it to make it
77867 + writable and copy his code there. While protecting against
77868 + the former approach is beyond PaX, the latter can be prevented
77869 + by having only PIC ELF libraries on one's system (which do not
77870 + need to relocate their code). If you are sure this is your case,
77871 + as is the case with all modern Linux distributions, then leave
77872 + this option disabled. You should say 'n' here.
77873 +
77874 +config PAX_ETEXECRELOCS
77875 + bool "Allow ELF ET_EXEC text relocations"
77876 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
77877 + select PAX_ELFRELOCS
77878 + default y
77879 + help
77880 + On some architectures there are incorrectly created applications
77881 + that require text relocations and would not work without enabling
77882 + this option. If you are an alpha, ia64 or parisc user, you should
77883 + enable this option and disable it once you have made sure that
77884 + none of your applications need it.
77885 +
77886 +config PAX_EMUPLT
77887 + bool "Automatically emulate ELF PLT"
77888 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
77889 + default y
77890 + help
77891 + Enabling this option will have the kernel automatically detect
77892 + and emulate the Procedure Linkage Table entries in ELF files.
77893 + On some architectures such entries are in writable memory, and
77894 + become non-executable leading to task termination. Therefore
77895 + it is mandatory that you enable this option on alpha, parisc,
77896 + sparc and sparc64, otherwise your system would not even boot.
77897 +
77898 + NOTE: this feature *does* open up a loophole in the protection
77899 + provided by the non-executable pages, therefore the proper
77900 + solution is to modify the toolchain to produce a PLT that does
77901 + not need to be writable.
77902 +
77903 +config PAX_DLRESOLVE
77904 + bool 'Emulate old glibc resolver stub'
77905 + depends on PAX_EMUPLT && SPARC
77906 + default n
77907 + help
77908 + This option is needed if userland has an old glibc (before 2.4)
77909 + that puts a 'save' instruction into the runtime generated resolver
77910 + stub that needs special emulation.
77911 +
77912 +config PAX_KERNEXEC
77913 + bool "Enforce non-executable kernel pages"
77914 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
77915 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
77916 + select PAX_KERNEXEC_PLUGIN if X86_64
77917 + help
77918 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
77919 + that is, enabling this option will make it harder to inject
77920 + and execute 'foreign' code in kernel memory itself.
77921 +
77922 + Note that on x86_64 kernels there is a known regression when
77923 + this feature and KVM/VMX are both enabled in the host kernel.
77924 +
77925 +choice
77926 + prompt "Return Address Instrumentation Method"
77927 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
77928 + depends on PAX_KERNEXEC_PLUGIN
77929 + help
77930 + Select the method used to instrument function pointer dereferences.
77931 + Note that binary modules cannot be instrumented by this approach.
77932 +
77933 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
77934 + bool "bts"
77935 + help
77936 + This method is compatible with binary only modules but has
77937 + a higher runtime overhead.
77938 +
77939 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
77940 + bool "or"
77941 + depends on !PARAVIRT
77942 + help
77943 + This method is incompatible with binary only modules but has
77944 + a lower runtime overhead.
77945 +endchoice
77946 +
77947 +config PAX_KERNEXEC_PLUGIN_METHOD
77948 + string
77949 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
77950 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
77951 + default ""
77952 +
77953 +config PAX_KERNEXEC_MODULE_TEXT
77954 + int "Minimum amount of memory reserved for module code"
77955 + default "4"
77956 + depends on PAX_KERNEXEC && X86_32 && MODULES
77957 + help
77958 + Due to implementation details the kernel must reserve a fixed
77959 + amount of memory for module code at compile time that cannot be
77960 + changed at runtime. Here you can specify the minimum amount
77961 + in MB that will be reserved. Due to the same implementation
77962 + details this size will always be rounded up to the next 2/4 MB
77963 + boundary (depends on PAE) so the actually available memory for
77964 + module code will usually be more than this minimum.
77965 +
77966 + The default 4 MB should be enough for most users but if you have
77967 + an excessive number of modules (e.g., most distribution configs
77968 + compile many drivers as modules) or use huge modules such as
77969 + nvidia's kernel driver, you will need to adjust this amount.
77970 + A good rule of thumb is to look at your currently loaded kernel
77971 + modules and add up their sizes.
77972 +
77973 +endmenu
77974 +
77975 +menu "Address Space Layout Randomization"
77976 + depends on PAX
77977 +
77978 +config PAX_ASLR
77979 + bool "Address Space Layout Randomization"
77980 + help
77981 + Many if not most exploit techniques rely on the knowledge of
77982 + certain addresses in the attacked program. The following options
77983 + will allow the kernel to apply a certain amount of randomization
77984 + to specific parts of the program thereby forcing an attacker to
77985 + guess them in most cases. Any failed guess will most likely crash
77986 + the attacked program which allows the kernel to detect such attempts
77987 + and react on them. PaX itself provides no reaction mechanisms,
77988 + instead it is strongly encouraged that you make use of Nergal's
77989 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
77990 + (http://www.grsecurity.net/) built-in crash detection features or
77991 + develop one yourself.
77992 +
77993 + By saying Y here you can choose to randomize the following areas:
77994 + - top of the task's kernel stack
77995 + - top of the task's userland stack
77996 + - base address for mmap() requests that do not specify one
77997 + (this includes all libraries)
77998 + - base address of the main executable
77999 +
78000 + It is strongly recommended to say Y here as address space layout
78001 + randomization has negligible impact on performance yet it provides
78002 + a very effective protection.
78003 +
78004 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
78005 + this feature on a per file basis.
78006 +
78007 +config PAX_RANDKSTACK
78008 + bool "Randomize kernel stack base"
78009 + depends on X86_TSC && X86
78010 + help
78011 + By saying Y here the kernel will randomize every task's kernel
78012 + stack on every system call. This will not only force an attacker
78013 + to guess it but also prevent him from making use of possible
78014 + leaked information about it.
78015 +
78016 + Since the kernel stack is a rather scarce resource, randomization
78017 + may cause unexpected stack overflows, therefore you should very
78018 + carefully test your system. Note that once enabled in the kernel
78019 + configuration, this feature cannot be disabled on a per file basis.
78020 +
78021 +config PAX_RANDUSTACK
78022 + bool "Randomize user stack base"
78023 + depends on PAX_ASLR
78024 + help
78025 + By saying Y here the kernel will randomize every task's userland
78026 + stack. The randomization is done in two steps where the second
78027 + one may apply a big amount of shift to the top of the stack and
78028 + cause problems for programs that want to use lots of memory (more
78029 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
78030 + For this reason the second step can be controlled by 'chpax' or
78031 + 'paxctl' on a per file basis.
78032 +
78033 +config PAX_RANDMMAP
78034 + bool "Randomize mmap() base"
78035 + depends on PAX_ASLR
78036 + help
78037 + By saying Y here the kernel will use a randomized base address for
78038 + mmap() requests that do not specify one themselves. As a result
78039 + all dynamically loaded libraries will appear at random addresses
78040 + and therefore be harder to exploit by a technique where an attacker
78041 + attempts to execute library code for his purposes (e.g. spawn a
78042 + shell from an exploited program that is running at an elevated
78043 + privilege level).
78044 +
78045 + Furthermore, if a program is relinked as a dynamic ELF file, its
78046 + base address will be randomized as well, completing the full
78047 + randomization of the address space layout. Attacking such programs
78048 + becomes a guess game. You can find an example of doing this at
78049 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
78050 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
78051 +
78052 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
78053 + feature on a per file basis.
78054 +
78055 +endmenu
78056 +
78057 +menu "Miscellaneous hardening features"
78058 +
78059 +config PAX_MEMORY_SANITIZE
78060 + bool "Sanitize all freed memory"
78061 + depends on !HIBERNATION
78062 + help
78063 + By saying Y here the kernel will erase memory pages as soon as they
78064 + are freed. This in turn reduces the lifetime of data stored in the
78065 + pages, making it less likely that sensitive information such as
78066 + passwords, cryptographic secrets, etc stay in memory for too long.
78067 +
78068 + This is especially useful for programs whose runtime is short, long
78069 + lived processes and the kernel itself benefit from this as long as
78070 + they operate on whole memory pages and ensure timely freeing of pages
78071 + that may hold sensitive information.
78072 +
78073 + The tradeoff is performance impact, on a single CPU system kernel
78074 + compilation sees a 3% slowdown, other systems and workloads may vary
78075 + and you are advised to test this feature on your expected workload
78076 + before deploying it.
78077 +
78078 + Note that this feature does not protect data stored in live pages,
78079 + e.g., process memory swapped to disk may stay there for a long time.
78080 +
78081 +config PAX_MEMORY_STACKLEAK
78082 + bool "Sanitize kernel stack"
78083 + depends on X86
78084 + help
78085 + By saying Y here the kernel will erase the kernel stack before it
78086 + returns from a system call. This in turn reduces the information
78087 + that a kernel stack leak bug can reveal.
78088 +
78089 + Note that such a bug can still leak information that was put on
78090 + the stack by the current system call (the one eventually triggering
78091 + the bug) but traces of earlier system calls on the kernel stack
78092 + cannot leak anymore.
78093 +
78094 + The tradeoff is performance impact: on a single CPU system kernel
78095 + compilation sees a 1% slowdown, other systems and workloads may vary
78096 + and you are advised to test this feature on your expected workload
78097 + before deploying it.
78098 +
78099 + Note: full support for this feature requires gcc with plugin support
78100 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
78101 + versions means that functions with large enough stack frames may
78102 + leave uninitialized memory behind that may be exposed to a later
78103 + syscall leaking the stack.
78104 +
78105 +config PAX_MEMORY_UDEREF
78106 + bool "Prevent invalid userland pointer dereference"
78107 + depends on X86 && !UML_X86 && !XEN
78108 + select PAX_PER_CPU_PGD if X86_64
78109 + help
78110 + By saying Y here the kernel will be prevented from dereferencing
78111 + userland pointers in contexts where the kernel expects only kernel
78112 + pointers. This is both a useful runtime debugging feature and a
78113 + security measure that prevents exploiting a class of kernel bugs.
78114 +
78115 + The tradeoff is that some virtualization solutions may experience
78116 + a huge slowdown and therefore you should not enable this feature
78117 + for kernels meant to run in such environments. Whether a given VM
78118 + solution is affected or not is best determined by simply trying it
78119 + out, the performance impact will be obvious right on boot as this
78120 + mechanism engages from very early on. A good rule of thumb is that
78121 + VMs running on CPUs without hardware virtualization support (i.e.,
78122 + the majority of IA-32 CPUs) will likely experience the slowdown.
78123 +
78124 +config PAX_REFCOUNT
78125 + bool "Prevent various kernel object reference counter overflows"
78126 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
78127 + help
78128 + By saying Y here the kernel will detect and prevent overflowing
78129 + various (but not all) kinds of object reference counters. Such
78130 + overflows can normally occur due to bugs only and are often, if
78131 + not always, exploitable.
78132 +
78133 + The tradeoff is that data structures protected by an overflowed
78134 + refcount will never be freed and therefore will leak memory. Note
78135 + that this leak also happens even without this protection but in
78136 + that case the overflow can eventually trigger the freeing of the
78137 + data structure while it is still being used elsewhere, resulting
78138 + in the exploitable situation that this feature prevents.
78139 +
78140 + Since this has a negligible performance impact, you should enable
78141 + this feature.
78142 +
78143 +config PAX_USERCOPY
78144 + bool "Harden heap object copies between kernel and userland"
78145 + depends on X86 || PPC || SPARC || ARM
78146 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
78147 + help
78148 + By saying Y here the kernel will enforce the size of heap objects
78149 + when they are copied in either direction between the kernel and
78150 + userland, even if only a part of the heap object is copied.
78151 +
78152 + Specifically, this checking prevents information leaking from the
78153 + kernel heap during kernel to userland copies (if the kernel heap
78154 + object is otherwise fully initialized) and prevents kernel heap
78155 + overflows during userland to kernel copies.
78156 +
78157 + Note that the current implementation provides the strictest bounds
78158 + checks for the SLUB allocator.
78159 +
78160 + Enabling this option also enables per-slab cache protection against
78161 + data in a given cache being copied into/out of via userland
78162 + accessors. Though the whitelist of regions will be reduced over
78163 + time, it notably protects important data structures like task structs.
78164 +
78165 + If frame pointers are enabled on x86, this option will also restrict
78166 + copies into and out of the kernel stack to local variables within a
78167 + single frame.
78168 +
78169 + Since this has a negligible performance impact, you should enable
78170 + this feature.
78171 +
78172 +config PAX_SIZE_OVERFLOW
78173 + bool "Prevent various integer overflows in function size parameters"
78174 + depends on X86
78175 + help
78176 + By saying Y here the kernel recomputes expressions of function
78177 + arguments marked by a size_overflow attribute with double integer
78178 + precision (DImode/TImode for 32/64 bit integer types).
78179 +
78180 + The recomputed argument is checked against INT_MAX and an event
78181 + is logged on overflow and the triggering process is killed.
78182 +
78183 + Homepage:
78184 + http://www.grsecurity.net/~ephox/overflow_plugin/
78185 +
78186 +endmenu
78187 +
78188 +endmenu
78189 +
78190 config KEYS
78191 bool "Enable access key retention support"
78192 help
78193 @@ -169,7 +803,7 @@ config INTEL_TXT
78194 config LSM_MMAP_MIN_ADDR
78195 int "Low address space for LSM to protect from user allocation"
78196 depends on SECURITY && SECURITY_SELINUX
78197 - default 32768 if ARM
78198 + default 32768 if ALPHA || ARM || PARISC || SPARC32
78199 default 65536
78200 help
78201 This is the portion of low virtual memory which should be protected
78202 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
78203 index ad05d39..afffccb 100644
78204 --- a/security/apparmor/lsm.c
78205 +++ b/security/apparmor/lsm.c
78206 @@ -622,7 +622,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
78207 return error;
78208 }
78209
78210 -static struct security_operations apparmor_ops = {
78211 +static struct security_operations apparmor_ops __read_only = {
78212 .name = "apparmor",
78213
78214 .ptrace_access_check = apparmor_ptrace_access_check,
78215 diff --git a/security/commoncap.c b/security/commoncap.c
78216 index 71a166a..851bb3e 100644
78217 --- a/security/commoncap.c
78218 +++ b/security/commoncap.c
78219 @@ -576,6 +576,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
78220 {
78221 const struct cred *cred = current_cred();
78222
78223 + if (gr_acl_enable_at_secure())
78224 + return 1;
78225 +
78226 if (cred->uid != 0) {
78227 if (bprm->cap_effective)
78228 return 1;
78229 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
78230 index 3ccf7ac..d73ad64 100644
78231 --- a/security/integrity/ima/ima.h
78232 +++ b/security/integrity/ima/ima.h
78233 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
78234 extern spinlock_t ima_queue_lock;
78235
78236 struct ima_h_table {
78237 - atomic_long_t len; /* number of stored measurements in the list */
78238 - atomic_long_t violations;
78239 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
78240 + atomic_long_unchecked_t violations;
78241 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
78242 };
78243 extern struct ima_h_table ima_htable;
78244 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
78245 index 88a2788..581ab92 100644
78246 --- a/security/integrity/ima/ima_api.c
78247 +++ b/security/integrity/ima/ima_api.c
78248 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
78249 int result;
78250
78251 /* can overflow, only indicator */
78252 - atomic_long_inc(&ima_htable.violations);
78253 + atomic_long_inc_unchecked(&ima_htable.violations);
78254
78255 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
78256 if (!entry) {
78257 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
78258 index e1aa2b4..52027bf 100644
78259 --- a/security/integrity/ima/ima_fs.c
78260 +++ b/security/integrity/ima/ima_fs.c
78261 @@ -28,12 +28,12 @@
78262 static int valid_policy = 1;
78263 #define TMPBUFLEN 12
78264 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
78265 - loff_t *ppos, atomic_long_t *val)
78266 + loff_t *ppos, atomic_long_unchecked_t *val)
78267 {
78268 char tmpbuf[TMPBUFLEN];
78269 ssize_t len;
78270
78271 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
78272 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
78273 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
78274 }
78275
78276 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
78277 index 55a6271..ad829c3 100644
78278 --- a/security/integrity/ima/ima_queue.c
78279 +++ b/security/integrity/ima/ima_queue.c
78280 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
78281 INIT_LIST_HEAD(&qe->later);
78282 list_add_tail_rcu(&qe->later, &ima_measurements);
78283
78284 - atomic_long_inc(&ima_htable.len);
78285 + atomic_long_inc_unchecked(&ima_htable.len);
78286 key = ima_hash_key(entry->digest);
78287 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
78288 return 0;
78289 diff --git a/security/keys/compat.c b/security/keys/compat.c
78290 index 4c48e13..7abdac9 100644
78291 --- a/security/keys/compat.c
78292 +++ b/security/keys/compat.c
78293 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
78294 if (ret == 0)
78295 goto no_payload_free;
78296
78297 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
78298 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
78299
78300 if (iov != iovstack)
78301 kfree(iov);
78302 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
78303 index fb767c6..b9c49c0 100644
78304 --- a/security/keys/keyctl.c
78305 +++ b/security/keys/keyctl.c
78306 @@ -935,7 +935,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
78307 /*
78308 * Copy the iovec data from userspace
78309 */
78310 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
78311 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
78312 unsigned ioc)
78313 {
78314 for (; ioc > 0; ioc--) {
78315 @@ -957,7 +957,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
78316 * If successful, 0 will be returned.
78317 */
78318 long keyctl_instantiate_key_common(key_serial_t id,
78319 - const struct iovec *payload_iov,
78320 + const struct iovec __user *payload_iov,
78321 unsigned ioc,
78322 size_t plen,
78323 key_serial_t ringid)
78324 @@ -1052,7 +1052,7 @@ long keyctl_instantiate_key(key_serial_t id,
78325 [0].iov_len = plen
78326 };
78327
78328 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
78329 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
78330 }
78331
78332 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
78333 @@ -1085,7 +1085,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
78334 if (ret == 0)
78335 goto no_payload_free;
78336
78337 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
78338 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
78339
78340 if (iov != iovstack)
78341 kfree(iov);
78342 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
78343 index d605f75..2bc6be9 100644
78344 --- a/security/keys/keyring.c
78345 +++ b/security/keys/keyring.c
78346 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
78347 ret = -EFAULT;
78348
78349 for (loop = 0; loop < klist->nkeys; loop++) {
78350 + key_serial_t serial;
78351 key = klist->keys[loop];
78352 + serial = key->serial;
78353
78354 tmp = sizeof(key_serial_t);
78355 if (tmp > buflen)
78356 tmp = buflen;
78357
78358 - if (copy_to_user(buffer,
78359 - &key->serial,
78360 - tmp) != 0)
78361 + if (copy_to_user(buffer, &serial, tmp))
78362 goto error;
78363
78364 buflen -= tmp;
78365 diff --git a/security/min_addr.c b/security/min_addr.c
78366 index f728728..6457a0c 100644
78367 --- a/security/min_addr.c
78368 +++ b/security/min_addr.c
78369 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
78370 */
78371 static void update_mmap_min_addr(void)
78372 {
78373 +#ifndef SPARC
78374 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
78375 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
78376 mmap_min_addr = dac_mmap_min_addr;
78377 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
78378 #else
78379 mmap_min_addr = dac_mmap_min_addr;
78380 #endif
78381 +#endif
78382 }
78383
78384 /*
78385 diff --git a/security/security.c b/security/security.c
78386 index bf619ff..8179030 100644
78387 --- a/security/security.c
78388 +++ b/security/security.c
78389 @@ -20,6 +20,7 @@
78390 #include <linux/ima.h>
78391 #include <linux/evm.h>
78392 #include <linux/fsnotify.h>
78393 +#include <linux/mm.h>
78394 #include <net/flow.h>
78395
78396 #define MAX_LSM_EVM_XATTR 2
78397 @@ -28,8 +29,8 @@
78398 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
78399 CONFIG_DEFAULT_SECURITY;
78400
78401 -static struct security_operations *security_ops;
78402 -static struct security_operations default_security_ops = {
78403 +static struct security_operations *security_ops __read_only;
78404 +static struct security_operations default_security_ops __read_only = {
78405 .name = "default",
78406 };
78407
78408 @@ -70,7 +71,9 @@ int __init security_init(void)
78409
78410 void reset_security_ops(void)
78411 {
78412 + pax_open_kernel();
78413 security_ops = &default_security_ops;
78414 + pax_close_kernel();
78415 }
78416
78417 /* Save user chosen LSM */
78418 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
78419 index d85b793..a164832 100644
78420 --- a/security/selinux/hooks.c
78421 +++ b/security/selinux/hooks.c
78422 @@ -95,8 +95,6 @@
78423
78424 #define NUM_SEL_MNT_OPTS 5
78425
78426 -extern struct security_operations *security_ops;
78427 -
78428 /* SECMARK reference count */
78429 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
78430
78431 @@ -5520,7 +5518,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
78432
78433 #endif
78434
78435 -static struct security_operations selinux_ops = {
78436 +static struct security_operations selinux_ops __read_only = {
78437 .name = "selinux",
78438
78439 .ptrace_access_check = selinux_ptrace_access_check,
78440 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
78441 index c220f31..89fab3f 100644
78442 --- a/security/selinux/include/xfrm.h
78443 +++ b/security/selinux/include/xfrm.h
78444 @@ -50,7 +50,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
78445
78446 static inline void selinux_xfrm_notify_policyload(void)
78447 {
78448 - atomic_inc(&flow_cache_genid);
78449 + atomic_inc_unchecked(&flow_cache_genid);
78450 }
78451 #else
78452 static inline int selinux_xfrm_enabled(void)
78453 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
78454 index 45c32f0..0038be2 100644
78455 --- a/security/smack/smack_lsm.c
78456 +++ b/security/smack/smack_lsm.c
78457 @@ -3500,7 +3500,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
78458 return 0;
78459 }
78460
78461 -struct security_operations smack_ops = {
78462 +struct security_operations smack_ops __read_only = {
78463 .name = "smack",
78464
78465 .ptrace_access_check = smack_ptrace_access_check,
78466 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
78467 index 620d37c..e2ad89b 100644
78468 --- a/security/tomoyo/tomoyo.c
78469 +++ b/security/tomoyo/tomoyo.c
78470 @@ -501,7 +501,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
78471 * tomoyo_security_ops is a "struct security_operations" which is used for
78472 * registering TOMOYO.
78473 */
78474 -static struct security_operations tomoyo_security_ops = {
78475 +static struct security_operations tomoyo_security_ops __read_only = {
78476 .name = "tomoyo",
78477 .cred_alloc_blank = tomoyo_cred_alloc_blank,
78478 .cred_prepare = tomoyo_cred_prepare,
78479 diff --git a/security/yama/Kconfig b/security/yama/Kconfig
78480 index 51d6709..1f3dbe2 100644
78481 --- a/security/yama/Kconfig
78482 +++ b/security/yama/Kconfig
78483 @@ -1,6 +1,6 @@
78484 config SECURITY_YAMA
78485 bool "Yama support"
78486 - depends on SECURITY
78487 + depends on SECURITY && !GRKERNSEC
78488 select SECURITYFS
78489 select SECURITY_PATH
78490 default n
78491 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
78492 index 270790d..c67dfcb 100644
78493 --- a/sound/aoa/codecs/onyx.c
78494 +++ b/sound/aoa/codecs/onyx.c
78495 @@ -54,7 +54,7 @@ struct onyx {
78496 spdif_locked:1,
78497 analog_locked:1,
78498 original_mute:2;
78499 - int open_count;
78500 + local_t open_count;
78501 struct codec_info *codec_info;
78502
78503 /* mutex serializes concurrent access to the device
78504 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
78505 struct onyx *onyx = cii->codec_data;
78506
78507 mutex_lock(&onyx->mutex);
78508 - onyx->open_count++;
78509 + local_inc(&onyx->open_count);
78510 mutex_unlock(&onyx->mutex);
78511
78512 return 0;
78513 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
78514 struct onyx *onyx = cii->codec_data;
78515
78516 mutex_lock(&onyx->mutex);
78517 - onyx->open_count--;
78518 - if (!onyx->open_count)
78519 + if (local_dec_and_test(&onyx->open_count))
78520 onyx->spdif_locked = onyx->analog_locked = 0;
78521 mutex_unlock(&onyx->mutex);
78522
78523 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
78524 index ffd2025..df062c9 100644
78525 --- a/sound/aoa/codecs/onyx.h
78526 +++ b/sound/aoa/codecs/onyx.h
78527 @@ -11,6 +11,7 @@
78528 #include <linux/i2c.h>
78529 #include <asm/pmac_low_i2c.h>
78530 #include <asm/prom.h>
78531 +#include <asm/local.h>
78532
78533 /* PCM3052 register definitions */
78534
78535 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
78536 index 08fde00..0bf641a 100644
78537 --- a/sound/core/oss/pcm_oss.c
78538 +++ b/sound/core/oss/pcm_oss.c
78539 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
78540 if (in_kernel) {
78541 mm_segment_t fs;
78542 fs = snd_enter_user();
78543 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
78544 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
78545 snd_leave_user(fs);
78546 } else {
78547 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
78548 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
78549 }
78550 if (ret != -EPIPE && ret != -ESTRPIPE)
78551 break;
78552 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
78553 if (in_kernel) {
78554 mm_segment_t fs;
78555 fs = snd_enter_user();
78556 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
78557 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
78558 snd_leave_user(fs);
78559 } else {
78560 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
78561 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
78562 }
78563 if (ret == -EPIPE) {
78564 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
78565 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
78566 struct snd_pcm_plugin_channel *channels;
78567 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
78568 if (!in_kernel) {
78569 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
78570 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
78571 return -EFAULT;
78572 buf = runtime->oss.buffer;
78573 }
78574 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
78575 }
78576 } else {
78577 tmp = snd_pcm_oss_write2(substream,
78578 - (const char __force *)buf,
78579 + (const char __force_kernel *)buf,
78580 runtime->oss.period_bytes, 0);
78581 if (tmp <= 0)
78582 goto err;
78583 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
78584 struct snd_pcm_runtime *runtime = substream->runtime;
78585 snd_pcm_sframes_t frames, frames1;
78586 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
78587 - char __user *final_dst = (char __force __user *)buf;
78588 + char __user *final_dst = (char __force_user *)buf;
78589 if (runtime->oss.plugin_first) {
78590 struct snd_pcm_plugin_channel *channels;
78591 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
78592 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
78593 xfer += tmp;
78594 runtime->oss.buffer_used -= tmp;
78595 } else {
78596 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
78597 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
78598 runtime->oss.period_bytes, 0);
78599 if (tmp <= 0)
78600 goto err;
78601 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
78602 size1);
78603 size1 /= runtime->channels; /* frames */
78604 fs = snd_enter_user();
78605 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
78606 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
78607 snd_leave_user(fs);
78608 }
78609 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
78610 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
78611 index 91cdf94..4085161 100644
78612 --- a/sound/core/pcm_compat.c
78613 +++ b/sound/core/pcm_compat.c
78614 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
78615 int err;
78616
78617 fs = snd_enter_user();
78618 - err = snd_pcm_delay(substream, &delay);
78619 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
78620 snd_leave_user(fs);
78621 if (err < 0)
78622 return err;
78623 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
78624 index 3fe99e6..26952e4 100644
78625 --- a/sound/core/pcm_native.c
78626 +++ b/sound/core/pcm_native.c
78627 @@ -2770,11 +2770,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
78628 switch (substream->stream) {
78629 case SNDRV_PCM_STREAM_PLAYBACK:
78630 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
78631 - (void __user *)arg);
78632 + (void __force_user *)arg);
78633 break;
78634 case SNDRV_PCM_STREAM_CAPTURE:
78635 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
78636 - (void __user *)arg);
78637 + (void __force_user *)arg);
78638 break;
78639 default:
78640 result = -EINVAL;
78641 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
78642 index 5cf8d65..912a79c 100644
78643 --- a/sound/core/seq/seq_device.c
78644 +++ b/sound/core/seq/seq_device.c
78645 @@ -64,7 +64,7 @@ struct ops_list {
78646 int argsize; /* argument size */
78647
78648 /* operators */
78649 - struct snd_seq_dev_ops ops;
78650 + struct snd_seq_dev_ops *ops;
78651
78652 /* registred devices */
78653 struct list_head dev_list; /* list of devices */
78654 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
78655
78656 mutex_lock(&ops->reg_mutex);
78657 /* copy driver operators */
78658 - ops->ops = *entry;
78659 + ops->ops = entry;
78660 ops->driver |= DRIVER_LOADED;
78661 ops->argsize = argsize;
78662
78663 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
78664 dev->name, ops->id, ops->argsize, dev->argsize);
78665 return -EINVAL;
78666 }
78667 - if (ops->ops.init_device(dev) >= 0) {
78668 + if (ops->ops->init_device(dev) >= 0) {
78669 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
78670 ops->num_init_devices++;
78671 } else {
78672 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
78673 dev->name, ops->id, ops->argsize, dev->argsize);
78674 return -EINVAL;
78675 }
78676 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
78677 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
78678 dev->status = SNDRV_SEQ_DEVICE_FREE;
78679 dev->driver_data = NULL;
78680 ops->num_init_devices--;
78681 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
78682 index 621e60e..f4543f5 100644
78683 --- a/sound/drivers/mts64.c
78684 +++ b/sound/drivers/mts64.c
78685 @@ -29,6 +29,7 @@
78686 #include <sound/initval.h>
78687 #include <sound/rawmidi.h>
78688 #include <sound/control.h>
78689 +#include <asm/local.h>
78690
78691 #define CARD_NAME "Miditerminal 4140"
78692 #define DRIVER_NAME "MTS64"
78693 @@ -67,7 +68,7 @@ struct mts64 {
78694 struct pardevice *pardev;
78695 int pardev_claimed;
78696
78697 - int open_count;
78698 + local_t open_count;
78699 int current_midi_output_port;
78700 int current_midi_input_port;
78701 u8 mode[MTS64_NUM_INPUT_PORTS];
78702 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
78703 {
78704 struct mts64 *mts = substream->rmidi->private_data;
78705
78706 - if (mts->open_count == 0) {
78707 + if (local_read(&mts->open_count) == 0) {
78708 /* We don't need a spinlock here, because this is just called
78709 if the device has not been opened before.
78710 So there aren't any IRQs from the device */
78711 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
78712
78713 msleep(50);
78714 }
78715 - ++(mts->open_count);
78716 + local_inc(&mts->open_count);
78717
78718 return 0;
78719 }
78720 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
78721 struct mts64 *mts = substream->rmidi->private_data;
78722 unsigned long flags;
78723
78724 - --(mts->open_count);
78725 - if (mts->open_count == 0) {
78726 + if (local_dec_return(&mts->open_count) == 0) {
78727 /* We need the spinlock_irqsave here because we can still
78728 have IRQs at this point */
78729 spin_lock_irqsave(&mts->lock, flags);
78730 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
78731
78732 msleep(500);
78733
78734 - } else if (mts->open_count < 0)
78735 - mts->open_count = 0;
78736 + } else if (local_read(&mts->open_count) < 0)
78737 + local_set(&mts->open_count, 0);
78738
78739 return 0;
78740 }
78741 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
78742 index b953fb4..1999c01 100644
78743 --- a/sound/drivers/opl4/opl4_lib.c
78744 +++ b/sound/drivers/opl4/opl4_lib.c
78745 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
78746 MODULE_DESCRIPTION("OPL4 driver");
78747 MODULE_LICENSE("GPL");
78748
78749 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
78750 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
78751 {
78752 int timeout = 10;
78753 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
78754 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
78755 index 3e32bd3..46fc152 100644
78756 --- a/sound/drivers/portman2x4.c
78757 +++ b/sound/drivers/portman2x4.c
78758 @@ -48,6 +48,7 @@
78759 #include <sound/initval.h>
78760 #include <sound/rawmidi.h>
78761 #include <sound/control.h>
78762 +#include <asm/local.h>
78763
78764 #define CARD_NAME "Portman 2x4"
78765 #define DRIVER_NAME "portman"
78766 @@ -85,7 +86,7 @@ struct portman {
78767 struct pardevice *pardev;
78768 int pardev_claimed;
78769
78770 - int open_count;
78771 + local_t open_count;
78772 int mode[PORTMAN_NUM_INPUT_PORTS];
78773 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
78774 };
78775 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
78776 index 87657dd..a8268d4 100644
78777 --- a/sound/firewire/amdtp.c
78778 +++ b/sound/firewire/amdtp.c
78779 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
78780 ptr = s->pcm_buffer_pointer + data_blocks;
78781 if (ptr >= pcm->runtime->buffer_size)
78782 ptr -= pcm->runtime->buffer_size;
78783 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
78784 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
78785
78786 s->pcm_period_pointer += data_blocks;
78787 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
78788 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
78789 */
78790 void amdtp_out_stream_update(struct amdtp_out_stream *s)
78791 {
78792 - ACCESS_ONCE(s->source_node_id_field) =
78793 + ACCESS_ONCE_RW(s->source_node_id_field) =
78794 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
78795 }
78796 EXPORT_SYMBOL(amdtp_out_stream_update);
78797 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
78798 index 537a9cb..8e8c8e9 100644
78799 --- a/sound/firewire/amdtp.h
78800 +++ b/sound/firewire/amdtp.h
78801 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
78802 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
78803 struct snd_pcm_substream *pcm)
78804 {
78805 - ACCESS_ONCE(s->pcm) = pcm;
78806 + ACCESS_ONCE_RW(s->pcm) = pcm;
78807 }
78808
78809 /**
78810 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
78811 index d428ffe..751ef78 100644
78812 --- a/sound/firewire/isight.c
78813 +++ b/sound/firewire/isight.c
78814 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
78815 ptr += count;
78816 if (ptr >= runtime->buffer_size)
78817 ptr -= runtime->buffer_size;
78818 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
78819 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
78820
78821 isight->period_counter += count;
78822 if (isight->period_counter >= runtime->period_size) {
78823 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
78824 if (err < 0)
78825 return err;
78826
78827 - ACCESS_ONCE(isight->pcm_active) = true;
78828 + ACCESS_ONCE_RW(isight->pcm_active) = true;
78829
78830 return 0;
78831 }
78832 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
78833 {
78834 struct isight *isight = substream->private_data;
78835
78836 - ACCESS_ONCE(isight->pcm_active) = false;
78837 + ACCESS_ONCE_RW(isight->pcm_active) = false;
78838
78839 mutex_lock(&isight->mutex);
78840 isight_stop_streaming(isight);
78841 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
78842
78843 switch (cmd) {
78844 case SNDRV_PCM_TRIGGER_START:
78845 - ACCESS_ONCE(isight->pcm_running) = true;
78846 + ACCESS_ONCE_RW(isight->pcm_running) = true;
78847 break;
78848 case SNDRV_PCM_TRIGGER_STOP:
78849 - ACCESS_ONCE(isight->pcm_running) = false;
78850 + ACCESS_ONCE_RW(isight->pcm_running) = false;
78851 break;
78852 default:
78853 return -EINVAL;
78854 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
78855 index 7bd5e33..1fcab12 100644
78856 --- a/sound/isa/cmi8330.c
78857 +++ b/sound/isa/cmi8330.c
78858 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
78859
78860 struct snd_pcm *pcm;
78861 struct snd_cmi8330_stream {
78862 - struct snd_pcm_ops ops;
78863 + snd_pcm_ops_no_const ops;
78864 snd_pcm_open_callback_t open;
78865 void *private_data; /* sb or wss */
78866 } streams[2];
78867 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
78868 index 733b014..56ce96f 100644
78869 --- a/sound/oss/sb_audio.c
78870 +++ b/sound/oss/sb_audio.c
78871 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
78872 buf16 = (signed short *)(localbuf + localoffs);
78873 while (c)
78874 {
78875 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
78876 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
78877 if (copy_from_user(lbuf8,
78878 userbuf+useroffs + p,
78879 locallen))
78880 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
78881 index 09d4648..cf234c7 100644
78882 --- a/sound/oss/swarm_cs4297a.c
78883 +++ b/sound/oss/swarm_cs4297a.c
78884 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
78885 {
78886 struct cs4297a_state *s;
78887 u32 pwr, id;
78888 - mm_segment_t fs;
78889 int rval;
78890 #ifndef CONFIG_BCM_CS4297A_CSWARM
78891 u64 cfg;
78892 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
78893 if (!rval) {
78894 char *sb1250_duart_present;
78895
78896 +#if 0
78897 + mm_segment_t fs;
78898 fs = get_fs();
78899 set_fs(KERNEL_DS);
78900 -#if 0
78901 val = SOUND_MASK_LINE;
78902 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
78903 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
78904 val = initvol[i].vol;
78905 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
78906 }
78907 + set_fs(fs);
78908 // cs4297a_write_ac97(s, 0x18, 0x0808);
78909 #else
78910 // cs4297a_write_ac97(s, 0x5e, 0x180);
78911 cs4297a_write_ac97(s, 0x02, 0x0808);
78912 cs4297a_write_ac97(s, 0x18, 0x0808);
78913 #endif
78914 - set_fs(fs);
78915
78916 list_add(&s->list, &cs4297a_devs);
78917
78918 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
78919 index 56b4f74..7cfd41a 100644
78920 --- a/sound/pci/hda/hda_codec.h
78921 +++ b/sound/pci/hda/hda_codec.h
78922 @@ -611,7 +611,7 @@ struct hda_bus_ops {
78923 /* notify power-up/down from codec to controller */
78924 void (*pm_notify)(struct hda_bus *bus);
78925 #endif
78926 -};
78927 +} __no_const;
78928
78929 /* template to pass to the bus constructor */
78930 struct hda_bus_template {
78931 @@ -713,6 +713,7 @@ struct hda_codec_ops {
78932 #endif
78933 void (*reboot_notify)(struct hda_codec *codec);
78934 };
78935 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
78936
78937 /* record for amp information cache */
78938 struct hda_cache_head {
78939 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
78940 struct snd_pcm_substream *substream);
78941 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
78942 struct snd_pcm_substream *substream);
78943 -};
78944 +} __no_const;
78945
78946 /* PCM information for each substream */
78947 struct hda_pcm_stream {
78948 @@ -801,7 +802,7 @@ struct hda_codec {
78949 const char *modelname; /* model name for preset */
78950
78951 /* set by patch */
78952 - struct hda_codec_ops patch_ops;
78953 + hda_codec_ops_no_const patch_ops;
78954
78955 /* PCM to create, set by patch_ops.build_pcms callback */
78956 unsigned int num_pcms;
78957 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
78958 index 0da778a..bc38b84 100644
78959 --- a/sound/pci/ice1712/ice1712.h
78960 +++ b/sound/pci/ice1712/ice1712.h
78961 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
78962 unsigned int mask_flags; /* total mask bits */
78963 struct snd_akm4xxx_ops {
78964 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
78965 - } ops;
78966 + } __no_const ops;
78967 };
78968
78969 struct snd_ice1712_spdif {
78970 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
78971 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
78972 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
78973 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
78974 - } ops;
78975 + } __no_const ops;
78976 };
78977
78978
78979 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
78980 index a8159b81..5f006a5 100644
78981 --- a/sound/pci/ymfpci/ymfpci_main.c
78982 +++ b/sound/pci/ymfpci/ymfpci_main.c
78983 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
78984 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
78985 break;
78986 }
78987 - if (atomic_read(&chip->interrupt_sleep_count)) {
78988 - atomic_set(&chip->interrupt_sleep_count, 0);
78989 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
78990 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
78991 wake_up(&chip->interrupt_sleep);
78992 }
78993 __end:
78994 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
78995 continue;
78996 init_waitqueue_entry(&wait, current);
78997 add_wait_queue(&chip->interrupt_sleep, &wait);
78998 - atomic_inc(&chip->interrupt_sleep_count);
78999 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
79000 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
79001 remove_wait_queue(&chip->interrupt_sleep, &wait);
79002 }
79003 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
79004 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
79005 spin_unlock(&chip->reg_lock);
79006
79007 - if (atomic_read(&chip->interrupt_sleep_count)) {
79008 - atomic_set(&chip->interrupt_sleep_count, 0);
79009 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
79010 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
79011 wake_up(&chip->interrupt_sleep);
79012 }
79013 }
79014 @@ -2398,7 +2398,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
79015 spin_lock_init(&chip->reg_lock);
79016 spin_lock_init(&chip->voice_lock);
79017 init_waitqueue_head(&chip->interrupt_sleep);
79018 - atomic_set(&chip->interrupt_sleep_count, 0);
79019 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
79020 chip->card = card;
79021 chip->pci = pci;
79022 chip->irq = -1;
79023 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
79024 index 0ad8dca..7186339 100644
79025 --- a/sound/soc/soc-pcm.c
79026 +++ b/sound/soc/soc-pcm.c
79027 @@ -641,7 +641,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
79028 struct snd_soc_platform *platform = rtd->platform;
79029 struct snd_soc_dai *codec_dai = rtd->codec_dai;
79030 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
79031 - struct snd_pcm_ops *soc_pcm_ops = &rtd->ops;
79032 + snd_pcm_ops_no_const *soc_pcm_ops = &rtd->ops;
79033 struct snd_pcm *pcm;
79034 char new_name[64];
79035 int ret = 0, playback = 0, capture = 0;
79036 diff --git a/sound/usb/card.h b/sound/usb/card.h
79037 index da5fa1a..113cd02 100644
79038 --- a/sound/usb/card.h
79039 +++ b/sound/usb/card.h
79040 @@ -45,6 +45,7 @@ struct snd_urb_ops {
79041 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
79042 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
79043 };
79044 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
79045
79046 struct snd_usb_substream {
79047 struct snd_usb_stream *stream;
79048 @@ -94,7 +95,7 @@ struct snd_usb_substream {
79049 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
79050 spinlock_t lock;
79051
79052 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
79053 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
79054 int last_frame_number; /* stored frame number */
79055 int last_delay; /* stored delay */
79056 };
79057 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
79058 new file mode 100644
79059 index 0000000..ca64170
79060 --- /dev/null
79061 +++ b/tools/gcc/Makefile
79062 @@ -0,0 +1,26 @@
79063 +#CC := gcc
79064 +#PLUGIN_SOURCE_FILES := pax_plugin.c
79065 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
79066 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
79067 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
79068 +
79069 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
79070 +CFLAGS_size_overflow_plugin.o := -Wno-missing-initializer
79071 +
79072 +hostlibs-y := constify_plugin.so
79073 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
79074 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
79075 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
79076 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
79077 +hostlibs-y += colorize_plugin.so
79078 +hostlibs-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
79079 +
79080 +always := $(hostlibs-y)
79081 +
79082 +constify_plugin-objs := constify_plugin.o
79083 +stackleak_plugin-objs := stackleak_plugin.o
79084 +kallocstat_plugin-objs := kallocstat_plugin.o
79085 +kernexec_plugin-objs := kernexec_plugin.o
79086 +checker_plugin-objs := checker_plugin.o
79087 +colorize_plugin-objs := colorize_plugin.o
79088 +size_overflow_plugin-objs := size_overflow_plugin.o
79089 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
79090 new file mode 100644
79091 index 0000000..d41b5af
79092 --- /dev/null
79093 +++ b/tools/gcc/checker_plugin.c
79094 @@ -0,0 +1,171 @@
79095 +/*
79096 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79097 + * Licensed under the GPL v2
79098 + *
79099 + * Note: the choice of the license means that the compilation process is
79100 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79101 + * but for the kernel it doesn't matter since it doesn't link against
79102 + * any of the gcc libraries
79103 + *
79104 + * gcc plugin to implement various sparse (source code checker) features
79105 + *
79106 + * TODO:
79107 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
79108 + *
79109 + * BUGS:
79110 + * - none known
79111 + */
79112 +#include "gcc-plugin.h"
79113 +#include "config.h"
79114 +#include "system.h"
79115 +#include "coretypes.h"
79116 +#include "tree.h"
79117 +#include "tree-pass.h"
79118 +#include "flags.h"
79119 +#include "intl.h"
79120 +#include "toplev.h"
79121 +#include "plugin.h"
79122 +//#include "expr.h" where are you...
79123 +#include "diagnostic.h"
79124 +#include "plugin-version.h"
79125 +#include "tm.h"
79126 +#include "function.h"
79127 +#include "basic-block.h"
79128 +#include "gimple.h"
79129 +#include "rtl.h"
79130 +#include "emit-rtl.h"
79131 +#include "tree-flow.h"
79132 +#include "target.h"
79133 +
79134 +extern void c_register_addr_space (const char *str, addr_space_t as);
79135 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
79136 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
79137 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
79138 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
79139 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
79140 +
79141 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79142 +extern rtx emit_move_insn(rtx x, rtx y);
79143 +
79144 +int plugin_is_GPL_compatible;
79145 +
79146 +static struct plugin_info checker_plugin_info = {
79147 + .version = "201111150100",
79148 +};
79149 +
79150 +#define ADDR_SPACE_KERNEL 0
79151 +#define ADDR_SPACE_FORCE_KERNEL 1
79152 +#define ADDR_SPACE_USER 2
79153 +#define ADDR_SPACE_FORCE_USER 3
79154 +#define ADDR_SPACE_IOMEM 0
79155 +#define ADDR_SPACE_FORCE_IOMEM 0
79156 +#define ADDR_SPACE_PERCPU 0
79157 +#define ADDR_SPACE_FORCE_PERCPU 0
79158 +#define ADDR_SPACE_RCU 0
79159 +#define ADDR_SPACE_FORCE_RCU 0
79160 +
79161 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
79162 +{
79163 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
79164 +}
79165 +
79166 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
79167 +{
79168 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
79169 +}
79170 +
79171 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
79172 +{
79173 + return default_addr_space_valid_pointer_mode(mode, as);
79174 +}
79175 +
79176 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
79177 +{
79178 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
79179 +}
79180 +
79181 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
79182 +{
79183 + return default_addr_space_legitimize_address(x, oldx, mode, as);
79184 +}
79185 +
79186 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
79187 +{
79188 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
79189 + return true;
79190 +
79191 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
79192 + return true;
79193 +
79194 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
79195 + return true;
79196 +
79197 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
79198 + return true;
79199 +
79200 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
79201 + return true;
79202 +
79203 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
79204 + return true;
79205 +
79206 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
79207 + return true;
79208 +
79209 + return subset == superset;
79210 +}
79211 +
79212 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
79213 +{
79214 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
79215 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
79216 +
79217 + return op;
79218 +}
79219 +
79220 +static void register_checker_address_spaces(void *event_data, void *data)
79221 +{
79222 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
79223 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
79224 + c_register_addr_space("__user", ADDR_SPACE_USER);
79225 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
79226 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
79227 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
79228 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
79229 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
79230 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
79231 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
79232 +
79233 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
79234 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
79235 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
79236 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
79237 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
79238 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
79239 + targetm.addr_space.convert = checker_addr_space_convert;
79240 +}
79241 +
79242 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79243 +{
79244 + const char * const plugin_name = plugin_info->base_name;
79245 + const int argc = plugin_info->argc;
79246 + const struct plugin_argument * const argv = plugin_info->argv;
79247 + int i;
79248 +
79249 + if (!plugin_default_version_check(version, &gcc_version)) {
79250 + error(G_("incompatible gcc/plugin versions"));
79251 + return 1;
79252 + }
79253 +
79254 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
79255 +
79256 + for (i = 0; i < argc; ++i)
79257 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79258 +
79259 + if (TARGET_64BIT == 0)
79260 + return 0;
79261 +
79262 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
79263 +
79264 + return 0;
79265 +}
79266 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
79267 new file mode 100644
79268 index 0000000..ee950d0
79269 --- /dev/null
79270 +++ b/tools/gcc/colorize_plugin.c
79271 @@ -0,0 +1,147 @@
79272 +/*
79273 + * Copyright 2012 by PaX Team <pageexec@freemail.hu>
79274 + * Licensed under the GPL v2
79275 + *
79276 + * Note: the choice of the license means that the compilation process is
79277 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79278 + * but for the kernel it doesn't matter since it doesn't link against
79279 + * any of the gcc libraries
79280 + *
79281 + * gcc plugin to colorize diagnostic output
79282 + *
79283 + */
79284 +
79285 +#include "gcc-plugin.h"
79286 +#include "config.h"
79287 +#include "system.h"
79288 +#include "coretypes.h"
79289 +#include "tree.h"
79290 +#include "tree-pass.h"
79291 +#include "flags.h"
79292 +#include "intl.h"
79293 +#include "toplev.h"
79294 +#include "plugin.h"
79295 +#include "diagnostic.h"
79296 +#include "plugin-version.h"
79297 +#include "tm.h"
79298 +
79299 +int plugin_is_GPL_compatible;
79300 +
79301 +static struct plugin_info colorize_plugin_info = {
79302 + .version = "201203092200",
79303 +};
79304 +
79305 +#define GREEN "\033[32m\033[2m"
79306 +#define LIGHTGREEN "\033[32m\033[1m"
79307 +#define YELLOW "\033[33m\033[2m"
79308 +#define LIGHTYELLOW "\033[33m\033[1m"
79309 +#define RED "\033[31m\033[2m"
79310 +#define LIGHTRED "\033[31m\033[1m"
79311 +#define BLUE "\033[34m\033[2m"
79312 +#define LIGHTBLUE "\033[34m\033[1m"
79313 +#define BRIGHT "\033[m\033[1m"
79314 +#define NORMAL "\033[m"
79315 +
79316 +static diagnostic_starter_fn old_starter;
79317 +static diagnostic_finalizer_fn old_finalizer;
79318 +
79319 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
79320 +{
79321 + const char *color;
79322 + char *newprefix;
79323 +
79324 + switch (diagnostic->kind) {
79325 + case DK_NOTE:
79326 + color = LIGHTBLUE;
79327 + break;
79328 +
79329 + case DK_PEDWARN:
79330 + case DK_WARNING:
79331 + color = LIGHTYELLOW;
79332 + break;
79333 +
79334 + case DK_ERROR:
79335 + case DK_FATAL:
79336 + case DK_ICE:
79337 + case DK_PERMERROR:
79338 + case DK_SORRY:
79339 + color = LIGHTRED;
79340 + break;
79341 +
79342 + default:
79343 + color = NORMAL;
79344 + }
79345 +
79346 + old_starter(context, diagnostic);
79347 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
79348 + return;
79349 + pp_destroy_prefix(context->printer);
79350 + pp_set_prefix(context->printer, newprefix);
79351 +}
79352 +
79353 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
79354 +{
79355 + old_finalizer(context, diagnostic);
79356 +}
79357 +
79358 +static void colorize_arm(void)
79359 +{
79360 + old_starter = diagnostic_starter(global_dc);
79361 + old_finalizer = diagnostic_finalizer(global_dc);
79362 +
79363 + diagnostic_starter(global_dc) = start_colorize;
79364 + diagnostic_finalizer(global_dc) = finalize_colorize;
79365 +}
79366 +
79367 +static unsigned int execute_colorize_rearm(void)
79368 +{
79369 + if (diagnostic_starter(global_dc) == start_colorize)
79370 + return 0;
79371 +
79372 + colorize_arm();
79373 + return 0;
79374 +}
79375 +
79376 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
79377 + .pass = {
79378 + .type = SIMPLE_IPA_PASS,
79379 + .name = "colorize_rearm",
79380 + .gate = NULL,
79381 + .execute = execute_colorize_rearm,
79382 + .sub = NULL,
79383 + .next = NULL,
79384 + .static_pass_number = 0,
79385 + .tv_id = TV_NONE,
79386 + .properties_required = 0,
79387 + .properties_provided = 0,
79388 + .properties_destroyed = 0,
79389 + .todo_flags_start = 0,
79390 + .todo_flags_finish = 0
79391 + }
79392 +};
79393 +
79394 +static void colorize_start_unit(void *gcc_data, void *user_data)
79395 +{
79396 + colorize_arm();
79397 +}
79398 +
79399 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79400 +{
79401 + const char * const plugin_name = plugin_info->base_name;
79402 + struct register_pass_info colorize_rearm_pass_info = {
79403 + .pass = &pass_ipa_colorize_rearm.pass,
79404 + .reference_pass_name = "*free_lang_data",
79405 + .ref_pass_instance_number = 0,
79406 + .pos_op = PASS_POS_INSERT_AFTER
79407 + };
79408 +
79409 + if (!plugin_default_version_check(version, &gcc_version)) {
79410 + error(G_("incompatible gcc/plugin versions"));
79411 + return 1;
79412 + }
79413 +
79414 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
79415 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
79416 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
79417 + return 0;
79418 +}
79419 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
79420 new file mode 100644
79421 index 0000000..89b7f56
79422 --- /dev/null
79423 +++ b/tools/gcc/constify_plugin.c
79424 @@ -0,0 +1,328 @@
79425 +/*
79426 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
79427 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
79428 + * Licensed under the GPL v2, or (at your option) v3
79429 + *
79430 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
79431 + *
79432 + * Homepage:
79433 + * http://www.grsecurity.net/~ephox/const_plugin/
79434 + *
79435 + * Usage:
79436 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
79437 + * $ gcc -fplugin=constify_plugin.so test.c -O2
79438 + */
79439 +
79440 +#include "gcc-plugin.h"
79441 +#include "config.h"
79442 +#include "system.h"
79443 +#include "coretypes.h"
79444 +#include "tree.h"
79445 +#include "tree-pass.h"
79446 +#include "flags.h"
79447 +#include "intl.h"
79448 +#include "toplev.h"
79449 +#include "plugin.h"
79450 +#include "diagnostic.h"
79451 +#include "plugin-version.h"
79452 +#include "tm.h"
79453 +#include "function.h"
79454 +#include "basic-block.h"
79455 +#include "gimple.h"
79456 +#include "rtl.h"
79457 +#include "emit-rtl.h"
79458 +#include "tree-flow.h"
79459 +
79460 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
79461 +
79462 +int plugin_is_GPL_compatible;
79463 +
79464 +static struct plugin_info const_plugin_info = {
79465 + .version = "201205300030",
79466 + .help = "no-constify\tturn off constification\n",
79467 +};
79468 +
79469 +static void deconstify_tree(tree node);
79470 +
79471 +static void deconstify_type(tree type)
79472 +{
79473 + tree field;
79474 +
79475 + for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
79476 + tree type = TREE_TYPE(field);
79477 +
79478 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
79479 + continue;
79480 + if (!TYPE_READONLY(type))
79481 + continue;
79482 +
79483 + deconstify_tree(field);
79484 + }
79485 + TYPE_READONLY(type) = 0;
79486 + C_TYPE_FIELDS_READONLY(type) = 0;
79487 +}
79488 +
79489 +static void deconstify_tree(tree node)
79490 +{
79491 + tree old_type, new_type, field;
79492 +
79493 + old_type = TREE_TYPE(node);
79494 +
79495 + gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
79496 +
79497 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
79498 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
79499 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
79500 + DECL_FIELD_CONTEXT(field) = new_type;
79501 +
79502 + deconstify_type(new_type);
79503 +
79504 + TREE_READONLY(node) = 0;
79505 + TREE_TYPE(node) = new_type;
79506 +}
79507 +
79508 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
79509 +{
79510 + tree type;
79511 +
79512 + *no_add_attrs = true;
79513 + if (TREE_CODE(*node) == FUNCTION_DECL) {
79514 + error("%qE attribute does not apply to functions", name);
79515 + return NULL_TREE;
79516 + }
79517 +
79518 + if (TREE_CODE(*node) == VAR_DECL) {
79519 + error("%qE attribute does not apply to variables", name);
79520 + return NULL_TREE;
79521 + }
79522 +
79523 + if (TYPE_P(*node)) {
79524 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
79525 + *no_add_attrs = false;
79526 + else
79527 + error("%qE attribute applies to struct and union types only", name);
79528 + return NULL_TREE;
79529 + }
79530 +
79531 + type = TREE_TYPE(*node);
79532 +
79533 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
79534 + error("%qE attribute applies to struct and union types only", name);
79535 + return NULL_TREE;
79536 + }
79537 +
79538 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
79539 + error("%qE attribute is already applied to the type", name);
79540 + return NULL_TREE;
79541 + }
79542 +
79543 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
79544 + error("%qE attribute used on type that is not constified", name);
79545 + return NULL_TREE;
79546 + }
79547 +
79548 + if (TREE_CODE(*node) == TYPE_DECL) {
79549 + deconstify_tree(*node);
79550 + return NULL_TREE;
79551 + }
79552 +
79553 + return NULL_TREE;
79554 +}
79555 +
79556 +static void constify_type(tree type)
79557 +{
79558 + TYPE_READONLY(type) = 1;
79559 + C_TYPE_FIELDS_READONLY(type) = 1;
79560 +}
79561 +
79562 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
79563 +{
79564 + *no_add_attrs = true;
79565 + if (!TYPE_P(*node)) {
79566 + error("%qE attribute applies to types only", name);
79567 + return NULL_TREE;
79568 + }
79569 +
79570 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
79571 + error("%qE attribute applies to struct and union types only", name);
79572 + return NULL_TREE;
79573 + }
79574 +
79575 + *no_add_attrs = false;
79576 + constify_type(*node);
79577 + return NULL_TREE;
79578 +}
79579 +
79580 +static struct attribute_spec no_const_attr = {
79581 + .name = "no_const",
79582 + .min_length = 0,
79583 + .max_length = 0,
79584 + .decl_required = false,
79585 + .type_required = false,
79586 + .function_type_required = false,
79587 + .handler = handle_no_const_attribute,
79588 +#if BUILDING_GCC_VERSION >= 4007
79589 + .affects_type_identity = true
79590 +#endif
79591 +};
79592 +
79593 +static struct attribute_spec do_const_attr = {
79594 + .name = "do_const",
79595 + .min_length = 0,
79596 + .max_length = 0,
79597 + .decl_required = false,
79598 + .type_required = false,
79599 + .function_type_required = false,
79600 + .handler = handle_do_const_attribute,
79601 +#if BUILDING_GCC_VERSION >= 4007
79602 + .affects_type_identity = true
79603 +#endif
79604 +};
79605 +
79606 +static void register_attributes(void *event_data, void *data)
79607 +{
79608 + register_attribute(&no_const_attr);
79609 + register_attribute(&do_const_attr);
79610 +}
79611 +
79612 +static bool is_fptr(tree field)
79613 +{
79614 + tree ptr = TREE_TYPE(field);
79615 +
79616 + if (TREE_CODE(ptr) != POINTER_TYPE)
79617 + return false;
79618 +
79619 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
79620 +}
79621 +
79622 +static bool walk_struct(tree node)
79623 +{
79624 + tree field;
79625 +
79626 + if (TYPE_FIELDS(node) == NULL_TREE)
79627 + return false;
79628 +
79629 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
79630 + gcc_assert(!TYPE_READONLY(node));
79631 + deconstify_type(node);
79632 + return false;
79633 + }
79634 +
79635 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
79636 + tree type = TREE_TYPE(field);
79637 + enum tree_code code = TREE_CODE(type);
79638 + if (code == RECORD_TYPE || code == UNION_TYPE) {
79639 + if (!(walk_struct(type)))
79640 + return false;
79641 + } else if (!is_fptr(field) && !TREE_READONLY(field))
79642 + return false;
79643 + }
79644 + return true;
79645 +}
79646 +
79647 +static void finish_type(void *event_data, void *data)
79648 +{
79649 + tree type = (tree)event_data;
79650 +
79651 + if (type == NULL_TREE)
79652 + return;
79653 +
79654 + if (TYPE_READONLY(type))
79655 + return;
79656 +
79657 + if (walk_struct(type))
79658 + constify_type(type);
79659 +}
79660 +
79661 +static unsigned int check_local_variables(void);
79662 +
79663 +struct gimple_opt_pass pass_local_variable = {
79664 + {
79665 + .type = GIMPLE_PASS,
79666 + .name = "check_local_variables",
79667 + .gate = NULL,
79668 + .execute = check_local_variables,
79669 + .sub = NULL,
79670 + .next = NULL,
79671 + .static_pass_number = 0,
79672 + .tv_id = TV_NONE,
79673 + .properties_required = 0,
79674 + .properties_provided = 0,
79675 + .properties_destroyed = 0,
79676 + .todo_flags_start = 0,
79677 + .todo_flags_finish = 0
79678 + }
79679 +};
79680 +
79681 +static unsigned int check_local_variables(void)
79682 +{
79683 + tree var;
79684 + referenced_var_iterator rvi;
79685 +
79686 +#if BUILDING_GCC_VERSION == 4005
79687 + FOR_EACH_REFERENCED_VAR(var, rvi) {
79688 +#else
79689 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
79690 +#endif
79691 + tree type = TREE_TYPE(var);
79692 +
79693 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
79694 + continue;
79695 +
79696 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
79697 + continue;
79698 +
79699 + if (!TYPE_READONLY(type))
79700 + continue;
79701 +
79702 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
79703 +// continue;
79704 +
79705 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
79706 +// continue;
79707 +
79708 + if (walk_struct(type)) {
79709 + error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
79710 + return 1;
79711 + }
79712 + }
79713 + return 0;
79714 +}
79715 +
79716 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79717 +{
79718 + const char * const plugin_name = plugin_info->base_name;
79719 + const int argc = plugin_info->argc;
79720 + const struct plugin_argument * const argv = plugin_info->argv;
79721 + int i;
79722 + bool constify = true;
79723 +
79724 + struct register_pass_info local_variable_pass_info = {
79725 + .pass = &pass_local_variable.pass,
79726 + .reference_pass_name = "*referenced_vars",
79727 + .ref_pass_instance_number = 0,
79728 + .pos_op = PASS_POS_INSERT_AFTER
79729 + };
79730 +
79731 + if (!plugin_default_version_check(version, &gcc_version)) {
79732 + error(G_("incompatible gcc/plugin versions"));
79733 + return 1;
79734 + }
79735 +
79736 + for (i = 0; i < argc; ++i) {
79737 + if (!(strcmp(argv[i].key, "no-constify"))) {
79738 + constify = false;
79739 + continue;
79740 + }
79741 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79742 + }
79743 +
79744 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
79745 + if (constify) {
79746 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
79747 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
79748 + }
79749 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
79750 +
79751 + return 0;
79752 +}
79753 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
79754 new file mode 100644
79755 index 0000000..a5eabce
79756 --- /dev/null
79757 +++ b/tools/gcc/kallocstat_plugin.c
79758 @@ -0,0 +1,167 @@
79759 +/*
79760 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79761 + * Licensed under the GPL v2
79762 + *
79763 + * Note: the choice of the license means that the compilation process is
79764 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79765 + * but for the kernel it doesn't matter since it doesn't link against
79766 + * any of the gcc libraries
79767 + *
79768 + * gcc plugin to find the distribution of k*alloc sizes
79769 + *
79770 + * TODO:
79771 + *
79772 + * BUGS:
79773 + * - none known
79774 + */
79775 +#include "gcc-plugin.h"
79776 +#include "config.h"
79777 +#include "system.h"
79778 +#include "coretypes.h"
79779 +#include "tree.h"
79780 +#include "tree-pass.h"
79781 +#include "flags.h"
79782 +#include "intl.h"
79783 +#include "toplev.h"
79784 +#include "plugin.h"
79785 +//#include "expr.h" where are you...
79786 +#include "diagnostic.h"
79787 +#include "plugin-version.h"
79788 +#include "tm.h"
79789 +#include "function.h"
79790 +#include "basic-block.h"
79791 +#include "gimple.h"
79792 +#include "rtl.h"
79793 +#include "emit-rtl.h"
79794 +
79795 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79796 +
79797 +int plugin_is_GPL_compatible;
79798 +
79799 +static const char * const kalloc_functions[] = {
79800 + "__kmalloc",
79801 + "kmalloc",
79802 + "kmalloc_large",
79803 + "kmalloc_node",
79804 + "kmalloc_order",
79805 + "kmalloc_order_trace",
79806 + "kmalloc_slab",
79807 + "kzalloc",
79808 + "kzalloc_node",
79809 +};
79810 +
79811 +static struct plugin_info kallocstat_plugin_info = {
79812 + .version = "201111150100",
79813 +};
79814 +
79815 +static unsigned int execute_kallocstat(void);
79816 +
79817 +static struct gimple_opt_pass kallocstat_pass = {
79818 + .pass = {
79819 + .type = GIMPLE_PASS,
79820 + .name = "kallocstat",
79821 + .gate = NULL,
79822 + .execute = execute_kallocstat,
79823 + .sub = NULL,
79824 + .next = NULL,
79825 + .static_pass_number = 0,
79826 + .tv_id = TV_NONE,
79827 + .properties_required = 0,
79828 + .properties_provided = 0,
79829 + .properties_destroyed = 0,
79830 + .todo_flags_start = 0,
79831 + .todo_flags_finish = 0
79832 + }
79833 +};
79834 +
79835 +static bool is_kalloc(const char *fnname)
79836 +{
79837 + size_t i;
79838 +
79839 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
79840 + if (!strcmp(fnname, kalloc_functions[i]))
79841 + return true;
79842 + return false;
79843 +}
79844 +
79845 +static unsigned int execute_kallocstat(void)
79846 +{
79847 + basic_block bb;
79848 +
79849 + // 1. loop through BBs and GIMPLE statements
79850 + FOR_EACH_BB(bb) {
79851 + gimple_stmt_iterator gsi;
79852 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
79853 + // gimple match:
79854 + tree fndecl, size;
79855 + gimple call_stmt;
79856 + const char *fnname;
79857 +
79858 + // is it a call
79859 + call_stmt = gsi_stmt(gsi);
79860 + if (!is_gimple_call(call_stmt))
79861 + continue;
79862 + fndecl = gimple_call_fndecl(call_stmt);
79863 + if (fndecl == NULL_TREE)
79864 + continue;
79865 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
79866 + continue;
79867 +
79868 + // is it a call to k*alloc
79869 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
79870 + if (!is_kalloc(fnname))
79871 + continue;
79872 +
79873 + // is the size arg the result of a simple const assignment
79874 + size = gimple_call_arg(call_stmt, 0);
79875 + while (true) {
79876 + gimple def_stmt;
79877 + expanded_location xloc;
79878 + size_t size_val;
79879 +
79880 + if (TREE_CODE(size) != SSA_NAME)
79881 + break;
79882 + def_stmt = SSA_NAME_DEF_STMT(size);
79883 + if (!def_stmt || !is_gimple_assign(def_stmt))
79884 + break;
79885 + if (gimple_num_ops(def_stmt) != 2)
79886 + break;
79887 + size = gimple_assign_rhs1(def_stmt);
79888 + if (!TREE_CONSTANT(size))
79889 + continue;
79890 + xloc = expand_location(gimple_location(def_stmt));
79891 + if (!xloc.file)
79892 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
79893 + size_val = TREE_INT_CST_LOW(size);
79894 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
79895 + break;
79896 + }
79897 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
79898 +//debug_tree(gimple_call_fn(call_stmt));
79899 +//print_node(stderr, "pax", fndecl, 4);
79900 + }
79901 + }
79902 +
79903 + return 0;
79904 +}
79905 +
79906 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79907 +{
79908 + const char * const plugin_name = plugin_info->base_name;
79909 + struct register_pass_info kallocstat_pass_info = {
79910 + .pass = &kallocstat_pass.pass,
79911 + .reference_pass_name = "ssa",
79912 + .ref_pass_instance_number = 0,
79913 + .pos_op = PASS_POS_INSERT_AFTER
79914 + };
79915 +
79916 + if (!plugin_default_version_check(version, &gcc_version)) {
79917 + error(G_("incompatible gcc/plugin versions"));
79918 + return 1;
79919 + }
79920 +
79921 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
79922 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
79923 +
79924 + return 0;
79925 +}
79926 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
79927 new file mode 100644
79928 index 0000000..d8a8da2
79929 --- /dev/null
79930 +++ b/tools/gcc/kernexec_plugin.c
79931 @@ -0,0 +1,427 @@
79932 +/*
79933 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79934 + * Licensed under the GPL v2
79935 + *
79936 + * Note: the choice of the license means that the compilation process is
79937 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79938 + * but for the kernel it doesn't matter since it doesn't link against
79939 + * any of the gcc libraries
79940 + *
79941 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
79942 + *
79943 + * TODO:
79944 + *
79945 + * BUGS:
79946 + * - none known
79947 + */
79948 +#include "gcc-plugin.h"
79949 +#include "config.h"
79950 +#include "system.h"
79951 +#include "coretypes.h"
79952 +#include "tree.h"
79953 +#include "tree-pass.h"
79954 +#include "flags.h"
79955 +#include "intl.h"
79956 +#include "toplev.h"
79957 +#include "plugin.h"
79958 +//#include "expr.h" where are you...
79959 +#include "diagnostic.h"
79960 +#include "plugin-version.h"
79961 +#include "tm.h"
79962 +#include "function.h"
79963 +#include "basic-block.h"
79964 +#include "gimple.h"
79965 +#include "rtl.h"
79966 +#include "emit-rtl.h"
79967 +#include "tree-flow.h"
79968 +
79969 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79970 +extern rtx emit_move_insn(rtx x, rtx y);
79971 +
79972 +int plugin_is_GPL_compatible;
79973 +
79974 +static struct plugin_info kernexec_plugin_info = {
79975 + .version = "201111291120",
79976 + .help = "method=[bts|or]\tinstrumentation method\n"
79977 +};
79978 +
79979 +static unsigned int execute_kernexec_reload(void);
79980 +static unsigned int execute_kernexec_fptr(void);
79981 +static unsigned int execute_kernexec_retaddr(void);
79982 +static bool kernexec_cmodel_check(void);
79983 +
79984 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
79985 +static void (*kernexec_instrument_retaddr)(rtx);
79986 +
79987 +static struct gimple_opt_pass kernexec_reload_pass = {
79988 + .pass = {
79989 + .type = GIMPLE_PASS,
79990 + .name = "kernexec_reload",
79991 + .gate = kernexec_cmodel_check,
79992 + .execute = execute_kernexec_reload,
79993 + .sub = NULL,
79994 + .next = NULL,
79995 + .static_pass_number = 0,
79996 + .tv_id = TV_NONE,
79997 + .properties_required = 0,
79998 + .properties_provided = 0,
79999 + .properties_destroyed = 0,
80000 + .todo_flags_start = 0,
80001 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
80002 + }
80003 +};
80004 +
80005 +static struct gimple_opt_pass kernexec_fptr_pass = {
80006 + .pass = {
80007 + .type = GIMPLE_PASS,
80008 + .name = "kernexec_fptr",
80009 + .gate = kernexec_cmodel_check,
80010 + .execute = execute_kernexec_fptr,
80011 + .sub = NULL,
80012 + .next = NULL,
80013 + .static_pass_number = 0,
80014 + .tv_id = TV_NONE,
80015 + .properties_required = 0,
80016 + .properties_provided = 0,
80017 + .properties_destroyed = 0,
80018 + .todo_flags_start = 0,
80019 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
80020 + }
80021 +};
80022 +
80023 +static struct rtl_opt_pass kernexec_retaddr_pass = {
80024 + .pass = {
80025 + .type = RTL_PASS,
80026 + .name = "kernexec_retaddr",
80027 + .gate = kernexec_cmodel_check,
80028 + .execute = execute_kernexec_retaddr,
80029 + .sub = NULL,
80030 + .next = NULL,
80031 + .static_pass_number = 0,
80032 + .tv_id = TV_NONE,
80033 + .properties_required = 0,
80034 + .properties_provided = 0,
80035 + .properties_destroyed = 0,
80036 + .todo_flags_start = 0,
80037 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
80038 + }
80039 +};
80040 +
80041 +static bool kernexec_cmodel_check(void)
80042 +{
80043 + tree section;
80044 +
80045 + if (ix86_cmodel != CM_KERNEL)
80046 + return false;
80047 +
80048 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
80049 + if (!section || !TREE_VALUE(section))
80050 + return true;
80051 +
80052 + section = TREE_VALUE(TREE_VALUE(section));
80053 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
80054 + return true;
80055 +
80056 + return false;
80057 +}
80058 +
80059 +/*
80060 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
80061 + */
80062 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
80063 +{
80064 + gimple asm_movabs_stmt;
80065 +
80066 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
80067 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
80068 + gimple_asm_set_volatile(asm_movabs_stmt, true);
80069 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
80070 + update_stmt(asm_movabs_stmt);
80071 +}
80072 +
80073 +/*
80074 + * find all asm() stmts that clobber r10 and add a reload of r10
80075 + */
80076 +static unsigned int execute_kernexec_reload(void)
80077 +{
80078 + basic_block bb;
80079 +
80080 + // 1. loop through BBs and GIMPLE statements
80081 + FOR_EACH_BB(bb) {
80082 + gimple_stmt_iterator gsi;
80083 +
80084 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80085 + // gimple match: __asm__ ("" : : : "r10");
80086 + gimple asm_stmt;
80087 + size_t nclobbers;
80088 +
80089 + // is it an asm ...
80090 + asm_stmt = gsi_stmt(gsi);
80091 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
80092 + continue;
80093 +
80094 + // ... clobbering r10
80095 + nclobbers = gimple_asm_nclobbers(asm_stmt);
80096 + while (nclobbers--) {
80097 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
80098 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
80099 + continue;
80100 + kernexec_reload_fptr_mask(&gsi);
80101 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
80102 + break;
80103 + }
80104 + }
80105 + }
80106 +
80107 + return 0;
80108 +}
80109 +
80110 +/*
80111 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
80112 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
80113 + */
80114 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
80115 +{
80116 + gimple assign_intptr, assign_new_fptr, call_stmt;
80117 + tree intptr, old_fptr, new_fptr, kernexec_mask;
80118 +
80119 + call_stmt = gsi_stmt(*gsi);
80120 + old_fptr = gimple_call_fn(call_stmt);
80121 +
80122 + // create temporary unsigned long variable used for bitops and cast fptr to it
80123 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
80124 + add_referenced_var(intptr);
80125 + mark_sym_for_renaming(intptr);
80126 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
80127 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
80128 + update_stmt(assign_intptr);
80129 +
80130 + // apply logical or to temporary unsigned long and bitmask
80131 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
80132 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
80133 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
80134 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
80135 + update_stmt(assign_intptr);
80136 +
80137 + // cast temporary unsigned long back to a temporary fptr variable
80138 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
80139 + add_referenced_var(new_fptr);
80140 + mark_sym_for_renaming(new_fptr);
80141 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
80142 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
80143 + update_stmt(assign_new_fptr);
80144 +
80145 + // replace call stmt fn with the new fptr
80146 + gimple_call_set_fn(call_stmt, new_fptr);
80147 + update_stmt(call_stmt);
80148 +}
80149 +
80150 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
80151 +{
80152 + gimple asm_or_stmt, call_stmt;
80153 + tree old_fptr, new_fptr, input, output;
80154 + VEC(tree, gc) *inputs = NULL;
80155 + VEC(tree, gc) *outputs = NULL;
80156 +
80157 + call_stmt = gsi_stmt(*gsi);
80158 + old_fptr = gimple_call_fn(call_stmt);
80159 +
80160 + // create temporary fptr variable
80161 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
80162 + add_referenced_var(new_fptr);
80163 + mark_sym_for_renaming(new_fptr);
80164 +
80165 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
80166 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
80167 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
80168 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
80169 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
80170 + VEC_safe_push(tree, gc, inputs, input);
80171 + VEC_safe_push(tree, gc, outputs, output);
80172 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
80173 + gimple_asm_set_volatile(asm_or_stmt, true);
80174 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
80175 + update_stmt(asm_or_stmt);
80176 +
80177 + // replace call stmt fn with the new fptr
80178 + gimple_call_set_fn(call_stmt, new_fptr);
80179 + update_stmt(call_stmt);
80180 +}
80181 +
80182 +/*
80183 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
80184 + */
80185 +static unsigned int execute_kernexec_fptr(void)
80186 +{
80187 + basic_block bb;
80188 +
80189 + // 1. loop through BBs and GIMPLE statements
80190 + FOR_EACH_BB(bb) {
80191 + gimple_stmt_iterator gsi;
80192 +
80193 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80194 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
80195 + tree fn;
80196 + gimple call_stmt;
80197 +
80198 + // is it a call ...
80199 + call_stmt = gsi_stmt(gsi);
80200 + if (!is_gimple_call(call_stmt))
80201 + continue;
80202 + fn = gimple_call_fn(call_stmt);
80203 + if (TREE_CODE(fn) == ADDR_EXPR)
80204 + continue;
80205 + if (TREE_CODE(fn) != SSA_NAME)
80206 + gcc_unreachable();
80207 +
80208 + // ... through a function pointer
80209 + fn = SSA_NAME_VAR(fn);
80210 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
80211 + continue;
80212 + fn = TREE_TYPE(fn);
80213 + if (TREE_CODE(fn) != POINTER_TYPE)
80214 + continue;
80215 + fn = TREE_TYPE(fn);
80216 + if (TREE_CODE(fn) != FUNCTION_TYPE)
80217 + continue;
80218 +
80219 + kernexec_instrument_fptr(&gsi);
80220 +
80221 +//debug_tree(gimple_call_fn(call_stmt));
80222 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
80223 + }
80224 + }
80225 +
80226 + return 0;
80227 +}
80228 +
80229 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
80230 +static void kernexec_instrument_retaddr_bts(rtx insn)
80231 +{
80232 + rtx btsq;
80233 + rtvec argvec, constraintvec, labelvec;
80234 + int line;
80235 +
80236 + // create asm volatile("btsq $63,(%%rsp)":::)
80237 + argvec = rtvec_alloc(0);
80238 + constraintvec = rtvec_alloc(0);
80239 + labelvec = rtvec_alloc(0);
80240 + line = expand_location(RTL_LOCATION(insn)).line;
80241 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
80242 + MEM_VOLATILE_P(btsq) = 1;
80243 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
80244 + emit_insn_before(btsq, insn);
80245 +}
80246 +
80247 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
80248 +static void kernexec_instrument_retaddr_or(rtx insn)
80249 +{
80250 + rtx orq;
80251 + rtvec argvec, constraintvec, labelvec;
80252 + int line;
80253 +
80254 + // create asm volatile("orq %%r10,(%%rsp)":::)
80255 + argvec = rtvec_alloc(0);
80256 + constraintvec = rtvec_alloc(0);
80257 + labelvec = rtvec_alloc(0);
80258 + line = expand_location(RTL_LOCATION(insn)).line;
80259 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
80260 + MEM_VOLATILE_P(orq) = 1;
80261 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
80262 + emit_insn_before(orq, insn);
80263 +}
80264 +
80265 +/*
80266 + * find all asm level function returns and forcibly set the highest bit of the return address
80267 + */
80268 +static unsigned int execute_kernexec_retaddr(void)
80269 +{
80270 + rtx insn;
80271 +
80272 + // 1. find function returns
80273 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
80274 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
80275 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
80276 + rtx body;
80277 +
80278 + // is it a retn
80279 + if (!JUMP_P(insn))
80280 + continue;
80281 + body = PATTERN(insn);
80282 + if (GET_CODE(body) == PARALLEL)
80283 + body = XVECEXP(body, 0, 0);
80284 + if (GET_CODE(body) != RETURN)
80285 + continue;
80286 + kernexec_instrument_retaddr(insn);
80287 + }
80288 +
80289 +// print_simple_rtl(stderr, get_insns());
80290 +// print_rtl(stderr, get_insns());
80291 +
80292 + return 0;
80293 +}
80294 +
80295 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80296 +{
80297 + const char * const plugin_name = plugin_info->base_name;
80298 + const int argc = plugin_info->argc;
80299 + const struct plugin_argument * const argv = plugin_info->argv;
80300 + int i;
80301 + struct register_pass_info kernexec_reload_pass_info = {
80302 + .pass = &kernexec_reload_pass.pass,
80303 + .reference_pass_name = "ssa",
80304 + .ref_pass_instance_number = 0,
80305 + .pos_op = PASS_POS_INSERT_AFTER
80306 + };
80307 + struct register_pass_info kernexec_fptr_pass_info = {
80308 + .pass = &kernexec_fptr_pass.pass,
80309 + .reference_pass_name = "ssa",
80310 + .ref_pass_instance_number = 0,
80311 + .pos_op = PASS_POS_INSERT_AFTER
80312 + };
80313 + struct register_pass_info kernexec_retaddr_pass_info = {
80314 + .pass = &kernexec_retaddr_pass.pass,
80315 + .reference_pass_name = "pro_and_epilogue",
80316 + .ref_pass_instance_number = 0,
80317 + .pos_op = PASS_POS_INSERT_AFTER
80318 + };
80319 +
80320 + if (!plugin_default_version_check(version, &gcc_version)) {
80321 + error(G_("incompatible gcc/plugin versions"));
80322 + return 1;
80323 + }
80324 +
80325 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
80326 +
80327 + if (TARGET_64BIT == 0)
80328 + return 0;
80329 +
80330 + for (i = 0; i < argc; ++i) {
80331 + if (!strcmp(argv[i].key, "method")) {
80332 + if (!argv[i].value) {
80333 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80334 + continue;
80335 + }
80336 + if (!strcmp(argv[i].value, "bts")) {
80337 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
80338 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
80339 + } else if (!strcmp(argv[i].value, "or")) {
80340 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
80341 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
80342 + fix_register("r10", 1, 1);
80343 + } else
80344 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
80345 + continue;
80346 + }
80347 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80348 + }
80349 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
80350 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
80351 +
80352 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
80353 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
80354 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
80355 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
80356 +
80357 + return 0;
80358 +}
80359 diff --git a/tools/gcc/size_overflow_hash.h b/tools/gcc/size_overflow_hash.h
80360 new file mode 100644
80361 index 0000000..ceb1b6a
80362 --- /dev/null
80363 +++ b/tools/gcc/size_overflow_hash.h
80364 @@ -0,0 +1,17283 @@
80365 +#define PARAM1 (1U << 1)
80366 +#define PARAM2 (1U << 2)
80367 +#define PARAM3 (1U << 3)
80368 +#define PARAM4 (1U << 4)
80369 +#define PARAM5 (1U << 5)
80370 +#define PARAM6 (1U << 6)
80371 +#define PARAM7 (1U << 7)
80372 +#define PARAM8 (1U << 8)
80373 +#define PARAM9 (1U << 9)
80374 +#define PARAM10 (1U << 10)
80375 +
80376 +struct size_overflow_hash _000001_hash = {
80377 + .next = NULL,
80378 + .name = "alloc_dr",
80379 + .param = PARAM2,
80380 +};
80381 +
80382 +struct size_overflow_hash _000002_hash = {
80383 + .next = NULL,
80384 + .name = "__copy_from_user",
80385 + .param = PARAM3,
80386 +};
80387 +
80388 +struct size_overflow_hash _000003_hash = {
80389 + .next = NULL,
80390 + .name = "copy_from_user",
80391 + .param = PARAM3,
80392 +};
80393 +
80394 +struct size_overflow_hash _000004_hash = {
80395 + .next = NULL,
80396 + .name = "__copy_from_user_inatomic",
80397 + .param = PARAM3,
80398 +};
80399 +
80400 +struct size_overflow_hash _000005_hash = {
80401 + .next = NULL,
80402 + .name = "__copy_from_user_nocache",
80403 + .param = PARAM3,
80404 +};
80405 +
80406 +struct size_overflow_hash _000006_hash = {
80407 + .next = NULL,
80408 + .name = "__copy_to_user_inatomic",
80409 + .param = PARAM3,
80410 +};
80411 +
80412 +struct size_overflow_hash _000007_hash = {
80413 + .next = NULL,
80414 + .name = "do_xip_mapping_read",
80415 + .param = PARAM5,
80416 +};
80417 +
80418 +struct size_overflow_hash _000008_hash = {
80419 + .next = NULL,
80420 + .name = "hugetlbfs_read",
80421 + .param = PARAM3,
80422 +};
80423 +
80424 +struct size_overflow_hash _000009_hash = {
80425 + .next = NULL,
80426 + .name = "kmalloc",
80427 + .param = PARAM1,
80428 +};
80429 +
80430 +struct size_overflow_hash _000010_hash = {
80431 + .next = NULL,
80432 + .name = "kmalloc_array",
80433 + .param = PARAM1|PARAM2,
80434 +};
80435 +
80436 +struct size_overflow_hash _000012_hash = {
80437 + .next = NULL,
80438 + .name = "kmalloc_slab",
80439 + .param = PARAM1,
80440 +};
80441 +
80442 +struct size_overflow_hash _000013_hash = {
80443 + .next = NULL,
80444 + .name = "kmemdup",
80445 + .param = PARAM2,
80446 +};
80447 +
80448 +struct size_overflow_hash _000014_hash = {
80449 + .next = NULL,
80450 + .name = "__krealloc",
80451 + .param = PARAM2,
80452 +};
80453 +
80454 +struct size_overflow_hash _000015_hash = {
80455 + .next = NULL,
80456 + .name = "memdup_user",
80457 + .param = PARAM2,
80458 +};
80459 +
80460 +struct size_overflow_hash _000016_hash = {
80461 + .next = NULL,
80462 + .name = "module_alloc",
80463 + .param = PARAM1,
80464 +};
80465 +
80466 +struct size_overflow_hash _000017_hash = {
80467 + .next = NULL,
80468 + .name = "read_default_ldt",
80469 + .param = PARAM2,
80470 +};
80471 +
80472 +struct size_overflow_hash _000018_hash = {
80473 + .next = NULL,
80474 + .name = "read_kcore",
80475 + .param = PARAM3,
80476 +};
80477 +
80478 +struct size_overflow_hash _000019_hash = {
80479 + .next = NULL,
80480 + .name = "read_ldt",
80481 + .param = PARAM2,
80482 +};
80483 +
80484 +struct size_overflow_hash _000020_hash = {
80485 + .next = NULL,
80486 + .name = "read_zero",
80487 + .param = PARAM3,
80488 +};
80489 +
80490 +struct size_overflow_hash _000021_hash = {
80491 + .next = NULL,
80492 + .name = "__vmalloc_node",
80493 + .param = PARAM1,
80494 +};
80495 +
80496 +struct size_overflow_hash _000022_hash = {
80497 + .next = NULL,
80498 + .name = "vm_map_ram",
80499 + .param = PARAM2,
80500 +};
80501 +
80502 +struct size_overflow_hash _000023_hash = {
80503 + .next = NULL,
80504 + .name = "aa_simple_write_to_buffer",
80505 + .param = PARAM4|PARAM3,
80506 +};
80507 +
80508 +struct size_overflow_hash _000024_hash = {
80509 + .next = NULL,
80510 + .name = "ablkcipher_copy_iv",
80511 + .param = PARAM3,
80512 +};
80513 +
80514 +struct size_overflow_hash _000025_hash = {
80515 + .next = NULL,
80516 + .name = "ablkcipher_next_slow",
80517 + .param = PARAM4,
80518 +};
80519 +
80520 +struct size_overflow_hash _000026_hash = {
80521 + .next = NULL,
80522 + .name = "acpi_battery_write_alarm",
80523 + .param = PARAM3,
80524 +};
80525 +
80526 +struct size_overflow_hash _000027_hash = {
80527 + .next = NULL,
80528 + .name = "acpi_os_allocate",
80529 + .param = PARAM1,
80530 +};
80531 +
80532 +struct size_overflow_hash _000028_hash = {
80533 + .next = NULL,
80534 + .name = "acpi_system_write_wakeup_device",
80535 + .param = PARAM3,
80536 +};
80537 +
80538 +struct size_overflow_hash _000029_hash = {
80539 + .next = NULL,
80540 + .name = "adu_write",
80541 + .param = PARAM3,
80542 +};
80543 +
80544 +struct size_overflow_hash _000030_hash = {
80545 + .next = NULL,
80546 + .name = "aer_inject_write",
80547 + .param = PARAM3,
80548 +};
80549 +
80550 +struct size_overflow_hash _000031_hash = {
80551 + .next = NULL,
80552 + .name = "afs_alloc_flat_call",
80553 + .param = PARAM2|PARAM3,
80554 +};
80555 +
80556 +struct size_overflow_hash _000033_hash = {
80557 + .next = NULL,
80558 + .name = "afs_proc_cells_write",
80559 + .param = PARAM3,
80560 +};
80561 +
80562 +struct size_overflow_hash _000034_hash = {
80563 + .next = NULL,
80564 + .name = "afs_proc_rootcell_write",
80565 + .param = PARAM3,
80566 +};
80567 +
80568 +struct size_overflow_hash _000035_hash = {
80569 + .next = NULL,
80570 + .name = "agp_3_5_isochronous_node_enable",
80571 + .param = PARAM3,
80572 +};
80573 +
80574 +struct size_overflow_hash _000036_hash = {
80575 + .next = NULL,
80576 + .name = "agp_alloc_page_array",
80577 + .param = PARAM1,
80578 +};
80579 +
80580 +struct size_overflow_hash _000037_hash = {
80581 + .next = NULL,
80582 + .name = "ah_alloc_tmp",
80583 + .param = PARAM2,
80584 +};
80585 +
80586 +struct size_overflow_hash _000038_hash = {
80587 + .next = NULL,
80588 + .name = "ahash_setkey_unaligned",
80589 + .param = PARAM3,
80590 +};
80591 +
80592 +struct size_overflow_hash _000039_hash = {
80593 + .next = NULL,
80594 + .name = "alg_setkey",
80595 + .param = PARAM3,
80596 +};
80597 +
80598 +struct size_overflow_hash _000040_hash = {
80599 + .next = NULL,
80600 + .name = "aligned_kmalloc",
80601 + .param = PARAM1,
80602 +};
80603 +
80604 +struct size_overflow_hash _000041_hash = {
80605 + .next = NULL,
80606 + .name = "alloc_context",
80607 + .param = PARAM1,
80608 +};
80609 +
80610 +struct size_overflow_hash _000042_hash = {
80611 + .next = NULL,
80612 + .name = "alloc_ep_req",
80613 + .param = PARAM2,
80614 +};
80615 +
80616 +struct size_overflow_hash _000043_hash = {
80617 + .next = NULL,
80618 + .name = "alloc_fdmem",
80619 + .param = PARAM1,
80620 +};
80621 +
80622 +struct size_overflow_hash _000044_hash = {
80623 + .next = NULL,
80624 + .name = "alloc_flex_gd",
80625 + .param = PARAM1,
80626 +};
80627 +
80628 +struct size_overflow_hash _000045_hash = {
80629 + .next = NULL,
80630 + .name = "alloc_sglist",
80631 + .param = PARAM1|PARAM3|PARAM2,
80632 +};
80633 +
80634 +struct size_overflow_hash _000046_hash = {
80635 + .next = NULL,
80636 + .name = "aoedev_flush",
80637 + .param = PARAM2,
80638 +};
80639 +
80640 +struct size_overflow_hash _000047_hash = {
80641 + .next = NULL,
80642 + .name = "append_to_buffer",
80643 + .param = PARAM3,
80644 +};
80645 +
80646 +struct size_overflow_hash _000048_hash = {
80647 + .next = NULL,
80648 + .name = "asix_read_cmd",
80649 + .param = PARAM5,
80650 +};
80651 +
80652 +struct size_overflow_hash _000049_hash = {
80653 + .next = NULL,
80654 + .name = "asix_write_cmd",
80655 + .param = PARAM5,
80656 +};
80657 +
80658 +struct size_overflow_hash _000050_hash = {
80659 + .next = NULL,
80660 + .name = "asn1_octets_decode",
80661 + .param = PARAM2,
80662 +};
80663 +
80664 +struct size_overflow_hash _000051_hash = {
80665 + .next = NULL,
80666 + .name = "asn1_oid_decode",
80667 + .param = PARAM2,
80668 +};
80669 +
80670 +struct size_overflow_hash _000052_hash = {
80671 + .next = NULL,
80672 + .name = "at76_set_card_command",
80673 + .param = PARAM4,
80674 +};
80675 +
80676 +struct size_overflow_hash _000053_hash = {
80677 + .next = NULL,
80678 + .name = "ath6kl_add_bss_if_needed",
80679 + .param = PARAM6,
80680 +};
80681 +
80682 +struct size_overflow_hash _000054_hash = {
80683 + .next = NULL,
80684 + .name = "ath6kl_debug_roam_tbl_event",
80685 + .param = PARAM3,
80686 +};
80687 +
80688 +struct size_overflow_hash _000055_hash = {
80689 + .next = NULL,
80690 + .name = "ath6kl_mgmt_powersave_ap",
80691 + .param = PARAM6,
80692 +};
80693 +
80694 +struct size_overflow_hash _000056_hash = {
80695 + .next = NULL,
80696 + .name = "ath6kl_send_go_probe_resp",
80697 + .param = PARAM3,
80698 +};
80699 +
80700 +struct size_overflow_hash _000057_hash = {
80701 + .next = NULL,
80702 + .name = "ath6kl_set_ap_probe_resp_ies",
80703 + .param = PARAM3,
80704 +};
80705 +
80706 +struct size_overflow_hash _000058_hash = {
80707 + .next = NULL,
80708 + .name = "ath6kl_set_assoc_req_ies",
80709 + .param = PARAM3,
80710 +};
80711 +
80712 +struct size_overflow_hash _000059_hash = {
80713 + .next = NULL,
80714 + .name = "ath6kl_wmi_bssinfo_event_rx",
80715 + .param = PARAM3,
80716 +};
80717 +
80718 +struct size_overflow_hash _000060_hash = {
80719 + .next = NULL,
80720 + .name = "ath6kl_wmi_send_action_cmd",
80721 + .param = PARAM7,
80722 +};
80723 +
80724 +struct size_overflow_hash _000061_hash = {
80725 + .next = NULL,
80726 + .name = "__ath6kl_wmi_send_mgmt_cmd",
80727 + .param = PARAM7,
80728 +};
80729 +
80730 +struct size_overflow_hash _000062_hash = {
80731 + .next = NULL,
80732 + .name = "attach_hdlc_protocol",
80733 + .param = PARAM3,
80734 +};
80735 +
80736 +struct size_overflow_hash _000063_hash = {
80737 + .next = NULL,
80738 + .name = "audio_write",
80739 + .param = PARAM4,
80740 +};
80741 +
80742 +struct size_overflow_hash _000064_hash = {
80743 + .next = NULL,
80744 + .name = "audit_unpack_string",
80745 + .param = PARAM3,
80746 +};
80747 +
80748 +struct size_overflow_hash _000065_hash = {
80749 + .next = NULL,
80750 + .name = "av7110_vbi_write",
80751 + .param = PARAM3,
80752 +};
80753 +
80754 +struct size_overflow_hash _000066_hash = {
80755 + .next = NULL,
80756 + .name = "ax25_setsockopt",
80757 + .param = PARAM5,
80758 +};
80759 +
80760 +struct size_overflow_hash _000067_hash = {
80761 + .next = NULL,
80762 + .name = "b43_debugfs_write",
80763 + .param = PARAM3,
80764 +};
80765 +
80766 +struct size_overflow_hash _000068_hash = {
80767 + .next = NULL,
80768 + .name = "b43legacy_debugfs_write",
80769 + .param = PARAM3,
80770 +};
80771 +
80772 +struct size_overflow_hash _000069_hash = {
80773 + .next = NULL,
80774 + .name = "bch_alloc",
80775 + .param = PARAM1,
80776 +};
80777 +
80778 +struct size_overflow_hash _000070_hash = {
80779 + .next = NULL,
80780 + .name = "befs_nls2utf",
80781 + .param = PARAM3,
80782 +};
80783 +
80784 +struct size_overflow_hash _000071_hash = {
80785 + .next = NULL,
80786 + .name = "befs_utf2nls",
80787 + .param = PARAM3,
80788 +};
80789 +
80790 +struct size_overflow_hash _000072_hash = {
80791 + .next = NULL,
80792 + .name = "bfad_debugfs_write_regrd",
80793 + .param = PARAM3,
80794 +};
80795 +
80796 +struct size_overflow_hash _000073_hash = {
80797 + .next = NULL,
80798 + .name = "bfad_debugfs_write_regwr",
80799 + .param = PARAM3,
80800 +};
80801 +
80802 +struct size_overflow_hash _000074_hash = {
80803 + .next = NULL,
80804 + .name = "bio_alloc_map_data",
80805 + .param = PARAM1|PARAM2,
80806 +};
80807 +
80808 +struct size_overflow_hash _000076_hash = {
80809 + .next = NULL,
80810 + .name = "bio_kmalloc",
80811 + .param = PARAM2,
80812 +};
80813 +
80814 +struct size_overflow_hash _000077_hash = {
80815 + .next = NULL,
80816 + .name = "blkcipher_copy_iv",
80817 + .param = PARAM3,
80818 +};
80819 +
80820 +struct size_overflow_hash _000078_hash = {
80821 + .next = NULL,
80822 + .name = "blkcipher_next_slow",
80823 + .param = PARAM4,
80824 +};
80825 +
80826 +struct size_overflow_hash _000079_hash = {
80827 + .next = NULL,
80828 + .name = "bl_pipe_downcall",
80829 + .param = PARAM3,
80830 +};
80831 +
80832 +struct size_overflow_hash _000080_hash = {
80833 + .next = NULL,
80834 + .name = "bnad_debugfs_write_regrd",
80835 + .param = PARAM3,
80836 +};
80837 +
80838 +struct size_overflow_hash _000081_hash = {
80839 + .next = NULL,
80840 + .name = "bnad_debugfs_write_regwr",
80841 + .param = PARAM3,
80842 +};
80843 +
80844 +struct size_overflow_hash _000082_hash = {
80845 + .next = NULL,
80846 + .name = "bnx2fc_cmd_mgr_alloc",
80847 + .param = PARAM2|PARAM3,
80848 +};
80849 +
80850 +struct size_overflow_hash _000084_hash = {
80851 + .next = NULL,
80852 + .name = "bnx2_nvram_write",
80853 + .param = PARAM4,
80854 +};
80855 +
80856 +struct size_overflow_hash _000085_hash = {
80857 + .next = NULL,
80858 + .name = "brcmf_sdbrcm_downloadvars",
80859 + .param = PARAM3,
80860 +};
80861 +
80862 +struct size_overflow_hash _000086_hash = {
80863 + .next = NULL,
80864 + .name = "btmrvl_gpiogap_write",
80865 + .param = PARAM3,
80866 +};
80867 +
80868 +struct size_overflow_hash _000087_hash = {
80869 + .next = NULL,
80870 + .name = "btmrvl_hscfgcmd_write",
80871 + .param = PARAM3,
80872 +};
80873 +
80874 +struct size_overflow_hash _000088_hash = {
80875 + .next = NULL,
80876 + .name = "btmrvl_hscmd_write",
80877 + .param = PARAM3,
80878 +};
80879 +
80880 +struct size_overflow_hash _000089_hash = {
80881 + .next = NULL,
80882 + .name = "btmrvl_hsmode_write",
80883 + .param = PARAM3,
80884 +};
80885 +
80886 +struct size_overflow_hash _000090_hash = {
80887 + .next = NULL,
80888 + .name = "btmrvl_pscmd_write",
80889 + .param = PARAM3,
80890 +};
80891 +
80892 +struct size_overflow_hash _000091_hash = {
80893 + .next = NULL,
80894 + .name = "btmrvl_psmode_write",
80895 + .param = PARAM3,
80896 +};
80897 +
80898 +struct size_overflow_hash _000092_hash = {
80899 + .next = NULL,
80900 + .name = "btrfs_alloc_delayed_item",
80901 + .param = PARAM1,
80902 +};
80903 +
80904 +struct size_overflow_hash _000093_hash = {
80905 + .next = NULL,
80906 + .name = "cache_do_downcall",
80907 + .param = PARAM3,
80908 +};
80909 +
80910 +struct size_overflow_hash _000094_hash = {
80911 + .next = NULL,
80912 + .name = "cachefiles_cook_key",
80913 + .param = PARAM2,
80914 +};
80915 +
80916 +struct size_overflow_hash _000095_hash = {
80917 + .next = NULL,
80918 + .name = "cachefiles_daemon_write",
80919 + .param = PARAM3,
80920 +};
80921 +
80922 +struct size_overflow_hash _000096_hash = {
80923 + .next = NULL,
80924 + .name = "capi_write",
80925 + .param = PARAM3,
80926 +};
80927 +
80928 +struct size_overflow_hash _000097_hash = {
80929 + .next = NULL,
80930 + .name = "carl9170_debugfs_write",
80931 + .param = PARAM3,
80932 +};
80933 +
80934 +struct size_overflow_hash _000098_hash = {
80935 + .next = NULL,
80936 + .name = "cciss_allocate_sg_chain_blocks",
80937 + .param = PARAM2|PARAM3,
80938 +};
80939 +
80940 +struct size_overflow_hash _000100_hash = {
80941 + .next = NULL,
80942 + .name = "cciss_proc_write",
80943 + .param = PARAM3,
80944 +};
80945 +
80946 +struct size_overflow_hash _000101_hash = {
80947 + .next = NULL,
80948 + .name = "cdrom_read_cdda_old",
80949 + .param = PARAM4,
80950 +};
80951 +
80952 +struct size_overflow_hash _000102_hash = {
80953 + .next = NULL,
80954 + .name = "ceph_alloc_page_vector",
80955 + .param = PARAM1,
80956 +};
80957 +
80958 +struct size_overflow_hash _000103_hash = {
80959 + .next = NULL,
80960 + .name = "ceph_buffer_new",
80961 + .param = PARAM1,
80962 +};
80963 +
80964 +struct size_overflow_hash _000104_hash = {
80965 + .next = NULL,
80966 + .name = "ceph_copy_user_to_page_vector",
80967 + .param = PARAM4,
80968 +};
80969 +
80970 +struct size_overflow_hash _000105_hash = {
80971 + .next = NULL,
80972 + .name = "ceph_get_direct_page_vector",
80973 + .param = PARAM2,
80974 +};
80975 +
80976 +struct size_overflow_hash _000106_hash = {
80977 + .next = NULL,
80978 + .name = "ceph_msg_new",
80979 + .param = PARAM2,
80980 +};
80981 +
80982 +struct size_overflow_hash _000107_hash = {
80983 + .next = NULL,
80984 + .name = "ceph_setxattr",
80985 + .param = PARAM4,
80986 +};
80987 +
80988 +struct size_overflow_hash _000108_hash = {
80989 + .next = NULL,
80990 + .name = "cfi_read_pri",
80991 + .param = PARAM3,
80992 +};
80993 +
80994 +struct size_overflow_hash _000109_hash = {
80995 + .next = NULL,
80996 + .name = "cgroup_write_string",
80997 + .param = PARAM5,
80998 +};
80999 +
81000 +struct size_overflow_hash _000110_hash = {
81001 + .next = NULL,
81002 + .name = "cgroup_write_X64",
81003 + .param = PARAM5,
81004 +};
81005 +
81006 +struct size_overflow_hash _000111_hash = {
81007 + .next = NULL,
81008 + .name = "change_xattr",
81009 + .param = PARAM5,
81010 +};
81011 +
81012 +struct size_overflow_hash _000112_hash = {
81013 + .next = NULL,
81014 + .name = "check_load_and_stores",
81015 + .param = PARAM2,
81016 +};
81017 +
81018 +struct size_overflow_hash _000113_hash = {
81019 + .next = NULL,
81020 + .name = "cifs_idmap_key_instantiate",
81021 + .param = PARAM3,
81022 +};
81023 +
81024 +struct size_overflow_hash _000114_hash = {
81025 + .next = NULL,
81026 + .name = "cifs_security_flags_proc_write",
81027 + .param = PARAM3,
81028 +};
81029 +
81030 +struct size_overflow_hash _000115_hash = {
81031 + .next = NULL,
81032 + .name = "cifs_setxattr",
81033 + .param = PARAM4,
81034 +};
81035 +
81036 +struct size_overflow_hash _000116_hash = {
81037 + .next = NULL,
81038 + .name = "cifs_spnego_key_instantiate",
81039 + .param = PARAM3,
81040 +};
81041 +
81042 +struct size_overflow_hash _000117_hash = {
81043 + .next = NULL,
81044 + .name = "ci_ll_write",
81045 + .param = PARAM4,
81046 +};
81047 +
81048 +struct size_overflow_hash _000118_hash = {
81049 + .next = NULL,
81050 + .name = "cld_pipe_downcall",
81051 + .param = PARAM3,
81052 +};
81053 +
81054 +struct size_overflow_hash _000119_hash = {
81055 + .next = NULL,
81056 + .name = "clear_refs_write",
81057 + .param = PARAM3,
81058 +};
81059 +
81060 +struct size_overflow_hash _000120_hash = {
81061 + .next = NULL,
81062 + .name = "clusterip_proc_write",
81063 + .param = PARAM3,
81064 +};
81065 +
81066 +struct size_overflow_hash _000121_hash = {
81067 + .next = NULL,
81068 + .name = "cm4040_write",
81069 + .param = PARAM3,
81070 +};
81071 +
81072 +struct size_overflow_hash _000122_hash = {
81073 + .next = NULL,
81074 + .name = "cm_copy_private_data",
81075 + .param = PARAM2,
81076 +};
81077 +
81078 +struct size_overflow_hash _000123_hash = {
81079 + .next = NULL,
81080 + .name = "cmm_write",
81081 + .param = PARAM3,
81082 +};
81083 +
81084 +struct size_overflow_hash _000124_hash = {
81085 + .next = NULL,
81086 + .name = "cm_write",
81087 + .param = PARAM3,
81088 +};
81089 +
81090 +struct size_overflow_hash _000125_hash = {
81091 + .next = NULL,
81092 + .name = "coda_psdev_write",
81093 + .param = PARAM3,
81094 +};
81095 +
81096 +struct size_overflow_hash _000126_hash = {
81097 + .next = NULL,
81098 + .name = "codec_reg_read_file",
81099 + .param = PARAM3,
81100 +};
81101 +
81102 +struct size_overflow_hash _000127_hash = {
81103 + .next = NULL,
81104 + .name = "command_file_write",
81105 + .param = PARAM3,
81106 +};
81107 +
81108 +struct size_overflow_hash _000128_hash = {
81109 + .next = NULL,
81110 + .name = "command_write",
81111 + .param = PARAM3,
81112 +};
81113 +
81114 +struct size_overflow_hash _000129_hash = {
81115 + .next = NULL,
81116 + .name = "comm_write",
81117 + .param = PARAM3,
81118 +};
81119 +
81120 +struct size_overflow_hash _000130_hash = {
81121 + .next = NULL,
81122 + .name = "concat_writev",
81123 + .param = PARAM3,
81124 +};
81125 +
81126 +struct size_overflow_hash _000131_hash = {
81127 + .next = NULL,
81128 + .name = "copy_and_check",
81129 + .param = PARAM3,
81130 +};
81131 +
81132 +struct size_overflow_hash _000132_hash = {
81133 + .next = NULL,
81134 + .name = "copy_from_user_toio",
81135 + .param = PARAM3,
81136 +};
81137 +
81138 +struct size_overflow_hash _000133_hash = {
81139 + .next = NULL,
81140 + .name = "copy_items",
81141 + .param = PARAM6,
81142 +};
81143 +
81144 +struct size_overflow_hash _000134_hash = {
81145 + .next = NULL,
81146 + .name = "copy_macs",
81147 + .param = PARAM4,
81148 +};
81149 +
81150 +struct size_overflow_hash _000135_hash = {
81151 + .next = NULL,
81152 + .name = "__copy_to_user",
81153 + .param = PARAM3,
81154 +};
81155 +
81156 +struct size_overflow_hash _000136_hash = {
81157 + .next = NULL,
81158 + .name = "copy_vm86_regs_from_user",
81159 + .param = PARAM3,
81160 +};
81161 +
81162 +struct size_overflow_hash _000137_hash = {
81163 + .next = NULL,
81164 + .name = "cosa_write",
81165 + .param = PARAM3,
81166 +};
81167 +
81168 +struct size_overflow_hash _000138_hash = {
81169 + .next = NULL,
81170 + .name = "create_entry",
81171 + .param = PARAM2,
81172 +};
81173 +
81174 +struct size_overflow_hash _000139_hash = {
81175 + .next = NULL,
81176 + .name = "create_queues",
81177 + .param = PARAM2|PARAM3,
81178 +};
81179 +
81180 +struct size_overflow_hash _000141_hash = {
81181 + .next = NULL,
81182 + .name = "create_xattr",
81183 + .param = PARAM5,
81184 +};
81185 +
81186 +struct size_overflow_hash _000142_hash = {
81187 + .next = NULL,
81188 + .name = "create_xattr_datum",
81189 + .param = PARAM5,
81190 +};
81191 +
81192 +struct size_overflow_hash _000143_hash = {
81193 + .next = NULL,
81194 + .name = "csum_partial_copy_fromiovecend",
81195 + .param = PARAM3|PARAM4,
81196 +};
81197 +
81198 +struct size_overflow_hash _000145_hash = {
81199 + .next = NULL,
81200 + .name = "ctrl_out",
81201 + .param = PARAM3|PARAM5,
81202 +};
81203 +
81204 +struct size_overflow_hash _000147_hash = {
81205 + .next = NULL,
81206 + .name = "cx24116_writeregN",
81207 + .param = PARAM4,
81208 +};
81209 +
81210 +struct size_overflow_hash _000148_hash = {
81211 + .next = NULL,
81212 + .name = "cxacru_cm_get_array",
81213 + .param = PARAM4,
81214 +};
81215 +
81216 +struct size_overflow_hash _000149_hash = {
81217 + .next = NULL,
81218 + .name = "cxgbi_alloc_big_mem",
81219 + .param = PARAM1,
81220 +};
81221 +
81222 +struct size_overflow_hash _000150_hash = {
81223 + .next = NULL,
81224 + .name = "dac960_user_command_proc_write",
81225 + .param = PARAM3,
81226 +};
81227 +
81228 +struct size_overflow_hash _000151_hash = {
81229 + .next = NULL,
81230 + .name = "datablob_format",
81231 + .param = PARAM2,
81232 +};
81233 +
81234 +struct size_overflow_hash _000152_hash = {
81235 + .next = NULL,
81236 + .name = "dccp_feat_clone_sp_val",
81237 + .param = PARAM3,
81238 +};
81239 +
81240 +struct size_overflow_hash _000153_hash = {
81241 + .next = NULL,
81242 + .name = "dccp_setsockopt_ccid",
81243 + .param = PARAM4,
81244 +};
81245 +
81246 +struct size_overflow_hash _000154_hash = {
81247 + .next = NULL,
81248 + .name = "dccp_setsockopt_cscov",
81249 + .param = PARAM2,
81250 +};
81251 +
81252 +struct size_overflow_hash _000155_hash = {
81253 + .next = NULL,
81254 + .name = "dccp_setsockopt_service",
81255 + .param = PARAM4,
81256 +};
81257 +
81258 +struct size_overflow_hash _000156_hash = {
81259 + .next = NULL,
81260 + .name = "ddb_output_write",
81261 + .param = PARAM3,
81262 +};
81263 +
81264 +struct size_overflow_hash _000157_hash = {
81265 + .next = NULL,
81266 + .name = "ddebug_proc_write",
81267 + .param = PARAM3,
81268 +};
81269 +
81270 +struct size_overflow_hash _000158_hash = {
81271 + .next = NULL,
81272 + .name = "dev_config",
81273 + .param = PARAM3,
81274 +};
81275 +
81276 +struct size_overflow_hash _000159_hash = {
81277 + .next = NULL,
81278 + .name = "device_write",
81279 + .param = PARAM3,
81280 +};
81281 +
81282 +struct size_overflow_hash _000160_hash = {
81283 + .next = NULL,
81284 + .name = "devm_kzalloc",
81285 + .param = PARAM2,
81286 +};
81287 +
81288 +struct size_overflow_hash _000161_hash = {
81289 + .next = NULL,
81290 + .name = "devres_alloc",
81291 + .param = PARAM2,
81292 +};
81293 +
81294 +struct size_overflow_hash _000162_hash = {
81295 + .next = NULL,
81296 + .name = "dfs_file_write",
81297 + .param = PARAM3,
81298 +};
81299 +
81300 +struct size_overflow_hash _000163_hash = {
81301 + .next = NULL,
81302 + .name = "direct_entry",
81303 + .param = PARAM3,
81304 +};
81305 +
81306 +struct size_overflow_hash _000164_hash = {
81307 + .next = NULL,
81308 + .name = "dispatch_proc_write",
81309 + .param = PARAM3,
81310 +};
81311 +
81312 +struct size_overflow_hash _000165_hash = {
81313 + .next = NULL,
81314 + .name = "diva_os_copy_from_user",
81315 + .param = PARAM4,
81316 +};
81317 +
81318 +struct size_overflow_hash _000166_hash = {
81319 + .next = NULL,
81320 + .name = "dlm_alloc_pagevec",
81321 + .param = PARAM1,
81322 +};
81323 +
81324 +struct size_overflow_hash _000167_hash = {
81325 + .next = NULL,
81326 + .name = "dlmfs_file_read",
81327 + .param = PARAM3,
81328 +};
81329 +
81330 +struct size_overflow_hash _000168_hash = {
81331 + .next = NULL,
81332 + .name = "dlmfs_file_write",
81333 + .param = PARAM3,
81334 +};
81335 +
81336 +struct size_overflow_hash _000169_hash = {
81337 + .next = NULL,
81338 + .name = "dm_read",
81339 + .param = PARAM3,
81340 +};
81341 +
81342 +struct size_overflow_hash _000170_hash = {
81343 + .next = NULL,
81344 + .name = "dm_write",
81345 + .param = PARAM3,
81346 +};
81347 +
81348 +struct size_overflow_hash _000171_hash = {
81349 + .next = NULL,
81350 + .name = "__dn_setsockopt",
81351 + .param = PARAM5,
81352 +};
81353 +
81354 +struct size_overflow_hash _000172_hash = {
81355 + .next = NULL,
81356 + .name = "dns_query",
81357 + .param = PARAM3,
81358 +};
81359 +
81360 +struct size_overflow_hash _000173_hash = {
81361 + .next = NULL,
81362 + .name = "dns_resolver_instantiate",
81363 + .param = PARAM3,
81364 +};
81365 +
81366 +struct size_overflow_hash _000174_hash = {
81367 + .next = NULL,
81368 + .name = "do_add_counters",
81369 + .param = PARAM3,
81370 +};
81371 +
81372 +struct size_overflow_hash _000175_hash = {
81373 + .next = NULL,
81374 + .name = "__do_config_autodelink",
81375 + .param = PARAM3,
81376 +};
81377 +
81378 +struct size_overflow_hash _000176_hash = {
81379 + .next = NULL,
81380 + .name = "do_ip_setsockopt",
81381 + .param = PARAM5,
81382 +};
81383 +
81384 +struct size_overflow_hash _000177_hash = {
81385 + .next = NULL,
81386 + .name = "do_ipv6_setsockopt",
81387 + .param = PARAM5,
81388 +};
81389 +
81390 +struct size_overflow_hash _000178_hash = {
81391 + .next = NULL,
81392 + .name = "do_ip_vs_set_ctl",
81393 + .param = PARAM4,
81394 +};
81395 +
81396 +struct size_overflow_hash _000179_hash = {
81397 + .next = NULL,
81398 + .name = "do_kimage_alloc",
81399 + .param = PARAM3,
81400 +};
81401 +
81402 +struct size_overflow_hash _000180_hash = {
81403 + .next = NULL,
81404 + .name = "do_register_entry",
81405 + .param = PARAM4,
81406 +};
81407 +
81408 +struct size_overflow_hash _000181_hash = {
81409 + .next = NULL,
81410 + .name = "do_tty_write",
81411 + .param = PARAM5,
81412 +};
81413 +
81414 +struct size_overflow_hash _000182_hash = {
81415 + .next = NULL,
81416 + .name = "do_update_counters",
81417 + .param = PARAM4,
81418 +};
81419 +
81420 +struct size_overflow_hash _000183_hash = {
81421 + .next = NULL,
81422 + .name = "dsp_write",
81423 + .param = PARAM2,
81424 +};
81425 +
81426 +struct size_overflow_hash _000184_hash = {
81427 + .next = NULL,
81428 + .name = "dup_to_netobj",
81429 + .param = PARAM3,
81430 +};
81431 +
81432 +struct size_overflow_hash _000185_hash = {
81433 + .next = NULL,
81434 + .name = "dvb_aplay",
81435 + .param = PARAM3,
81436 +};
81437 +
81438 +struct size_overflow_hash _000186_hash = {
81439 + .next = NULL,
81440 + .name = "dvb_ca_en50221_io_write",
81441 + .param = PARAM3,
81442 +};
81443 +
81444 +struct size_overflow_hash _000187_hash = {
81445 + .next = NULL,
81446 + .name = "dvbdmx_write",
81447 + .param = PARAM3,
81448 +};
81449 +
81450 +struct size_overflow_hash _000188_hash = {
81451 + .next = NULL,
81452 + .name = "dvb_play",
81453 + .param = PARAM3,
81454 +};
81455 +
81456 +struct size_overflow_hash _000189_hash = {
81457 + .next = NULL,
81458 + .name = "dw210x_op_rw",
81459 + .param = PARAM6,
81460 +};
81461 +
81462 +struct size_overflow_hash _000190_hash = {
81463 + .next = NULL,
81464 + .name = "dwc3_link_state_write",
81465 + .param = PARAM3,
81466 +};
81467 +
81468 +struct size_overflow_hash _000191_hash = {
81469 + .next = NULL,
81470 + .name = "dwc3_mode_write",
81471 + .param = PARAM3,
81472 +};
81473 +
81474 +struct size_overflow_hash _000192_hash = {
81475 + .next = NULL,
81476 + .name = "dwc3_testmode_write",
81477 + .param = PARAM3,
81478 +};
81479 +
81480 +struct size_overflow_hash _000193_hash = {
81481 + .next = NULL,
81482 + .name = "ecryptfs_copy_filename",
81483 + .param = PARAM4,
81484 +};
81485 +
81486 +struct size_overflow_hash _000194_hash = {
81487 + .next = NULL,
81488 + .name = "ecryptfs_miscdev_write",
81489 + .param = PARAM3,
81490 +};
81491 +
81492 +struct size_overflow_hash _000195_hash = {
81493 + .next = NULL,
81494 + .name = "ecryptfs_send_miscdev",
81495 + .param = PARAM2,
81496 +};
81497 +
81498 +struct size_overflow_hash _000196_hash = {
81499 + .next = NULL,
81500 + .name = "efx_tsoh_heap_alloc",
81501 + .param = PARAM2,
81502 +};
81503 +
81504 +struct size_overflow_hash _000197_hash = {
81505 + .next = NULL,
81506 + .name = "emi26_writememory",
81507 + .param = PARAM4,
81508 +};
81509 +
81510 +struct size_overflow_hash _000198_hash = {
81511 + .next = NULL,
81512 + .name = "emi62_writememory",
81513 + .param = PARAM4,
81514 +};
81515 +
81516 +struct size_overflow_hash _000199_hash = {
81517 + .next = NULL,
81518 + .name = "encrypted_instantiate",
81519 + .param = PARAM3,
81520 +};
81521 +
81522 +struct size_overflow_hash _000200_hash = {
81523 + .next = NULL,
81524 + .name = "encrypted_update",
81525 + .param = PARAM3,
81526 +};
81527 +
81528 +struct size_overflow_hash _000201_hash = {
81529 + .next = NULL,
81530 + .name = "ep0_write",
81531 + .param = PARAM3,
81532 +};
81533 +
81534 +struct size_overflow_hash _000202_hash = {
81535 + .next = NULL,
81536 + .name = "ep_read",
81537 + .param = PARAM3,
81538 +};
81539 +
81540 +struct size_overflow_hash _000203_hash = {
81541 + .next = NULL,
81542 + .name = "ep_write",
81543 + .param = PARAM3,
81544 +};
81545 +
81546 +struct size_overflow_hash _000204_hash = {
81547 + .next = NULL,
81548 + .name = "erst_dbg_write",
81549 + .param = PARAM3,
81550 +};
81551 +
81552 +struct size_overflow_hash _000205_hash = {
81553 + .next = NULL,
81554 + .name = "esp_alloc_tmp",
81555 + .param = PARAM2,
81556 +};
81557 +
81558 +struct size_overflow_hash _000206_hash = {
81559 + .next = NULL,
81560 + .name = "exofs_read_lookup_dev_table",
81561 + .param = PARAM3,
81562 +};
81563 +
81564 +struct size_overflow_hash _000207_hash = {
81565 + .next = NULL,
81566 + .name = "ext4_kvmalloc",
81567 + .param = PARAM1,
81568 +};
81569 +
81570 +struct size_overflow_hash _000208_hash = {
81571 + .next = NULL,
81572 + .name = "ezusb_writememory",
81573 + .param = PARAM4,
81574 +};
81575 +
81576 +struct size_overflow_hash _000209_hash = {
81577 + .next = NULL,
81578 + .name = "fanotify_write",
81579 + .param = PARAM3,
81580 +};
81581 +
81582 +struct size_overflow_hash _000210_hash = {
81583 + .next = NULL,
81584 + .name = "fd_copyin",
81585 + .param = PARAM3,
81586 +};
81587 +
81588 +struct size_overflow_hash _000211_hash = {
81589 + .next = NULL,
81590 + .name = "ffs_epfile_io",
81591 + .param = PARAM3,
81592 +};
81593 +
81594 +struct size_overflow_hash _000212_hash = {
81595 + .next = NULL,
81596 + .name = "ffs_prepare_buffer",
81597 + .param = PARAM2,
81598 +};
81599 +
81600 +struct size_overflow_hash _000213_hash = {
81601 + .next = NULL,
81602 + .name = "f_hidg_write",
81603 + .param = PARAM3,
81604 +};
81605 +
81606 +struct size_overflow_hash _000214_hash = {
81607 + .next = NULL,
81608 + .name = "file_read_actor",
81609 + .param = PARAM4,
81610 +};
81611 +
81612 +struct size_overflow_hash _000215_hash = {
81613 + .next = NULL,
81614 + .name = "fill_write_buffer",
81615 + .param = PARAM3,
81616 +};
81617 +
81618 +struct size_overflow_hash _000216_hash = {
81619 + .next = NULL,
81620 + .name = "fl_create",
81621 + .param = PARAM5,
81622 +};
81623 +
81624 +struct size_overflow_hash _000217_hash = {
81625 + .next = NULL,
81626 + .name = "ftdi_elan_write",
81627 + .param = PARAM3,
81628 +};
81629 +
81630 +struct size_overflow_hash _000218_hash = {
81631 + .next = NULL,
81632 + .name = "fuse_conn_limit_write",
81633 + .param = PARAM3,
81634 +};
81635 +
81636 +struct size_overflow_hash _000219_hash = {
81637 + .next = NULL,
81638 + .name = "fw_iso_buffer_init",
81639 + .param = PARAM3,
81640 +};
81641 +
81642 +struct size_overflow_hash _000220_hash = {
81643 + .next = NULL,
81644 + .name = "garmin_write_bulk",
81645 + .param = PARAM3,
81646 +};
81647 +
81648 +struct size_overflow_hash _000221_hash = {
81649 + .next = NULL,
81650 + .name = "garp_attr_create",
81651 + .param = PARAM3,
81652 +};
81653 +
81654 +struct size_overflow_hash _000222_hash = {
81655 + .next = NULL,
81656 + .name = "get_arg",
81657 + .param = PARAM3,
81658 +};
81659 +
81660 +struct size_overflow_hash _000223_hash = {
81661 + .next = NULL,
81662 + .name = "getdqbuf",
81663 + .param = PARAM1,
81664 +};
81665 +
81666 +struct size_overflow_hash _000224_hash = {
81667 + .next = NULL,
81668 + .name = "get_fdb_entries",
81669 + .param = PARAM3,
81670 +};
81671 +
81672 +struct size_overflow_hash _000225_hash = {
81673 + .next = NULL,
81674 + .name = "get_indirect_ea",
81675 + .param = PARAM4,
81676 +};
81677 +
81678 +struct size_overflow_hash _000226_hash = {
81679 + .next = NULL,
81680 + .name = "get_registers",
81681 + .param = PARAM3,
81682 +};
81683 +
81684 +struct size_overflow_hash _000227_hash = {
81685 + .next = NULL,
81686 + .name = "get_scq",
81687 + .param = PARAM2,
81688 +};
81689 +
81690 +struct size_overflow_hash _000228_hash = {
81691 + .next = NULL,
81692 + .name = "get_server_iovec",
81693 + .param = PARAM2,
81694 +};
81695 +
81696 +struct size_overflow_hash _000229_hash = {
81697 + .next = NULL,
81698 + .name = "get_ucode_user",
81699 + .param = PARAM3,
81700 +};
81701 +
81702 +struct size_overflow_hash _000230_hash = {
81703 + .next = NULL,
81704 + .name = "get_user_cpu_mask",
81705 + .param = PARAM2,
81706 +};
81707 +
81708 +struct size_overflow_hash _000231_hash = {
81709 + .next = NULL,
81710 + .name = "gfs2_alloc_sort_buffer",
81711 + .param = PARAM1,
81712 +};
81713 +
81714 +struct size_overflow_hash _000232_hash = {
81715 + .next = NULL,
81716 + .name = "gfs2_glock_nq_m",
81717 + .param = PARAM1,
81718 +};
81719 +
81720 +struct size_overflow_hash _000233_hash = {
81721 + .next = NULL,
81722 + .name = "gigaset_initcs",
81723 + .param = PARAM2,
81724 +};
81725 +
81726 +struct size_overflow_hash _000234_hash = {
81727 + .next = NULL,
81728 + .name = "gigaset_initdriver",
81729 + .param = PARAM2,
81730 +};
81731 +
81732 +struct size_overflow_hash _000235_hash = {
81733 + .next = NULL,
81734 + .name = "gs_alloc_req",
81735 + .param = PARAM2,
81736 +};
81737 +
81738 +struct size_overflow_hash _000236_hash = {
81739 + .next = NULL,
81740 + .name = "gs_buf_alloc",
81741 + .param = PARAM2,
81742 +};
81743 +
81744 +struct size_overflow_hash _000237_hash = {
81745 + .next = NULL,
81746 + .name = "gsm_data_alloc",
81747 + .param = PARAM3,
81748 +};
81749 +
81750 +struct size_overflow_hash _000238_hash = {
81751 + .next = NULL,
81752 + .name = "gss_pipe_downcall",
81753 + .param = PARAM3,
81754 +};
81755 +
81756 +struct size_overflow_hash _000239_hash = {
81757 + .next = NULL,
81758 + .name = "handle_request",
81759 + .param = PARAM9,
81760 +};
81761 +
81762 +struct size_overflow_hash _000240_hash = {
81763 + .next = NULL,
81764 + .name = "hash_new",
81765 + .param = PARAM1,
81766 +};
81767 +
81768 +struct size_overflow_hash _000241_hash = {
81769 + .next = NULL,
81770 + .name = "hashtab_create",
81771 + .param = PARAM3,
81772 +};
81773 +
81774 +struct size_overflow_hash _000242_hash = {
81775 + .next = NULL,
81776 + .name = "hcd_buffer_alloc",
81777 + .param = PARAM2,
81778 +};
81779 +
81780 +struct size_overflow_hash _000243_hash = {
81781 + .next = NULL,
81782 + .name = "hci_sock_setsockopt",
81783 + .param = PARAM5,
81784 +};
81785 +
81786 +struct size_overflow_hash _000244_hash = {
81787 + .next = NULL,
81788 + .name = "heap_init",
81789 + .param = PARAM2,
81790 +};
81791 +
81792 +struct size_overflow_hash _000245_hash = {
81793 + .next = NULL,
81794 + .name = "hest_ghes_dev_register",
81795 + .param = PARAM1,
81796 +};
81797 +
81798 +struct size_overflow_hash _000246_hash = {
81799 + .next = NULL,
81800 + .name = "hidraw_get_report",
81801 + .param = PARAM3,
81802 +};
81803 +
81804 +struct size_overflow_hash _000247_hash = {
81805 + .next = NULL,
81806 + .name = "hidraw_report_event",
81807 + .param = PARAM3,
81808 +};
81809 +
81810 +struct size_overflow_hash _000248_hash = {
81811 + .next = NULL,
81812 + .name = "hidraw_send_report",
81813 + .param = PARAM3,
81814 +};
81815 +
81816 +struct size_overflow_hash _000249_hash = {
81817 + .next = NULL,
81818 + .name = "hpfs_translate_name",
81819 + .param = PARAM3,
81820 +};
81821 +
81822 +struct size_overflow_hash _000250_hash = {
81823 + .next = NULL,
81824 + .name = "hysdn_conf_write",
81825 + .param = PARAM3,
81826 +};
81827 +
81828 +struct size_overflow_hash _000251_hash = {
81829 + .next = NULL,
81830 + .name = "hysdn_log_write",
81831 + .param = PARAM3,
81832 +};
81833 +
81834 +struct size_overflow_hash _000252_hash = {
81835 + .next = NULL,
81836 + .name = "__i2400mu_send_barker",
81837 + .param = PARAM3,
81838 +};
81839 +
81840 +struct size_overflow_hash _000253_hash = {
81841 + .next = NULL,
81842 + .name = "i2cdev_read",
81843 + .param = PARAM3,
81844 +};
81845 +
81846 +struct size_overflow_hash _000254_hash = {
81847 + .next = NULL,
81848 + .name = "i2cdev_write",
81849 + .param = PARAM3,
81850 +};
81851 +
81852 +struct size_overflow_hash _000255_hash = {
81853 + .next = NULL,
81854 + .name = "i2o_parm_field_get",
81855 + .param = PARAM5,
81856 +};
81857 +
81858 +struct size_overflow_hash _000256_hash = {
81859 + .next = NULL,
81860 + .name = "i2o_parm_table_get",
81861 + .param = PARAM6,
81862 +};
81863 +
81864 +struct size_overflow_hash _000257_hash = {
81865 + .next = NULL,
81866 + .name = "ib_copy_from_udata",
81867 + .param = PARAM3,
81868 +};
81869 +
81870 +struct size_overflow_hash _000258_hash = {
81871 + .next = NULL,
81872 + .name = "ib_ucm_alloc_data",
81873 + .param = PARAM3,
81874 +};
81875 +
81876 +struct size_overflow_hash _000259_hash = {
81877 + .next = NULL,
81878 + .name = "ib_umad_write",
81879 + .param = PARAM3,
81880 +};
81881 +
81882 +struct size_overflow_hash _000260_hash = {
81883 + .next = NULL,
81884 + .name = "ib_uverbs_unmarshall_recv",
81885 + .param = PARAM5,
81886 +};
81887 +
81888 +struct size_overflow_hash _000261_hash = {
81889 + .next = NULL,
81890 + .name = "icn_writecmd",
81891 + .param = PARAM2,
81892 +};
81893 +
81894 +struct size_overflow_hash _000262_hash = {
81895 + .next = NULL,
81896 + .name = "ide_driver_proc_write",
81897 + .param = PARAM3,
81898 +};
81899 +
81900 +struct size_overflow_hash _000263_hash = {
81901 + .next = NULL,
81902 + .name = "ide_settings_proc_write",
81903 + .param = PARAM3,
81904 +};
81905 +
81906 +struct size_overflow_hash _000264_hash = {
81907 + .next = NULL,
81908 + .name = "idetape_chrdev_write",
81909 + .param = PARAM3,
81910 +};
81911 +
81912 +struct size_overflow_hash _000265_hash = {
81913 + .next = NULL,
81914 + .name = "idmap_pipe_downcall",
81915 + .param = PARAM3,
81916 +};
81917 +
81918 +struct size_overflow_hash _000266_hash = {
81919 + .next = NULL,
81920 + .name = "ieee80211_build_probe_req",
81921 + .param = PARAM7,
81922 +};
81923 +
81924 +struct size_overflow_hash _000267_hash = {
81925 + .next = NULL,
81926 + .name = "ieee80211_if_write",
81927 + .param = PARAM3,
81928 +};
81929 +
81930 +struct size_overflow_hash _000268_hash = {
81931 + .next = NULL,
81932 + .name = "if_write",
81933 + .param = PARAM3,
81934 +};
81935 +
81936 +struct size_overflow_hash _000269_hash = {
81937 + .next = NULL,
81938 + .name = "ilo_write",
81939 + .param = PARAM3,
81940 +};
81941 +
81942 +struct size_overflow_hash _000270_hash = {
81943 + .next = NULL,
81944 + .name = "ima_write_policy",
81945 + .param = PARAM3,
81946 +};
81947 +
81948 +struct size_overflow_hash _000271_hash = {
81949 + .next = NULL,
81950 + .name = "init_data_container",
81951 + .param = PARAM1,
81952 +};
81953 +
81954 +struct size_overflow_hash _000272_hash = {
81955 + .next = NULL,
81956 + .name = "init_send_hfcd",
81957 + .param = PARAM1,
81958 +};
81959 +
81960 +struct size_overflow_hash _000273_hash = {
81961 + .next = NULL,
81962 + .name = "insert_dent",
81963 + .param = PARAM7,
81964 +};
81965 +
81966 +struct size_overflow_hash _000274_hash = {
81967 + .next = NULL,
81968 + .name = "interpret_user_input",
81969 + .param = PARAM2,
81970 +};
81971 +
81972 +struct size_overflow_hash _000275_hash = {
81973 + .next = NULL,
81974 + .name = "int_proc_write",
81975 + .param = PARAM3,
81976 +};
81977 +
81978 +struct size_overflow_hash _000276_hash = {
81979 + .next = NULL,
81980 + .name = "ioctl_private_iw_point",
81981 + .param = PARAM7,
81982 +};
81983 +
81984 +struct size_overflow_hash _000277_hash = {
81985 + .next = NULL,
81986 + .name = "iov_iter_copy_from_user",
81987 + .param = PARAM4,
81988 +};
81989 +
81990 +struct size_overflow_hash _000278_hash = {
81991 + .next = NULL,
81992 + .name = "iov_iter_copy_from_user_atomic",
81993 + .param = PARAM4,
81994 +};
81995 +
81996 +struct size_overflow_hash _000279_hash = {
81997 + .next = NULL,
81998 + .name = "iowarrior_write",
81999 + .param = PARAM3,
82000 +};
82001 +
82002 +struct size_overflow_hash _000280_hash = {
82003 + .next = NULL,
82004 + .name = "ipc_alloc",
82005 + .param = PARAM1,
82006 +};
82007 +
82008 +struct size_overflow_hash _000281_hash = {
82009 + .next = NULL,
82010 + .name = "ipc_rcu_alloc",
82011 + .param = PARAM1,
82012 +};
82013 +
82014 +struct size_overflow_hash _000282_hash = {
82015 + .next = NULL,
82016 + .name = "ip_options_get_from_user",
82017 + .param = PARAM4,
82018 +};
82019 +
82020 +struct size_overflow_hash _000283_hash = {
82021 + .next = NULL,
82022 + .name = "ipv6_renew_option",
82023 + .param = PARAM3,
82024 +};
82025 +
82026 +struct size_overflow_hash _000284_hash = {
82027 + .next = NULL,
82028 + .name = "ip_vs_conn_fill_param_sync",
82029 + .param = PARAM6,
82030 +};
82031 +
82032 +struct size_overflow_hash _000285_hash = {
82033 + .next = NULL,
82034 + .name = "ip_vs_create_timeout_table",
82035 + .param = PARAM2,
82036 +};
82037 +
82038 +struct size_overflow_hash _000286_hash = {
82039 + .next = NULL,
82040 + .name = "ipw_queue_tx_init",
82041 + .param = PARAM3,
82042 +};
82043 +
82044 +struct size_overflow_hash _000287_hash = {
82045 + .next = NULL,
82046 + .name = "irda_setsockopt",
82047 + .param = PARAM5,
82048 +};
82049 +
82050 +struct size_overflow_hash _000288_hash = {
82051 + .next = NULL,
82052 + .name = "irias_new_octseq_value",
82053 + .param = PARAM2,
82054 +};
82055 +
82056 +struct size_overflow_hash _000289_hash = {
82057 + .next = NULL,
82058 + .name = "ir_lirc_transmit_ir",
82059 + .param = PARAM3,
82060 +};
82061 +
82062 +struct size_overflow_hash _000290_hash = {
82063 + .next = NULL,
82064 + .name = "irnet_ctrl_write",
82065 + .param = PARAM3,
82066 +};
82067 +
82068 +struct size_overflow_hash _000291_hash = {
82069 + .next = NULL,
82070 + .name = "isdn_add_channels",
82071 + .param = PARAM3,
82072 +};
82073 +
82074 +struct size_overflow_hash _000292_hash = {
82075 + .next = NULL,
82076 + .name = "isdn_ppp_fill_rq",
82077 + .param = PARAM2,
82078 +};
82079 +
82080 +struct size_overflow_hash _000293_hash = {
82081 + .next = NULL,
82082 + .name = "isdn_ppp_write",
82083 + .param = PARAM4,
82084 +};
82085 +
82086 +struct size_overflow_hash _000294_hash = {
82087 + .next = NULL,
82088 + .name = "isdn_read",
82089 + .param = PARAM3,
82090 +};
82091 +
82092 +struct size_overflow_hash _000295_hash = {
82093 + .next = NULL,
82094 + .name = "isdn_v110_open",
82095 + .param = PARAM3,
82096 +};
82097 +
82098 +struct size_overflow_hash _000296_hash = {
82099 + .next = NULL,
82100 + .name = "isdn_writebuf_stub",
82101 + .param = PARAM4,
82102 +};
82103 +
82104 +struct size_overflow_hash _000297_hash = {
82105 + .next = NULL,
82106 + .name = "islpci_mgt_transmit",
82107 + .param = PARAM5,
82108 +};
82109 +
82110 +struct size_overflow_hash _000298_hash = {
82111 + .next = NULL,
82112 + .name = "iso_callback",
82113 + .param = PARAM3,
82114 +};
82115 +
82116 +struct size_overflow_hash _000299_hash = {
82117 + .next = NULL,
82118 + .name = "iso_packets_buffer_init",
82119 + .param = PARAM3,
82120 +};
82121 +
82122 +struct size_overflow_hash _000300_hash = {
82123 + .next = NULL,
82124 + .name = "it821x_firmware_command",
82125 + .param = PARAM3,
82126 +};
82127 +
82128 +struct size_overflow_hash _000301_hash = {
82129 + .next = NULL,
82130 + .name = "ivtv_buf_copy_from_user",
82131 + .param = PARAM4,
82132 +};
82133 +
82134 +struct size_overflow_hash _000302_hash = {
82135 + .next = NULL,
82136 + .name = "iwch_alloc_fastreg_pbl",
82137 + .param = PARAM2,
82138 +};
82139 +
82140 +struct size_overflow_hash _000303_hash = {
82141 + .next = NULL,
82142 + .name = "iwl_calib_set",
82143 + .param = PARAM3,
82144 +};
82145 +
82146 +struct size_overflow_hash _000304_hash = {
82147 + .next = NULL,
82148 + .name = "jbd2_journal_init_revoke_table",
82149 + .param = PARAM1,
82150 +};
82151 +
82152 +struct size_overflow_hash _000305_hash = {
82153 + .next = NULL,
82154 + .name = "jffs2_alloc_full_dirent",
82155 + .param = PARAM1,
82156 +};
82157 +
82158 +struct size_overflow_hash _000306_hash = {
82159 + .next = NULL,
82160 + .name = "journal_init_revoke_table",
82161 + .param = PARAM1,
82162 +};
82163 +
82164 +struct size_overflow_hash _000307_hash = {
82165 + .next = NULL,
82166 + .name = "kcalloc",
82167 + .param = PARAM1|PARAM2,
82168 +};
82169 +
82170 +struct size_overflow_hash _000309_hash = {
82171 + .next = NULL,
82172 + .name = "keyctl_instantiate_key_common",
82173 + .param = PARAM4,
82174 +};
82175 +
82176 +struct size_overflow_hash _000310_hash = {
82177 + .next = NULL,
82178 + .name = "keyctl_update_key",
82179 + .param = PARAM3,
82180 +};
82181 +
82182 +struct size_overflow_hash _000311_hash = {
82183 + .next = NULL,
82184 + .name = "__kfifo_alloc",
82185 + .param = PARAM2|PARAM3,
82186 +};
82187 +
82188 +struct size_overflow_hash _000313_hash = {
82189 + .next = NULL,
82190 + .name = "kfifo_copy_from_user",
82191 + .param = PARAM3,
82192 +};
82193 +
82194 +struct size_overflow_hash _000314_hash = {
82195 + .next = NULL,
82196 + .name = "kmalloc_node",
82197 + .param = PARAM1,
82198 +};
82199 +
82200 +struct size_overflow_hash _000315_hash = {
82201 + .next = NULL,
82202 + .name = "kmalloc_parameter",
82203 + .param = PARAM1,
82204 +};
82205 +
82206 +struct size_overflow_hash _000316_hash = {
82207 + .next = NULL,
82208 + .name = "kmem_alloc",
82209 + .param = PARAM1,
82210 +};
82211 +
82212 +struct size_overflow_hash _000317_hash = {
82213 + .next = NULL,
82214 + .name = "kobj_map",
82215 + .param = PARAM2|PARAM3,
82216 +};
82217 +
82218 +struct size_overflow_hash _000319_hash = {
82219 + .next = NULL,
82220 + .name = "kone_receive",
82221 + .param = PARAM4,
82222 +};
82223 +
82224 +struct size_overflow_hash _000320_hash = {
82225 + .next = NULL,
82226 + .name = "kone_send",
82227 + .param = PARAM4,
82228 +};
82229 +
82230 +struct size_overflow_hash _000321_hash = {
82231 + .next = NULL,
82232 + .name = "krealloc",
82233 + .param = PARAM2,
82234 +};
82235 +
82236 +struct size_overflow_hash _000322_hash = {
82237 + .next = NULL,
82238 + .name = "kvmalloc",
82239 + .param = PARAM1,
82240 +};
82241 +
82242 +struct size_overflow_hash _000323_hash = {
82243 + .next = NULL,
82244 + .name = "kvm_read_guest_atomic",
82245 + .param = PARAM4,
82246 +};
82247 +
82248 +struct size_overflow_hash _000324_hash = {
82249 + .next = NULL,
82250 + .name = "kvm_read_guest_cached",
82251 + .param = PARAM4,
82252 +};
82253 +
82254 +struct size_overflow_hash _000325_hash = {
82255 + .next = NULL,
82256 + .name = "kvm_read_guest_page",
82257 + .param = PARAM5,
82258 +};
82259 +
82260 +struct size_overflow_hash _000326_hash = {
82261 + .next = NULL,
82262 + .name = "kzalloc",
82263 + .param = PARAM1,
82264 +};
82265 +
82266 +struct size_overflow_hash _000327_hash = {
82267 + .next = NULL,
82268 + .name = "l2cap_sock_setsockopt",
82269 + .param = PARAM5,
82270 +};
82271 +
82272 +struct size_overflow_hash _000328_hash = {
82273 + .next = NULL,
82274 + .name = "l2cap_sock_setsockopt_old",
82275 + .param = PARAM4,
82276 +};
82277 +
82278 +struct size_overflow_hash _000329_hash = {
82279 + .next = NULL,
82280 + .name = "lane2_associate_req",
82281 + .param = PARAM4,
82282 +};
82283 +
82284 +struct size_overflow_hash _000330_hash = {
82285 + .next = NULL,
82286 + .name = "lbs_debugfs_write",
82287 + .param = PARAM3,
82288 +};
82289 +
82290 +struct size_overflow_hash _000331_hash = {
82291 + .next = &_000014_hash,
82292 + .name = "lcd_write",
82293 + .param = PARAM3,
82294 +};
82295 +
82296 +struct size_overflow_hash _000332_hash = {
82297 + .next = NULL,
82298 + .name = "ldm_frag_add",
82299 + .param = PARAM2,
82300 +};
82301 +
82302 +struct size_overflow_hash _000333_hash = {
82303 + .next = NULL,
82304 + .name = "__lgread",
82305 + .param = PARAM4,
82306 +};
82307 +
82308 +struct size_overflow_hash _000334_hash = {
82309 + .next = NULL,
82310 + .name = "libipw_alloc_txb",
82311 + .param = PARAM1,
82312 +};
82313 +
82314 +struct size_overflow_hash _000335_hash = {
82315 + .next = NULL,
82316 + .name = "link_send_sections_long",
82317 + .param = PARAM4,
82318 +};
82319 +
82320 +struct size_overflow_hash _000336_hash = {
82321 + .next = NULL,
82322 + .name = "listxattr",
82323 + .param = PARAM3,
82324 +};
82325 +
82326 +struct size_overflow_hash _000337_hash = {
82327 + .next = NULL,
82328 + .name = "LoadBitmap",
82329 + .param = PARAM2,
82330 +};
82331 +
82332 +struct size_overflow_hash _000338_hash = {
82333 + .next = NULL,
82334 + .name = "load_msg",
82335 + .param = PARAM2,
82336 +};
82337 +
82338 +struct size_overflow_hash _000339_hash = {
82339 + .next = NULL,
82340 + .name = "lpfc_debugfs_dif_err_write",
82341 + .param = PARAM3,
82342 +};
82343 +
82344 +struct size_overflow_hash _000340_hash = {
82345 + .next = NULL,
82346 + .name = "lp_write",
82347 + .param = PARAM3,
82348 +};
82349 +
82350 +struct size_overflow_hash _000341_hash = {
82351 + .next = NULL,
82352 + .name = "mb_cache_create",
82353 + .param = PARAM2,
82354 +};
82355 +
82356 +struct size_overflow_hash _000342_hash = {
82357 + .next = NULL,
82358 + .name = "mce_write",
82359 + .param = PARAM3,
82360 +};
82361 +
82362 +struct size_overflow_hash _000343_hash = {
82363 + .next = NULL,
82364 + .name = "mcs7830_get_reg",
82365 + .param = PARAM3,
82366 +};
82367 +
82368 +struct size_overflow_hash _000344_hash = {
82369 + .next = NULL,
82370 + .name = "mcs7830_set_reg",
82371 + .param = PARAM3,
82372 +};
82373 +
82374 +struct size_overflow_hash _000345_hash = {
82375 + .next = NULL,
82376 + .name = "memcpy_fromiovec",
82377 + .param = PARAM3,
82378 +};
82379 +
82380 +struct size_overflow_hash _000346_hash = {
82381 + .next = NULL,
82382 + .name = "memcpy_fromiovecend",
82383 + .param = PARAM3|PARAM4,
82384 +};
82385 +
82386 +struct size_overflow_hash _000348_hash = {
82387 + .next = NULL,
82388 + .name = "mempool_kmalloc",
82389 + .param = PARAM2,
82390 +};
82391 +
82392 +struct size_overflow_hash _000349_hash = {
82393 + .next = NULL,
82394 + .name = "mempool_resize",
82395 + .param = PARAM2,
82396 +};
82397 +
82398 +struct size_overflow_hash _000350_hash = {
82399 + .next = NULL,
82400 + .name = "mem_rw",
82401 + .param = PARAM3,
82402 +};
82403 +
82404 +struct size_overflow_hash _000351_hash = {
82405 + .next = NULL,
82406 + .name = "mgmt_control",
82407 + .param = PARAM3,
82408 +};
82409 +
82410 +struct size_overflow_hash _000352_hash = {
82411 + .next = NULL,
82412 + .name = "mgmt_pending_add",
82413 + .param = PARAM5,
82414 +};
82415 +
82416 +struct size_overflow_hash _000353_hash = {
82417 + .next = NULL,
82418 + .name = "mlx4_ib_alloc_fast_reg_page_list",
82419 + .param = PARAM2,
82420 +};
82421 +
82422 +struct size_overflow_hash _000354_hash = {
82423 + .next = NULL,
82424 + .name = "mmc_alloc_sg",
82425 + .param = PARAM1,
82426 +};
82427 +
82428 +struct size_overflow_hash _000355_hash = {
82429 + .next = NULL,
82430 + .name = "mmc_send_bus_test",
82431 + .param = PARAM4,
82432 +};
82433 +
82434 +struct size_overflow_hash _000356_hash = {
82435 + .next = NULL,
82436 + .name = "mmc_send_cxd_data",
82437 + .param = PARAM5,
82438 +};
82439 +
82440 +struct size_overflow_hash _000357_hash = {
82441 + .next = NULL,
82442 + .name = "module_alloc_update_bounds",
82443 + .param = PARAM1,
82444 +};
82445 +
82446 +struct size_overflow_hash _000358_hash = {
82447 + .next = NULL,
82448 + .name = "move_addr_to_kernel",
82449 + .param = PARAM2,
82450 +};
82451 +
82452 +struct size_overflow_hash _000359_hash = {
82453 + .next = NULL,
82454 + .name = "mpi_alloc_limb_space",
82455 + .param = PARAM1,
82456 +};
82457 +
82458 +struct size_overflow_hash _000360_hash = {
82459 + .next = NULL,
82460 + .name = "mpi_resize",
82461 + .param = PARAM2,
82462 +};
82463 +
82464 +struct size_overflow_hash _000361_hash = {
82465 + .next = NULL,
82466 + .name = "mptctl_getiocinfo",
82467 + .param = PARAM2,
82468 +};
82469 +
82470 +struct size_overflow_hash _000362_hash = {
82471 + .next = NULL,
82472 + .name = "mtdchar_readoob",
82473 + .param = PARAM4,
82474 +};
82475 +
82476 +struct size_overflow_hash _000363_hash = {
82477 + .next = NULL,
82478 + .name = "mtdchar_write",
82479 + .param = PARAM3,
82480 +};
82481 +
82482 +struct size_overflow_hash _000364_hash = {
82483 + .next = NULL,
82484 + .name = "mtdchar_writeoob",
82485 + .param = PARAM4,
82486 +};
82487 +
82488 +struct size_overflow_hash _000365_hash = {
82489 + .next = NULL,
82490 + .name = "mtd_device_parse_register",
82491 + .param = PARAM5,
82492 +};
82493 +
82494 +struct size_overflow_hash _000366_hash = {
82495 + .next = NULL,
82496 + .name = "mtf_test_write",
82497 + .param = PARAM3,
82498 +};
82499 +
82500 +struct size_overflow_hash _000367_hash = {
82501 + .next = NULL,
82502 + .name = "mtrr_write",
82503 + .param = PARAM3,
82504 +};
82505 +
82506 +struct size_overflow_hash _000368_hash = {
82507 + .next = NULL,
82508 + .name = "musb_test_mode_write",
82509 + .param = PARAM3,
82510 +};
82511 +
82512 +struct size_overflow_hash _000369_hash = {
82513 + .next = NULL,
82514 + .name = "mwifiex_get_common_rates",
82515 + .param = PARAM3,
82516 +};
82517 +
82518 +struct size_overflow_hash _000370_hash = {
82519 + .next = NULL,
82520 + .name = "mwifiex_update_curr_bss_params",
82521 + .param = PARAM5,
82522 +};
82523 +
82524 +struct size_overflow_hash _000371_hash = {
82525 + .next = NULL,
82526 + .name = "nand_bch_init",
82527 + .param = PARAM2|PARAM3,
82528 +};
82529 +
82530 +struct size_overflow_hash _000373_hash = {
82531 + .next = NULL,
82532 + .name = "ncp_file_write",
82533 + .param = PARAM3,
82534 +};
82535 +
82536 +struct size_overflow_hash _000374_hash = {
82537 + .next = NULL,
82538 + .name = "ncp__vol2io",
82539 + .param = PARAM5,
82540 +};
82541 +
82542 +struct size_overflow_hash _000375_hash = {
82543 + .next = NULL,
82544 + .name = "nes_alloc_fast_reg_page_list",
82545 + .param = PARAM2,
82546 +};
82547 +
82548 +struct size_overflow_hash _000376_hash = {
82549 + .next = NULL,
82550 + .name = "nfc_targets_found",
82551 + .param = PARAM3,
82552 +};
82553 +
82554 +struct size_overflow_hash _000377_hash = {
82555 + .next = NULL,
82556 + .name = "nfs4_acl_new",
82557 + .param = PARAM1,
82558 +};
82559 +
82560 +struct size_overflow_hash _000378_hash = {
82561 + .next = NULL,
82562 + .name = "nfs4_write_cached_acl",
82563 + .param = PARAM4,
82564 +};
82565 +
82566 +struct size_overflow_hash _000379_hash = {
82567 + .next = NULL,
82568 + .name = "nfsd_cache_update",
82569 + .param = PARAM3,
82570 +};
82571 +
82572 +struct size_overflow_hash _000380_hash = {
82573 + .next = NULL,
82574 + .name = "nfsd_symlink",
82575 + .param = PARAM6,
82576 +};
82577 +
82578 +struct size_overflow_hash _000381_hash = {
82579 + .next = NULL,
82580 + .name = "nfs_idmap_get_desc",
82581 + .param = PARAM2|PARAM4,
82582 +};
82583 +
82584 +struct size_overflow_hash _000383_hash = {
82585 + .next = NULL,
82586 + .name = "nfs_readdir_make_qstr",
82587 + .param = PARAM3,
82588 +};
82589 +
82590 +struct size_overflow_hash _000384_hash = {
82591 + .next = NULL,
82592 + .name = "note_last_dentry",
82593 + .param = PARAM3,
82594 +};
82595 +
82596 +struct size_overflow_hash _000385_hash = {
82597 + .next = NULL,
82598 + .name = "ntfs_copy_from_user",
82599 + .param = PARAM3|PARAM5,
82600 +};
82601 +
82602 +struct size_overflow_hash _000387_hash = {
82603 + .next = NULL,
82604 + .name = "__ntfs_copy_from_user_iovec_inatomic",
82605 + .param = PARAM3|PARAM4,
82606 +};
82607 +
82608 +struct size_overflow_hash _000389_hash = {
82609 + .next = NULL,
82610 + .name = "ntfs_ucstonls",
82611 + .param = PARAM3,
82612 +};
82613 +
82614 +struct size_overflow_hash _000390_hash = {
82615 + .next = NULL,
82616 + .name = "nvme_alloc_iod",
82617 + .param = PARAM1,
82618 +};
82619 +
82620 +struct size_overflow_hash _000391_hash = {
82621 + .next = NULL,
82622 + .name = "nvram_write",
82623 + .param = PARAM3,
82624 +};
82625 +
82626 +struct size_overflow_hash _000392_hash = {
82627 + .next = NULL,
82628 + .name = "o2hb_debug_create",
82629 + .param = PARAM4,
82630 +};
82631 +
82632 +struct size_overflow_hash _000393_hash = {
82633 + .next = NULL,
82634 + .name = "o2net_send_message_vec",
82635 + .param = PARAM4,
82636 +};
82637 +
82638 +struct size_overflow_hash _000394_hash = {
82639 + .next = NULL,
82640 + .name = "ocfs2_control_cfu",
82641 + .param = PARAM2,
82642 +};
82643 +
82644 +struct size_overflow_hash _000395_hash = {
82645 + .next = NULL,
82646 + .name = "oom_adjust_write",
82647 + .param = PARAM3,
82648 +};
82649 +
82650 +struct size_overflow_hash _000396_hash = {
82651 + .next = NULL,
82652 + .name = "oom_score_adj_write",
82653 + .param = PARAM3,
82654 +};
82655 +
82656 +struct size_overflow_hash _000397_hash = {
82657 + .next = NULL,
82658 + .name = "opera1_xilinx_rw",
82659 + .param = PARAM5,
82660 +};
82661 +
82662 +struct size_overflow_hash _000398_hash = {
82663 + .next = NULL,
82664 + .name = "oprofilefs_ulong_from_user",
82665 + .param = PARAM3,
82666 +};
82667 +
82668 +struct size_overflow_hash _000399_hash = {
82669 + .next = NULL,
82670 + .name = "opticon_write",
82671 + .param = PARAM4,
82672 +};
82673 +
82674 +struct size_overflow_hash _000400_hash = {
82675 + .next = NULL,
82676 + .name = "orig_node_add_if",
82677 + .param = PARAM2,
82678 +};
82679 +
82680 +struct size_overflow_hash _000401_hash = {
82681 + .next = NULL,
82682 + .name = "orig_node_del_if",
82683 + .param = PARAM2,
82684 +};
82685 +
82686 +struct size_overflow_hash _000402_hash = {
82687 + .next = NULL,
82688 + .name = "p9_check_zc_errors",
82689 + .param = PARAM4,
82690 +};
82691 +
82692 +struct size_overflow_hash _000403_hash = {
82693 + .next = NULL,
82694 + .name = "packet_buffer_init",
82695 + .param = PARAM2,
82696 +};
82697 +
82698 +struct size_overflow_hash _000404_hash = {
82699 + .next = NULL,
82700 + .name = "packet_setsockopt",
82701 + .param = PARAM5,
82702 +};
82703 +
82704 +struct size_overflow_hash _000405_hash = {
82705 + .next = NULL,
82706 + .name = "parse_command",
82707 + .param = PARAM2,
82708 +};
82709 +
82710 +struct size_overflow_hash _000406_hash = {
82711 + .next = NULL,
82712 + .name = "pcbit_writecmd",
82713 + .param = PARAM2,
82714 +};
82715 +
82716 +struct size_overflow_hash _000407_hash = {
82717 + .next = NULL,
82718 + .name = "pcmcia_replace_cis",
82719 + .param = PARAM3,
82720 +};
82721 +
82722 +struct size_overflow_hash _000408_hash = {
82723 + .next = NULL,
82724 + .name = "pgctrl_write",
82725 + .param = PARAM3,
82726 +};
82727 +
82728 +struct size_overflow_hash _000409_hash = {
82729 + .next = NULL,
82730 + .name = "pg_write",
82731 + .param = PARAM3,
82732 +};
82733 +
82734 +struct size_overflow_hash _000410_hash = {
82735 + .next = NULL,
82736 + .name = "pidlist_allocate",
82737 + .param = PARAM1,
82738 +};
82739 +
82740 +struct size_overflow_hash _000411_hash = {
82741 + .next = NULL,
82742 + .name = "pipe_iov_copy_from_user",
82743 + .param = PARAM3,
82744 +};
82745 +
82746 +struct size_overflow_hash _000412_hash = {
82747 + .next = NULL,
82748 + .name = "pipe_iov_copy_to_user",
82749 + .param = PARAM3,
82750 +};
82751 +
82752 +struct size_overflow_hash _000413_hash = {
82753 + .next = NULL,
82754 + .name = "pkt_add",
82755 + .param = PARAM3,
82756 +};
82757 +
82758 +struct size_overflow_hash _000414_hash = {
82759 + .next = NULL,
82760 + .name = "pktgen_if_write",
82761 + .param = PARAM3,
82762 +};
82763 +
82764 +struct size_overflow_hash _000415_hash = {
82765 + .next = NULL,
82766 + .name = "platform_device_add_data",
82767 + .param = PARAM3,
82768 +};
82769 +
82770 +struct size_overflow_hash _000416_hash = {
82771 + .next = NULL,
82772 + .name = "platform_device_add_resources",
82773 + .param = PARAM3,
82774 +};
82775 +
82776 +struct size_overflow_hash _000417_hash = {
82777 + .next = NULL,
82778 + .name = "pm_qos_power_write",
82779 + .param = PARAM3,
82780 +};
82781 +
82782 +struct size_overflow_hash _000418_hash = {
82783 + .next = NULL,
82784 + .name = "pnpbios_proc_write",
82785 + .param = PARAM3,
82786 +};
82787 +
82788 +struct size_overflow_hash _000419_hash = {
82789 + .next = NULL,
82790 + .name = "pool_allocate",
82791 + .param = PARAM3,
82792 +};
82793 +
82794 +struct size_overflow_hash _000420_hash = {
82795 + .next = NULL,
82796 + .name = "posix_acl_alloc",
82797 + .param = PARAM1,
82798 +};
82799 +
82800 +struct size_overflow_hash _000421_hash = {
82801 + .next = NULL,
82802 + .name = "ppp_cp_parse_cr",
82803 + .param = PARAM4,
82804 +};
82805 +
82806 +struct size_overflow_hash _000422_hash = {
82807 + .next = NULL,
82808 + .name = "ppp_write",
82809 + .param = PARAM3,
82810 +};
82811 +
82812 +struct size_overflow_hash _000423_hash = {
82813 + .next = NULL,
82814 + .name = "pp_read",
82815 + .param = PARAM3,
82816 +};
82817 +
82818 +struct size_overflow_hash _000424_hash = {
82819 + .next = NULL,
82820 + .name = "pp_write",
82821 + .param = PARAM3,
82822 +};
82823 +
82824 +struct size_overflow_hash _000425_hash = {
82825 + .next = NULL,
82826 + .name = "printer_req_alloc",
82827 + .param = PARAM2,
82828 +};
82829 +
82830 +struct size_overflow_hash _000426_hash = {
82831 + .next = NULL,
82832 + .name = "printer_write",
82833 + .param = PARAM3,
82834 +};
82835 +
82836 +struct size_overflow_hash _000427_hash = {
82837 + .next = NULL,
82838 + .name = "prism2_set_genericelement",
82839 + .param = PARAM3,
82840 +};
82841 +
82842 +struct size_overflow_hash _000428_hash = {
82843 + .next = NULL,
82844 + .name = "__probe_kernel_read",
82845 + .param = PARAM3,
82846 +};
82847 +
82848 +struct size_overflow_hash _000429_hash = {
82849 + .next = NULL,
82850 + .name = "__probe_kernel_write",
82851 + .param = PARAM3,
82852 +};
82853 +
82854 +struct size_overflow_hash _000430_hash = {
82855 + .next = NULL,
82856 + .name = "proc_coredump_filter_write",
82857 + .param = PARAM3,
82858 +};
82859 +
82860 +struct size_overflow_hash _000431_hash = {
82861 + .next = NULL,
82862 + .name = "_proc_do_string",
82863 + .param = PARAM2,
82864 +};
82865 +
82866 +struct size_overflow_hash _000432_hash = {
82867 + .next = NULL,
82868 + .name = "process_vm_rw_pages",
82869 + .param = PARAM5|PARAM6,
82870 +};
82871 +
82872 +struct size_overflow_hash _000434_hash = {
82873 + .next = NULL,
82874 + .name = "proc_loginuid_write",
82875 + .param = PARAM3,
82876 +};
82877 +
82878 +struct size_overflow_hash _000435_hash = {
82879 + .next = NULL,
82880 + .name = "proc_pid_attr_write",
82881 + .param = PARAM3,
82882 +};
82883 +
82884 +struct size_overflow_hash _000436_hash = {
82885 + .next = NULL,
82886 + .name = "proc_scsi_devinfo_write",
82887 + .param = PARAM3,
82888 +};
82889 +
82890 +struct size_overflow_hash _000437_hash = {
82891 + .next = NULL,
82892 + .name = "proc_scsi_write",
82893 + .param = PARAM3,
82894 +};
82895 +
82896 +struct size_overflow_hash _000438_hash = {
82897 + .next = NULL,
82898 + .name = "proc_scsi_write_proc",
82899 + .param = PARAM3,
82900 +};
82901 +
82902 +struct size_overflow_hash _000439_hash = {
82903 + .next = NULL,
82904 + .name = "pstore_mkfile",
82905 + .param = PARAM5,
82906 +};
82907 +
82908 +struct size_overflow_hash _000440_hash = {
82909 + .next = NULL,
82910 + .name = "pti_char_write",
82911 + .param = PARAM3,
82912 +};
82913 +
82914 +struct size_overflow_hash _000441_hash = {
82915 + .next = NULL,
82916 + .name = "ptrace_writedata",
82917 + .param = PARAM4,
82918 +};
82919 +
82920 +struct size_overflow_hash _000442_hash = {
82921 + .next = NULL,
82922 + .name = "pt_write",
82923 + .param = PARAM3,
82924 +};
82925 +
82926 +struct size_overflow_hash _000443_hash = {
82927 + .next = NULL,
82928 + .name = "pvr2_ioread_set_sync_key",
82929 + .param = PARAM3,
82930 +};
82931 +
82932 +struct size_overflow_hash _000444_hash = {
82933 + .next = NULL,
82934 + .name = "pvr2_stream_buffer_count",
82935 + .param = PARAM2,
82936 +};
82937 +
82938 +struct size_overflow_hash _000445_hash = {
82939 + .next = NULL,
82940 + .name = "qdisc_class_hash_alloc",
82941 + .param = PARAM1,
82942 +};
82943 +
82944 +struct size_overflow_hash _000446_hash = {
82945 + .next = NULL,
82946 + .name = "r3964_write",
82947 + .param = PARAM4,
82948 +};
82949 +
82950 +struct size_overflow_hash _000447_hash = {
82951 + .next = NULL,
82952 + .name = "raw_seticmpfilter",
82953 + .param = PARAM3,
82954 +};
82955 +
82956 +struct size_overflow_hash _000448_hash = {
82957 + .next = NULL,
82958 + .name = "raw_setsockopt",
82959 + .param = PARAM5,
82960 +};
82961 +
82962 +struct size_overflow_hash _000449_hash = {
82963 + .next = NULL,
82964 + .name = "rawv6_seticmpfilter",
82965 + .param = PARAM5,
82966 +};
82967 +
82968 +struct size_overflow_hash _000450_hash = {
82969 + .next = NULL,
82970 + .name = "ray_cs_essid_proc_write",
82971 + .param = PARAM3,
82972 +};
82973 +
82974 +struct size_overflow_hash _000451_hash = {
82975 + .next = NULL,
82976 + .name = "rbd_add",
82977 + .param = PARAM3,
82978 +};
82979 +
82980 +struct size_overflow_hash _000452_hash = {
82981 + .next = NULL,
82982 + .name = "rbd_snap_add",
82983 + .param = PARAM4,
82984 +};
82985 +
82986 +struct size_overflow_hash _000453_hash = {
82987 + .next = NULL,
82988 + .name = "rdma_set_ib_paths",
82989 + .param = PARAM3,
82990 +};
82991 +
82992 +struct size_overflow_hash _000454_hash = {
82993 + .next = NULL,
82994 + .name = "rds_page_copy_user",
82995 + .param = PARAM4,
82996 +};
82997 +
82998 +struct size_overflow_hash _000455_hash = {
82999 + .next = NULL,
83000 + .name = "read",
83001 + .param = PARAM3,
83002 +};
83003 +
83004 +struct size_overflow_hash _000456_hash = {
83005 + .next = NULL,
83006 + .name = "read_buf",
83007 + .param = PARAM2,
83008 +};
83009 +
83010 +struct size_overflow_hash _000457_hash = {
83011 + .next = NULL,
83012 + .name = "read_cis_cache",
83013 + .param = PARAM4,
83014 +};
83015 +
83016 +struct size_overflow_hash _000458_hash = {
83017 + .next = NULL,
83018 + .name = "realloc_buffer",
83019 + .param = PARAM2,
83020 +};
83021 +
83022 +struct size_overflow_hash _000459_hash = {
83023 + .next = NULL,
83024 + .name = "realloc_packet_buffer",
83025 + .param = PARAM2,
83026 +};
83027 +
83028 +struct size_overflow_hash _000460_hash = {
83029 + .next = NULL,
83030 + .name = "receive_DataRequest",
83031 + .param = PARAM3,
83032 +};
83033 +
83034 +struct size_overflow_hash _000461_hash = {
83035 + .next = NULL,
83036 + .name = "recent_mt_proc_write",
83037 + .param = PARAM3,
83038 +};
83039 +
83040 +struct size_overflow_hash _000462_hash = {
83041 + .next = NULL,
83042 + .name = "regmap_access_read_file",
83043 + .param = PARAM3,
83044 +};
83045 +
83046 +struct size_overflow_hash _000463_hash = {
83047 + .next = NULL,
83048 + .name = "regmap_bulk_write",
83049 + .param = PARAM4,
83050 +};
83051 +
83052 +struct size_overflow_hash _000464_hash = {
83053 + .next = NULL,
83054 + .name = "regmap_map_read_file",
83055 + .param = PARAM3,
83056 +};
83057 +
83058 +struct size_overflow_hash _000465_hash = {
83059 + .next = NULL,
83060 + .name = "regset_tls_set",
83061 + .param = PARAM4,
83062 +};
83063 +
83064 +struct size_overflow_hash _000466_hash = {
83065 + .next = NULL,
83066 + .name = "reg_w_buf",
83067 + .param = PARAM3,
83068 +};
83069 +
83070 +struct size_overflow_hash _000467_hash = {
83071 + .next = NULL,
83072 + .name = "reg_w_ixbuf",
83073 + .param = PARAM4,
83074 +};
83075 +
83076 +struct size_overflow_hash _000468_hash = {
83077 + .next = NULL,
83078 + .name = "remote_settings_file_write",
83079 + .param = PARAM3,
83080 +};
83081 +
83082 +struct size_overflow_hash _000469_hash = {
83083 + .next = NULL,
83084 + .name = "request_key_auth_new",
83085 + .param = PARAM3,
83086 +};
83087 +
83088 +struct size_overflow_hash _000470_hash = {
83089 + .next = NULL,
83090 + .name = "restore_i387_fxsave",
83091 + .param = PARAM2,
83092 +};
83093 +
83094 +struct size_overflow_hash _000471_hash = {
83095 + .next = NULL,
83096 + .name = "revalidate",
83097 + .param = PARAM2,
83098 +};
83099 +
83100 +struct size_overflow_hash _000472_hash = {
83101 + .next = NULL,
83102 + .name = "rfcomm_sock_setsockopt",
83103 + .param = PARAM5,
83104 +};
83105 +
83106 +struct size_overflow_hash _000473_hash = {
83107 + .next = NULL,
83108 + .name = "rndis_add_response",
83109 + .param = PARAM2,
83110 +};
83111 +
83112 +struct size_overflow_hash _000474_hash = {
83113 + .next = NULL,
83114 + .name = "rndis_set_oid",
83115 + .param = PARAM4,
83116 +};
83117 +
83118 +struct size_overflow_hash _000475_hash = {
83119 + .next = NULL,
83120 + .name = "rngapi_reset",
83121 + .param = PARAM3,
83122 +};
83123 +
83124 +struct size_overflow_hash _000476_hash = {
83125 + .next = NULL,
83126 + .name = "roccat_common_receive",
83127 + .param = PARAM4,
83128 +};
83129 +
83130 +struct size_overflow_hash _000477_hash = {
83131 + .next = NULL,
83132 + .name = "roccat_common_send",
83133 + .param = PARAM4,
83134 +};
83135 +
83136 +struct size_overflow_hash _000478_hash = {
83137 + .next = NULL,
83138 + .name = "rpc_malloc",
83139 + .param = PARAM2,
83140 +};
83141 +
83142 +struct size_overflow_hash _000479_hash = {
83143 + .next = NULL,
83144 + .name = "rt2x00debug_write_bbp",
83145 + .param = PARAM3,
83146 +};
83147 +
83148 +struct size_overflow_hash _000480_hash = {
83149 + .next = NULL,
83150 + .name = "rt2x00debug_write_csr",
83151 + .param = PARAM3,
83152 +};
83153 +
83154 +struct size_overflow_hash _000481_hash = {
83155 + .next = NULL,
83156 + .name = "rt2x00debug_write_eeprom",
83157 + .param = PARAM3,
83158 +};
83159 +
83160 +struct size_overflow_hash _000482_hash = {
83161 + .next = NULL,
83162 + .name = "rt2x00debug_write_rf",
83163 + .param = PARAM3,
83164 +};
83165 +
83166 +struct size_overflow_hash _000483_hash = {
83167 + .next = NULL,
83168 + .name = "rts51x_read_mem",
83169 + .param = PARAM4,
83170 +};
83171 +
83172 +struct size_overflow_hash _000484_hash = {
83173 + .next = NULL,
83174 + .name = "rts51x_read_status",
83175 + .param = PARAM4,
83176 +};
83177 +
83178 +struct size_overflow_hash _000485_hash = {
83179 + .next = NULL,
83180 + .name = "rts51x_write_mem",
83181 + .param = PARAM4,
83182 +};
83183 +
83184 +struct size_overflow_hash _000486_hash = {
83185 + .next = NULL,
83186 + .name = "rw_copy_check_uvector",
83187 + .param = PARAM3,
83188 +};
83189 +
83190 +struct size_overflow_hash _000487_hash = {
83191 + .next = NULL,
83192 + .name = "rxrpc_request_key",
83193 + .param = PARAM3,
83194 +};
83195 +
83196 +struct size_overflow_hash _000488_hash = {
83197 + .next = NULL,
83198 + .name = "rxrpc_server_keyring",
83199 + .param = PARAM3,
83200 +};
83201 +
83202 +struct size_overflow_hash _000489_hash = {
83203 + .next = NULL,
83204 + .name = "savemem",
83205 + .param = PARAM3,
83206 +};
83207 +
83208 +struct size_overflow_hash _000490_hash = {
83209 + .next = NULL,
83210 + .name = "sb16_copy_from_user",
83211 + .param = PARAM10|PARAM7|PARAM6,
83212 +};
83213 +
83214 +struct size_overflow_hash _000493_hash = {
83215 + .next = NULL,
83216 + .name = "sched_autogroup_write",
83217 + .param = PARAM3,
83218 +};
83219 +
83220 +struct size_overflow_hash _000494_hash = {
83221 + .next = NULL,
83222 + .name = "scsi_mode_select",
83223 + .param = PARAM6,
83224 +};
83225 +
83226 +struct size_overflow_hash _000495_hash = {
83227 + .next = NULL,
83228 + .name = "scsi_tgt_copy_sense",
83229 + .param = PARAM3,
83230 +};
83231 +
83232 +struct size_overflow_hash _000496_hash = {
83233 + .next = NULL,
83234 + .name = "sctp_auth_create_key",
83235 + .param = PARAM1,
83236 +};
83237 +
83238 +struct size_overflow_hash _000497_hash = {
83239 + .next = NULL,
83240 + .name = "sctp_getsockopt_delayed_ack",
83241 + .param = PARAM2,
83242 +};
83243 +
83244 +struct size_overflow_hash _000498_hash = {
83245 + .next = NULL,
83246 + .name = "sctp_getsockopt_local_addrs",
83247 + .param = PARAM2,
83248 +};
83249 +
83250 +struct size_overflow_hash _000499_hash = {
83251 + .next = NULL,
83252 + .name = "sctp_make_abort_user",
83253 + .param = PARAM3,
83254 +};
83255 +
83256 +struct size_overflow_hash _000500_hash = {
83257 + .next = NULL,
83258 + .name = "sctp_setsockopt_active_key",
83259 + .param = PARAM3,
83260 +};
83261 +
83262 +struct size_overflow_hash _000501_hash = {
83263 + .next = NULL,
83264 + .name = "sctp_setsockopt_adaptation_layer",
83265 + .param = PARAM3,
83266 +};
83267 +
83268 +struct size_overflow_hash _000502_hash = {
83269 + .next = NULL,
83270 + .name = "sctp_setsockopt_associnfo",
83271 + .param = PARAM3,
83272 +};
83273 +
83274 +struct size_overflow_hash _000503_hash = {
83275 + .next = NULL,
83276 + .name = "sctp_setsockopt_auth_chunk",
83277 + .param = PARAM3,
83278 +};
83279 +
83280 +struct size_overflow_hash _000504_hash = {
83281 + .next = NULL,
83282 + .name = "sctp_setsockopt_auth_key",
83283 + .param = PARAM3,
83284 +};
83285 +
83286 +struct size_overflow_hash _000505_hash = {
83287 + .next = NULL,
83288 + .name = "sctp_setsockopt_autoclose",
83289 + .param = PARAM3,
83290 +};
83291 +
83292 +struct size_overflow_hash _000506_hash = {
83293 + .next = NULL,
83294 + .name = "sctp_setsockopt_bindx",
83295 + .param = PARAM3,
83296 +};
83297 +
83298 +struct size_overflow_hash _000507_hash = {
83299 + .next = NULL,
83300 + .name = "__sctp_setsockopt_connectx",
83301 + .param = PARAM3,
83302 +};
83303 +
83304 +struct size_overflow_hash _000508_hash = {
83305 + .next = NULL,
83306 + .name = "sctp_setsockopt_context",
83307 + .param = PARAM3,
83308 +};
83309 +
83310 +struct size_overflow_hash _000509_hash = {
83311 + .next = &_000247_hash,
83312 + .name = "sctp_setsockopt_default_send_param",
83313 + .param = PARAM3,
83314 +};
83315 +
83316 +struct size_overflow_hash _000510_hash = {
83317 + .next = NULL,
83318 + .name = "sctp_setsockopt_delayed_ack",
83319 + .param = PARAM3,
83320 +};
83321 +
83322 +struct size_overflow_hash _000511_hash = {
83323 + .next = NULL,
83324 + .name = "sctp_setsockopt_del_key",
83325 + .param = PARAM3,
83326 +};
83327 +
83328 +struct size_overflow_hash _000512_hash = {
83329 + .next = NULL,
83330 + .name = "sctp_setsockopt_events",
83331 + .param = PARAM3,
83332 +};
83333 +
83334 +struct size_overflow_hash _000513_hash = {
83335 + .next = NULL,
83336 + .name = "sctp_setsockopt_hmac_ident",
83337 + .param = PARAM3,
83338 +};
83339 +
83340 +struct size_overflow_hash _000514_hash = {
83341 + .next = NULL,
83342 + .name = "sctp_setsockopt_initmsg",
83343 + .param = PARAM3,
83344 +};
83345 +
83346 +struct size_overflow_hash _000515_hash = {
83347 + .next = NULL,
83348 + .name = "sctp_setsockopt_maxburst",
83349 + .param = PARAM3,
83350 +};
83351 +
83352 +struct size_overflow_hash _000516_hash = {
83353 + .next = NULL,
83354 + .name = "sctp_setsockopt_maxseg",
83355 + .param = PARAM3,
83356 +};
83357 +
83358 +struct size_overflow_hash _000517_hash = {
83359 + .next = NULL,
83360 + .name = "sctp_setsockopt_peer_addr_params",
83361 + .param = PARAM3,
83362 +};
83363 +
83364 +struct size_overflow_hash _000518_hash = {
83365 + .next = NULL,
83366 + .name = "sctp_setsockopt_peer_primary_addr",
83367 + .param = PARAM3,
83368 +};
83369 +
83370 +struct size_overflow_hash _000519_hash = {
83371 + .next = NULL,
83372 + .name = "sctp_setsockopt_rtoinfo",
83373 + .param = PARAM3,
83374 +};
83375 +
83376 +struct size_overflow_hash _000520_hash = {
83377 + .next = NULL,
83378 + .name = "security_context_to_sid_core",
83379 + .param = PARAM2,
83380 +};
83381 +
83382 +struct size_overflow_hash _000521_hash = {
83383 + .next = NULL,
83384 + .name = "sel_commit_bools_write",
83385 + .param = PARAM3,
83386 +};
83387 +
83388 +struct size_overflow_hash _000522_hash = {
83389 + .next = NULL,
83390 + .name = "sel_write_avc_cache_threshold",
83391 + .param = PARAM3,
83392 +};
83393 +
83394 +struct size_overflow_hash _000523_hash = {
83395 + .next = NULL,
83396 + .name = "sel_write_bool",
83397 + .param = PARAM3,
83398 +};
83399 +
83400 +struct size_overflow_hash _000524_hash = {
83401 + .next = NULL,
83402 + .name = "sel_write_checkreqprot",
83403 + .param = PARAM3,
83404 +};
83405 +
83406 +struct size_overflow_hash _000525_hash = {
83407 + .next = NULL,
83408 + .name = "sel_write_disable",
83409 + .param = PARAM3,
83410 +};
83411 +
83412 +struct size_overflow_hash _000526_hash = {
83413 + .next = NULL,
83414 + .name = "sel_write_enforce",
83415 + .param = PARAM3,
83416 +};
83417 +
83418 +struct size_overflow_hash _000527_hash = {
83419 + .next = NULL,
83420 + .name = "sel_write_load",
83421 + .param = PARAM3,
83422 +};
83423 +
83424 +struct size_overflow_hash _000528_hash = {
83425 + .next = NULL,
83426 + .name = "send_bulk_static_data",
83427 + .param = PARAM3,
83428 +};
83429 +
83430 +struct size_overflow_hash _000529_hash = {
83431 + .next = NULL,
83432 + .name = "send_control_msg",
83433 + .param = PARAM6,
83434 +};
83435 +
83436 +struct size_overflow_hash _000530_hash = {
83437 + .next = NULL,
83438 + .name = "set_aoe_iflist",
83439 + .param = PARAM2,
83440 +};
83441 +
83442 +struct size_overflow_hash _000531_hash = {
83443 + .next = NULL,
83444 + .name = "setkey_unaligned",
83445 + .param = PARAM3,
83446 +};
83447 +
83448 +struct size_overflow_hash _000532_hash = {
83449 + .next = NULL,
83450 + .name = "set_registers",
83451 + .param = PARAM3,
83452 +};
83453 +
83454 +struct size_overflow_hash _000533_hash = {
83455 + .next = NULL,
83456 + .name = "setsockopt",
83457 + .param = PARAM5,
83458 +};
83459 +
83460 +struct size_overflow_hash _000534_hash = {
83461 + .next = NULL,
83462 + .name = "setup_req",
83463 + .param = PARAM3,
83464 +};
83465 +
83466 +struct size_overflow_hash _000535_hash = {
83467 + .next = NULL,
83468 + .name = "setup_window",
83469 + .param = PARAM7,
83470 +};
83471 +
83472 +struct size_overflow_hash _000536_hash = {
83473 + .next = NULL,
83474 + .name = "setxattr",
83475 + .param = PARAM4,
83476 +};
83477 +
83478 +struct size_overflow_hash _000537_hash = {
83479 + .next = NULL,
83480 + .name = "sfq_alloc",
83481 + .param = PARAM1,
83482 +};
83483 +
83484 +struct size_overflow_hash _000538_hash = {
83485 + .next = NULL,
83486 + .name = "sg_kmalloc",
83487 + .param = PARAM1,
83488 +};
83489 +
83490 +struct size_overflow_hash _000539_hash = {
83491 + .next = NULL,
83492 + .name = "sgl_map_user_pages",
83493 + .param = PARAM2,
83494 +};
83495 +
83496 +struct size_overflow_hash _000540_hash = {
83497 + .next = NULL,
83498 + .name = "shash_setkey_unaligned",
83499 + .param = PARAM3,
83500 +};
83501 +
83502 +struct size_overflow_hash _000541_hash = {
83503 + .next = NULL,
83504 + .name = "shmem_xattr_alloc",
83505 + .param = PARAM2,
83506 +};
83507 +
83508 +struct size_overflow_hash _000542_hash = {
83509 + .next = NULL,
83510 + .name = "sierra_setup_urb",
83511 + .param = PARAM5,
83512 +};
83513 +
83514 +struct size_overflow_hash _000543_hash = {
83515 + .next = NULL,
83516 + .name = "simple_transaction_get",
83517 + .param = PARAM3,
83518 +};
83519 +
83520 +struct size_overflow_hash _000544_hash = {
83521 + .next = NULL,
83522 + .name = "simple_write_to_buffer",
83523 + .param = PARAM2|PARAM5,
83524 +};
83525 +
83526 +struct size_overflow_hash _000546_hash = {
83527 + .next = NULL,
83528 + .name = "sisusb_send_bulk_msg",
83529 + .param = PARAM3,
83530 +};
83531 +
83532 +struct size_overflow_hash _000547_hash = {
83533 + .next = NULL,
83534 + .name = "skb_add_data",
83535 + .param = PARAM3,
83536 +};
83537 +
83538 +struct size_overflow_hash _000548_hash = {
83539 + .next = NULL,
83540 + .name = "skb_do_copy_data_nocache",
83541 + .param = PARAM5,
83542 +};
83543 +
83544 +struct size_overflow_hash _000549_hash = {
83545 + .next = NULL,
83546 + .name = "sl_alloc_bufs",
83547 + .param = PARAM2,
83548 +};
83549 +
83550 +struct size_overflow_hash _000550_hash = {
83551 + .next = NULL,
83552 + .name = "sl_realloc_bufs",
83553 + .param = PARAM2,
83554 +};
83555 +
83556 +struct size_overflow_hash _000551_hash = {
83557 + .next = NULL,
83558 + .name = "smk_write_ambient",
83559 + .param = PARAM3,
83560 +};
83561 +
83562 +struct size_overflow_hash _000552_hash = {
83563 + .next = NULL,
83564 + .name = "smk_write_cipso",
83565 + .param = PARAM3,
83566 +};
83567 +
83568 +struct size_overflow_hash _000553_hash = {
83569 + .next = NULL,
83570 + .name = "smk_write_direct",
83571 + .param = PARAM3,
83572 +};
83573 +
83574 +struct size_overflow_hash _000554_hash = {
83575 + .next = NULL,
83576 + .name = "smk_write_doi",
83577 + .param = PARAM3,
83578 +};
83579 +
83580 +struct size_overflow_hash _000555_hash = {
83581 + .next = NULL,
83582 + .name = "smk_write_load_list",
83583 + .param = PARAM3,
83584 +};
83585 +
83586 +struct size_overflow_hash _000556_hash = {
83587 + .next = NULL,
83588 + .name = "smk_write_logging",
83589 + .param = PARAM3,
83590 +};
83591 +
83592 +struct size_overflow_hash _000557_hash = {
83593 + .next = NULL,
83594 + .name = "smk_write_netlbladdr",
83595 + .param = PARAM3,
83596 +};
83597 +
83598 +struct size_overflow_hash _000558_hash = {
83599 + .next = NULL,
83600 + .name = "smk_write_onlycap",
83601 + .param = PARAM3,
83602 +};
83603 +
83604 +struct size_overflow_hash _000559_hash = {
83605 + .next = NULL,
83606 + .name = "snd_ctl_elem_user_tlv",
83607 + .param = PARAM3,
83608 +};
83609 +
83610 +struct size_overflow_hash _000560_hash = {
83611 + .next = NULL,
83612 + .name = "snd_emu10k1_fx8010_read",
83613 + .param = PARAM5,
83614 +};
83615 +
83616 +struct size_overflow_hash _000561_hash = {
83617 + .next = NULL,
83618 + .name = "snd_emu10k1_synth_copy_from_user",
83619 + .param = PARAM3|PARAM5,
83620 +};
83621 +
83622 +struct size_overflow_hash _000563_hash = {
83623 + .next = NULL,
83624 + .name = "snd_gus_dram_poke",
83625 + .param = PARAM4,
83626 +};
83627 +
83628 +struct size_overflow_hash _000564_hash = {
83629 + .next = NULL,
83630 + .name = "snd_hdsp_playback_copy",
83631 + .param = PARAM5,
83632 +};
83633 +
83634 +struct size_overflow_hash _000565_hash = {
83635 + .next = NULL,
83636 + .name = "snd_info_entry_write",
83637 + .param = PARAM3,
83638 +};
83639 +
83640 +struct size_overflow_hash _000566_hash = {
83641 + .next = NULL,
83642 + .name = "snd_korg1212_copy_from",
83643 + .param = PARAM6,
83644 +};
83645 +
83646 +struct size_overflow_hash _000567_hash = {
83647 + .next = NULL,
83648 + .name = "snd_mem_proc_write",
83649 + .param = PARAM3,
83650 +};
83651 +
83652 +struct size_overflow_hash _000568_hash = {
83653 + .next = NULL,
83654 + .name = "snd_midi_channel_init_set",
83655 + .param = PARAM1,
83656 +};
83657 +
83658 +struct size_overflow_hash _000569_hash = {
83659 + .next = NULL,
83660 + .name = "snd_midi_event_new",
83661 + .param = PARAM1,
83662 +};
83663 +
83664 +struct size_overflow_hash _000570_hash = {
83665 + .next = NULL,
83666 + .name = "snd_opl4_mem_proc_write",
83667 + .param = PARAM5,
83668 +};
83669 +
83670 +struct size_overflow_hash _000571_hash = {
83671 + .next = NULL,
83672 + .name = "snd_pcm_aio_read",
83673 + .param = PARAM3,
83674 +};
83675 +
83676 +struct size_overflow_hash _000572_hash = {
83677 + .next = NULL,
83678 + .name = "snd_pcm_aio_write",
83679 + .param = PARAM3,
83680 +};
83681 +
83682 +struct size_overflow_hash _000573_hash = {
83683 + .next = NULL,
83684 + .name = "snd_pcm_oss_write1",
83685 + .param = PARAM3,
83686 +};
83687 +
83688 +struct size_overflow_hash _000574_hash = {
83689 + .next = NULL,
83690 + .name = "snd_pcm_oss_write2",
83691 + .param = PARAM3,
83692 +};
83693 +
83694 +struct size_overflow_hash _000575_hash = {
83695 + .next = NULL,
83696 + .name = "snd_rawmidi_kernel_write1",
83697 + .param = PARAM4,
83698 +};
83699 +
83700 +struct size_overflow_hash _000576_hash = {
83701 + .next = NULL,
83702 + .name = "snd_rme9652_playback_copy",
83703 + .param = PARAM5,
83704 +};
83705 +
83706 +struct size_overflow_hash _000577_hash = {
83707 + .next = NULL,
83708 + .name = "snd_sb_csp_load_user",
83709 + .param = PARAM3,
83710 +};
83711 +
83712 +struct size_overflow_hash _000578_hash = {
83713 + .next = NULL,
83714 + .name = "snd_usb_ctl_msg",
83715 + .param = PARAM8,
83716 +};
83717 +
83718 +struct size_overflow_hash _000579_hash = {
83719 + .next = NULL,
83720 + .name = "sock_bindtodevice",
83721 + .param = PARAM3,
83722 +};
83723 +
83724 +struct size_overflow_hash _000580_hash = {
83725 + .next = NULL,
83726 + .name = "sock_kmalloc",
83727 + .param = PARAM2,
83728 +};
83729 +
83730 +struct size_overflow_hash _000581_hash = {
83731 + .next = NULL,
83732 + .name = "spidev_write",
83733 + .param = PARAM3,
83734 +};
83735 +
83736 +struct size_overflow_hash _000582_hash = {
83737 + .next = NULL,
83738 + .name = "squashfs_read_table",
83739 + .param = PARAM3,
83740 +};
83741 +
83742 +struct size_overflow_hash _000583_hash = {
83743 + .next = NULL,
83744 + .name = "srpt_alloc_ioctx",
83745 + .param = PARAM2|PARAM3,
83746 +};
83747 +
83748 +struct size_overflow_hash _000585_hash = {
83749 + .next = NULL,
83750 + .name = "srpt_alloc_ioctx_ring",
83751 + .param = PARAM2,
83752 +};
83753 +
83754 +struct size_overflow_hash _000586_hash = {
83755 + .next = NULL,
83756 + .name = "st5481_setup_isocpipes",
83757 + .param = PARAM6|PARAM4,
83758 +};
83759 +
83760 +struct size_overflow_hash _000587_hash = {
83761 + .next = NULL,
83762 + .name = "sta_agg_status_write",
83763 + .param = PARAM3,
83764 +};
83765 +
83766 +struct size_overflow_hash _000588_hash = {
83767 + .next = NULL,
83768 + .name = "svc_setsockopt",
83769 + .param = PARAM5,
83770 +};
83771 +
83772 +struct size_overflow_hash _000589_hash = {
83773 + .next = NULL,
83774 + .name = "sys_add_key",
83775 + .param = PARAM4,
83776 +};
83777 +
83778 +struct size_overflow_hash _000590_hash = {
83779 + .next = NULL,
83780 + .name = "sys_modify_ldt",
83781 + .param = PARAM3,
83782 +};
83783 +
83784 +struct size_overflow_hash _000591_hash = {
83785 + .next = NULL,
83786 + .name = "sys_semtimedop",
83787 + .param = PARAM3,
83788 +};
83789 +
83790 +struct size_overflow_hash _000592_hash = {
83791 + .next = NULL,
83792 + .name = "sys_setdomainname",
83793 + .param = PARAM2,
83794 +};
83795 +
83796 +struct size_overflow_hash _000593_hash = {
83797 + .next = NULL,
83798 + .name = "sys_sethostname",
83799 + .param = PARAM2,
83800 +};
83801 +
83802 +struct size_overflow_hash _000594_hash = {
83803 + .next = NULL,
83804 + .name = "tda10048_writeregbulk",
83805 + .param = PARAM4,
83806 +};
83807 +
83808 +struct size_overflow_hash _000595_hash = {
83809 + .next = NULL,
83810 + .name = "tipc_log_resize",
83811 + .param = PARAM1,
83812 +};
83813 +
83814 +struct size_overflow_hash _000596_hash = {
83815 + .next = NULL,
83816 + .name = "tomoyo_write_self",
83817 + .param = PARAM3,
83818 +};
83819 +
83820 +struct size_overflow_hash _000597_hash = {
83821 + .next = NULL,
83822 + .name = "tower_write",
83823 + .param = PARAM3,
83824 +};
83825 +
83826 +struct size_overflow_hash _000598_hash = {
83827 + .next = NULL,
83828 + .name = "tpm_write",
83829 + .param = PARAM3,
83830 +};
83831 +
83832 +struct size_overflow_hash _000599_hash = {
83833 + .next = NULL,
83834 + .name = "trusted_instantiate",
83835 + .param = PARAM3,
83836 +};
83837 +
83838 +struct size_overflow_hash _000600_hash = {
83839 + .next = NULL,
83840 + .name = "trusted_update",
83841 + .param = PARAM3,
83842 +};
83843 +
83844 +struct size_overflow_hash _000601_hash = {
83845 + .next = NULL,
83846 + .name = "tt_changes_fill_buffer",
83847 + .param = PARAM3,
83848 +};
83849 +
83850 +struct size_overflow_hash _000602_hash = {
83851 + .next = NULL,
83852 + .name = "tty_buffer_alloc",
83853 + .param = PARAM2,
83854 +};
83855 +
83856 +struct size_overflow_hash _000603_hash = {
83857 + .next = NULL,
83858 + .name = "__tun_chr_ioctl",
83859 + .param = PARAM4,
83860 +};
83861 +
83862 +struct size_overflow_hash _000604_hash = {
83863 + .next = NULL,
83864 + .name = "ubi_more_leb_change_data",
83865 + .param = PARAM4,
83866 +};
83867 +
83868 +struct size_overflow_hash _000605_hash = {
83869 + .next = NULL,
83870 + .name = "ubi_more_update_data",
83871 + .param = PARAM4,
83872 +};
83873 +
83874 +struct size_overflow_hash _000606_hash = {
83875 + .next = NULL,
83876 + .name = "ubi_resize_volume",
83877 + .param = PARAM2,
83878 +};
83879 +
83880 +struct size_overflow_hash _000607_hash = {
83881 + .next = NULL,
83882 + .name = "udf_alloc_i_data",
83883 + .param = PARAM2,
83884 +};
83885 +
83886 +struct size_overflow_hash _000608_hash = {
83887 + .next = NULL,
83888 + .name = "uea_idma_write",
83889 + .param = PARAM3,
83890 +};
83891 +
83892 +struct size_overflow_hash _000609_hash = {
83893 + .next = NULL,
83894 + .name = "uea_request",
83895 + .param = PARAM4,
83896 +};
83897 +
83898 +struct size_overflow_hash _000610_hash = {
83899 + .next = NULL,
83900 + .name = "uea_send_modem_cmd",
83901 + .param = PARAM3,
83902 +};
83903 +
83904 +struct size_overflow_hash _000611_hash = {
83905 + .next = NULL,
83906 + .name = "uio_write",
83907 + .param = PARAM3,
83908 +};
83909 +
83910 +struct size_overflow_hash _000612_hash = {
83911 + .next = NULL,
83912 + .name = "um_idi_write",
83913 + .param = PARAM3,
83914 +};
83915 +
83916 +struct size_overflow_hash _000613_hash = {
83917 + .next = NULL,
83918 + .name = "us122l_ctl_msg",
83919 + .param = PARAM8,
83920 +};
83921 +
83922 +struct size_overflow_hash _000614_hash = {
83923 + .next = NULL,
83924 + .name = "usb_alloc_urb",
83925 + .param = PARAM1,
83926 +};
83927 +
83928 +struct size_overflow_hash _000615_hash = {
83929 + .next = NULL,
83930 + .name = "usblp_new_writeurb",
83931 + .param = PARAM2,
83932 +};
83933 +
83934 +struct size_overflow_hash _000616_hash = {
83935 + .next = NULL,
83936 + .name = "usblp_write",
83937 + .param = PARAM3,
83938 +};
83939 +
83940 +struct size_overflow_hash _000617_hash = {
83941 + .next = NULL,
83942 + .name = "usbtest_alloc_urb",
83943 + .param = PARAM3|PARAM5,
83944 +};
83945 +
83946 +struct size_overflow_hash _000619_hash = {
83947 + .next = NULL,
83948 + .name = "usbtmc_write",
83949 + .param = PARAM3,
83950 +};
83951 +
83952 +struct size_overflow_hash _000620_hash = {
83953 + .next = NULL,
83954 + .name = "user_instantiate",
83955 + .param = PARAM3,
83956 +};
83957 +
83958 +struct size_overflow_hash _000621_hash = {
83959 + .next = NULL,
83960 + .name = "user_update",
83961 + .param = PARAM3,
83962 +};
83963 +
83964 +struct size_overflow_hash _000622_hash = {
83965 + .next = NULL,
83966 + .name = "uvc_simplify_fraction",
83967 + .param = PARAM3,
83968 +};
83969 +
83970 +struct size_overflow_hash _000623_hash = {
83971 + .next = NULL,
83972 + .name = "uwb_rc_cmd_done",
83973 + .param = PARAM4,
83974 +};
83975 +
83976 +struct size_overflow_hash _000624_hash = {
83977 + .next = NULL,
83978 + .name = "uwb_rc_neh_grok_event",
83979 + .param = PARAM3,
83980 +};
83981 +
83982 +struct size_overflow_hash _000625_hash = {
83983 + .next = NULL,
83984 + .name = "v9fs_alloc_rdir_buf",
83985 + .param = PARAM2,
83986 +};
83987 +
83988 +struct size_overflow_hash _000626_hash = {
83989 + .next = NULL,
83990 + .name = "__vb2_perform_fileio",
83991 + .param = PARAM3,
83992 +};
83993 +
83994 +struct size_overflow_hash _000627_hash = {
83995 + .next = NULL,
83996 + .name = "vc_do_resize",
83997 + .param = PARAM3|PARAM4,
83998 +};
83999 +
84000 +struct size_overflow_hash _000629_hash = {
84001 + .next = NULL,
84002 + .name = "vcs_write",
84003 + .param = PARAM3,
84004 +};
84005 +
84006 +struct size_overflow_hash _000630_hash = {
84007 + .next = NULL,
84008 + .name = "vfd_write",
84009 + .param = PARAM3,
84010 +};
84011 +
84012 +struct size_overflow_hash _000631_hash = {
84013 + .next = NULL,
84014 + .name = "vga_arb_write",
84015 + .param = PARAM3,
84016 +};
84017 +
84018 +struct size_overflow_hash _000632_hash = {
84019 + .next = NULL,
84020 + .name = "vga_switcheroo_debugfs_write",
84021 + .param = PARAM3,
84022 +};
84023 +
84024 +struct size_overflow_hash _000633_hash = {
84025 + .next = NULL,
84026 + .name = "vhci_get_user",
84027 + .param = PARAM3,
84028 +};
84029 +
84030 +struct size_overflow_hash _000634_hash = {
84031 + .next = NULL,
84032 + .name = "video_proc_write",
84033 + .param = PARAM3,
84034 +};
84035 +
84036 +struct size_overflow_hash _000635_hash = {
84037 + .next = NULL,
84038 + .name = "vlsi_alloc_ring",
84039 + .param = PARAM3|PARAM4,
84040 +};
84041 +
84042 +struct size_overflow_hash _000637_hash = {
84043 + .next = NULL,
84044 + .name = "__vmalloc",
84045 + .param = PARAM1,
84046 +};
84047 +
84048 +struct size_overflow_hash _000638_hash = {
84049 + .next = NULL,
84050 + .name = "vmalloc_32",
84051 + .param = PARAM1,
84052 +};
84053 +
84054 +struct size_overflow_hash _000639_hash = {
84055 + .next = NULL,
84056 + .name = "vmalloc_32_user",
84057 + .param = PARAM1,
84058 +};
84059 +
84060 +struct size_overflow_hash _000640_hash = {
84061 + .next = NULL,
84062 + .name = "vmalloc_exec",
84063 + .param = PARAM1,
84064 +};
84065 +
84066 +struct size_overflow_hash _000641_hash = {
84067 + .next = NULL,
84068 + .name = "vmalloc_node",
84069 + .param = PARAM1,
84070 +};
84071 +
84072 +struct size_overflow_hash _000642_hash = {
84073 + .next = NULL,
84074 + .name = "__vmalloc_node_flags",
84075 + .param = PARAM1,
84076 +};
84077 +
84078 +struct size_overflow_hash _000643_hash = {
84079 + .next = NULL,
84080 + .name = "vmalloc_user",
84081 + .param = PARAM1,
84082 +};
84083 +
84084 +struct size_overflow_hash _000644_hash = {
84085 + .next = NULL,
84086 + .name = "vol_cdev_direct_write",
84087 + .param = PARAM3,
84088 +};
84089 +
84090 +struct size_overflow_hash _000645_hash = {
84091 + .next = NULL,
84092 + .name = "vp_request_msix_vectors",
84093 + .param = PARAM2,
84094 +};
84095 +
84096 +struct size_overflow_hash _000646_hash = {
84097 + .next = NULL,
84098 + .name = "vring_add_indirect",
84099 + .param = PARAM3|PARAM4,
84100 +};
84101 +
84102 +struct size_overflow_hash _000648_hash = {
84103 + .next = NULL,
84104 + .name = "vring_new_virtqueue",
84105 + .param = PARAM1,
84106 +};
84107 +
84108 +struct size_overflow_hash _000649_hash = {
84109 + .next = NULL,
84110 + .name = "vxge_os_dma_malloc",
84111 + .param = PARAM2,
84112 +};
84113 +
84114 +struct size_overflow_hash _000650_hash = {
84115 + .next = NULL,
84116 + .name = "vxge_os_dma_malloc_async",
84117 + .param = PARAM3,
84118 +};
84119 +
84120 +struct size_overflow_hash _000651_hash = {
84121 + .next = NULL,
84122 + .name = "wdm_write",
84123 + .param = PARAM3,
84124 +};
84125 +
84126 +struct size_overflow_hash _000652_hash = {
84127 + .next = NULL,
84128 + .name = "wiimote_hid_send",
84129 + .param = PARAM3,
84130 +};
84131 +
84132 +struct size_overflow_hash _000653_hash = {
84133 + .next = NULL,
84134 + .name = "wl1273_fm_fops_write",
84135 + .param = PARAM3,
84136 +};
84137 +
84138 +struct size_overflow_hash _000654_hash = {
84139 + .next = NULL,
84140 + .name = "wlc_phy_loadsampletable_nphy",
84141 + .param = PARAM3,
84142 +};
84143 +
84144 +struct size_overflow_hash _000655_hash = {
84145 + .next = NULL,
84146 + .name = "write",
84147 + .param = PARAM3,
84148 +};
84149 +
84150 +struct size_overflow_hash _000656_hash = {
84151 + .next = NULL,
84152 + .name = "write_flush",
84153 + .param = PARAM3,
84154 +};
84155 +
84156 +struct size_overflow_hash _000657_hash = {
84157 + .next = NULL,
84158 + .name = "write_rio",
84159 + .param = PARAM3,
84160 +};
84161 +
84162 +struct size_overflow_hash _000658_hash = {
84163 + .next = NULL,
84164 + .name = "x25_asy_change_mtu",
84165 + .param = PARAM2,
84166 +};
84167 +
84168 +struct size_overflow_hash _000659_hash = {
84169 + .next = NULL,
84170 + .name = "xdi_copy_from_user",
84171 + .param = PARAM4,
84172 +};
84173 +
84174 +struct size_overflow_hash _000660_hash = {
84175 + .next = NULL,
84176 + .name = "xfrm_dst_alloc_copy",
84177 + .param = PARAM3,
84178 +};
84179 +
84180 +struct size_overflow_hash _000661_hash = {
84181 + .next = NULL,
84182 + .name = "xfrm_user_policy",
84183 + .param = PARAM4,
84184 +};
84185 +
84186 +struct size_overflow_hash _000662_hash = {
84187 + .next = NULL,
84188 + .name = "xfs_attrmulti_attr_set",
84189 + .param = PARAM4,
84190 +};
84191 +
84192 +struct size_overflow_hash _000663_hash = {
84193 + .next = NULL,
84194 + .name = "xfs_handle_to_dentry",
84195 + .param = PARAM3,
84196 +};
84197 +
84198 +struct size_overflow_hash _000664_hash = {
84199 + .next = NULL,
84200 + .name = "__xip_file_write",
84201 + .param = PARAM3,
84202 +};
84203 +
84204 +struct size_overflow_hash _000665_hash = {
84205 + .next = NULL,
84206 + .name = "xprt_rdma_allocate",
84207 + .param = PARAM2,
84208 +};
84209 +
84210 +struct size_overflow_hash _000666_hash = {
84211 + .next = NULL,
84212 + .name = "zd_usb_iowrite16v_async",
84213 + .param = PARAM3,
84214 +};
84215 +
84216 +struct size_overflow_hash _000667_hash = {
84217 + .next = NULL,
84218 + .name = "zd_usb_read_fw",
84219 + .param = PARAM4,
84220 +};
84221 +
84222 +struct size_overflow_hash _000668_hash = {
84223 + .next = NULL,
84224 + .name = "zerocopy_sg_from_iovec",
84225 + .param = PARAM3,
84226 +};
84227 +
84228 +struct size_overflow_hash _000669_hash = {
84229 + .next = NULL,
84230 + .name = "zoran_write",
84231 + .param = PARAM3,
84232 +};
84233 +
84234 +struct size_overflow_hash _000671_hash = {
84235 + .next = NULL,
84236 + .name = "acpi_ex_allocate_name_string",
84237 + .param = PARAM2,
84238 +};
84239 +
84240 +struct size_overflow_hash _000672_hash = {
84241 + .next = NULL,
84242 + .name = "acpi_os_allocate_zeroed",
84243 + .param = PARAM1,
84244 +};
84245 +
84246 +struct size_overflow_hash _000673_hash = {
84247 + .next = NULL,
84248 + .name = "acpi_ut_initialize_buffer",
84249 + .param = PARAM2,
84250 +};
84251 +
84252 +struct size_overflow_hash _000674_hash = {
84253 + .next = NULL,
84254 + .name = "ad7879_spi_xfer",
84255 + .param = PARAM3,
84256 +};
84257 +
84258 +struct size_overflow_hash _000675_hash = {
84259 + .next = NULL,
84260 + .name = "add_new_gdb",
84261 + .param = PARAM3,
84262 +};
84263 +
84264 +struct size_overflow_hash _000676_hash = {
84265 + .next = NULL,
84266 + .name = "add_numbered_child",
84267 + .param = PARAM5,
84268 +};
84269 +
84270 +struct size_overflow_hash _000677_hash = {
84271 + .next = NULL,
84272 + .name = "add_res_range",
84273 + .param = PARAM4,
84274 +};
84275 +
84276 +struct size_overflow_hash _000678_hash = {
84277 + .next = NULL,
84278 + .name = "addtgt",
84279 + .param = PARAM3,
84280 +};
84281 +
84282 +struct size_overflow_hash _000679_hash = {
84283 + .next = NULL,
84284 + .name = "add_uuid",
84285 + .param = PARAM4,
84286 +};
84287 +
84288 +struct size_overflow_hash _000680_hash = {
84289 + .next = NULL,
84290 + .name = "afs_cell_alloc",
84291 + .param = PARAM2,
84292 +};
84293 +
84294 +struct size_overflow_hash _000681_hash = {
84295 + .next = NULL,
84296 + .name = "aggr_recv_addba_req_evt",
84297 + .param = PARAM4,
84298 +};
84299 +
84300 +struct size_overflow_hash _000682_hash = {
84301 + .next = NULL,
84302 + .name = "agp_create_memory",
84303 + .param = PARAM1,
84304 +};
84305 +
84306 +struct size_overflow_hash _000683_hash = {
84307 + .next = NULL,
84308 + .name = "agp_create_user_memory",
84309 + .param = PARAM1,
84310 +};
84311 +
84312 +struct size_overflow_hash _000684_hash = {
84313 + .next = NULL,
84314 + .name = "alg_setsockopt",
84315 + .param = PARAM5,
84316 +};
84317 +
84318 +struct size_overflow_hash _000685_hash = {
84319 + .next = NULL,
84320 + .name = "alloc_async",
84321 + .param = PARAM1,
84322 +};
84323 +
84324 +struct size_overflow_hash _000686_hash = {
84325 + .next = NULL,
84326 + .name = "___alloc_bootmem_nopanic",
84327 + .param = PARAM1,
84328 +};
84329 +
84330 +struct size_overflow_hash _000687_hash = {
84331 + .next = NULL,
84332 + .name = "alloc_buf",
84333 + .param = PARAM1,
84334 +};
84335 +
84336 +struct size_overflow_hash _000688_hash = {
84337 + .next = NULL,
84338 + .name = "alloc_chunk",
84339 + .param = PARAM1,
84340 +};
84341 +
84342 +struct size_overflow_hash _000689_hash = {
84343 + .next = NULL,
84344 + .name = "alloc_context",
84345 + .param = PARAM1,
84346 +};
84347 +
84348 +struct size_overflow_hash _000690_hash = {
84349 + .next = NULL,
84350 + .name = "alloc_ctrl_packet",
84351 + .param = PARAM1,
84352 +};
84353 +
84354 +struct size_overflow_hash _000691_hash = {
84355 + .next = NULL,
84356 + .name = "alloc_data_packet",
84357 + .param = PARAM1,
84358 +};
84359 +
84360 +struct size_overflow_hash _000692_hash = {
84361 + .next = NULL,
84362 + .name = "alloc_dca_provider",
84363 + .param = PARAM2,
84364 +};
84365 +
84366 +struct size_overflow_hash _000693_hash = {
84367 + .next = NULL,
84368 + .name = "__alloc_dev_table",
84369 + .param = PARAM2,
84370 +};
84371 +
84372 +struct size_overflow_hash _000694_hash = {
84373 + .next = NULL,
84374 + .name = "alloc_ep",
84375 + .param = PARAM1,
84376 +};
84377 +
84378 +struct size_overflow_hash _000695_hash = {
84379 + .next = NULL,
84380 + .name = "__alloc_extent_buffer",
84381 + .param = PARAM3,
84382 +};
84383 +
84384 +struct size_overflow_hash _000696_hash = {
84385 + .next = NULL,
84386 + .name = "alloc_group_attrs",
84387 + .param = PARAM2,
84388 +};
84389 +
84390 +struct size_overflow_hash _000697_hash = {
84391 + .next = NULL,
84392 + .name = "alloc_large_system_hash",
84393 + .param = PARAM2,
84394 +};
84395 +
84396 +struct size_overflow_hash _000698_hash = {
84397 + .next = NULL,
84398 + .name = "alloc_netdev_mqs",
84399 + .param = PARAM1,
84400 +};
84401 +
84402 +struct size_overflow_hash _000699_hash = {
84403 + .next = NULL,
84404 + .name = "__alloc_objio_seg",
84405 + .param = PARAM1,
84406 +};
84407 +
84408 +struct size_overflow_hash _000700_hash = {
84409 + .next = NULL,
84410 + .name = "alloc_ring",
84411 + .param = PARAM2|PARAM4,
84412 +};
84413 +
84414 +struct size_overflow_hash _000701_hash = {
84415 + .next = NULL,
84416 + .name = "alloc_ring",
84417 + .param = PARAM2|PARAM4,
84418 +};
84419 +
84420 +struct size_overflow_hash _000704_hash = {
84421 + .next = NULL,
84422 + .name = "alloc_session",
84423 + .param = PARAM1|PARAM2,
84424 +};
84425 +
84426 +struct size_overflow_hash _000708_hash = {
84427 + .next = NULL,
84428 + .name = "alloc_smp_req",
84429 + .param = PARAM1,
84430 +};
84431 +
84432 +struct size_overflow_hash _000709_hash = {
84433 + .next = NULL,
84434 + .name = "alloc_smp_resp",
84435 + .param = PARAM1,
84436 +};
84437 +
84438 +struct size_overflow_hash _000710_hash = {
84439 + .next = NULL,
84440 + .name = "alloc_ts_config",
84441 + .param = PARAM1,
84442 +};
84443 +
84444 +struct size_overflow_hash _000711_hash = {
84445 + .next = NULL,
84446 + .name = "alloc_upcall",
84447 + .param = PARAM2,
84448 +};
84449 +
84450 +struct size_overflow_hash _000712_hash = {
84451 + .next = NULL,
84452 + .name = "altera_drscan",
84453 + .param = PARAM2,
84454 +};
84455 +
84456 +struct size_overflow_hash _000713_hash = {
84457 + .next = NULL,
84458 + .name = "altera_irscan",
84459 + .param = PARAM2,
84460 +};
84461 +
84462 +struct size_overflow_hash _000714_hash = {
84463 + .next = NULL,
84464 + .name = "altera_set_dr_post",
84465 + .param = PARAM2,
84466 +};
84467 +
84468 +struct size_overflow_hash _000715_hash = {
84469 + .next = NULL,
84470 + .name = "altera_set_dr_pre",
84471 + .param = PARAM2,
84472 +};
84473 +
84474 +struct size_overflow_hash _000716_hash = {
84475 + .next = NULL,
84476 + .name = "altera_set_ir_post",
84477 + .param = PARAM2,
84478 +};
84479 +
84480 +struct size_overflow_hash _000717_hash = {
84481 + .next = NULL,
84482 + .name = "altera_set_ir_pre",
84483 + .param = PARAM2,
84484 +};
84485 +
84486 +struct size_overflow_hash _000718_hash = {
84487 + .next = NULL,
84488 + .name = "altera_swap_dr",
84489 + .param = PARAM2,
84490 +};
84491 +
84492 +struct size_overflow_hash _000719_hash = {
84493 + .next = &_000696_hash,
84494 + .name = "altera_swap_ir",
84495 + .param = PARAM2,
84496 +};
84497 +
84498 +struct size_overflow_hash _000720_hash = {
84499 + .next = NULL,
84500 + .name = "amd_create_gatt_pages",
84501 + .param = PARAM1,
84502 +};
84503 +
84504 +struct size_overflow_hash _000721_hash = {
84505 + .next = NULL,
84506 + .name = "aoechr_write",
84507 + .param = PARAM3,
84508 +};
84509 +
84510 +struct size_overflow_hash _000722_hash = {
84511 + .next = NULL,
84512 + .name = "applesmc_create_nodes",
84513 + .param = PARAM2,
84514 +};
84515 +
84516 +struct size_overflow_hash _000723_hash = {
84517 + .next = NULL,
84518 + .name = "array_zalloc",
84519 + .param = PARAM1|PARAM2,
84520 +};
84521 +
84522 +struct size_overflow_hash _000725_hash = {
84523 + .next = NULL,
84524 + .name = "arvo_sysfs_read",
84525 + .param = PARAM6,
84526 +};
84527 +
84528 +struct size_overflow_hash _000726_hash = {
84529 + .next = NULL,
84530 + .name = "arvo_sysfs_write",
84531 + .param = PARAM6,
84532 +};
84533 +
84534 +struct size_overflow_hash _000727_hash = {
84535 + .next = NULL,
84536 + .name = "asd_store_update_bios",
84537 + .param = PARAM4,
84538 +};
84539 +
84540 +struct size_overflow_hash _000728_hash = {
84541 + .next = NULL,
84542 + .name = "ata_host_alloc",
84543 + .param = PARAM2,
84544 +};
84545 +
84546 +struct size_overflow_hash _000729_hash = {
84547 + .next = NULL,
84548 + .name = "atalk_sendmsg",
84549 + .param = PARAM4,
84550 +};
84551 +
84552 +struct size_overflow_hash _000730_hash = {
84553 + .next = NULL,
84554 + .name = "ath6kl_cfg80211_connect_event",
84555 + .param = PARAM7|PARAM9|PARAM8,
84556 +};
84557 +
84558 +struct size_overflow_hash _000731_hash = {
84559 + .next = NULL,
84560 + .name = "ath6kl_mgmt_tx",
84561 + .param = PARAM9,
84562 +};
84563 +
84564 +struct size_overflow_hash _000732_hash = {
84565 + .next = NULL,
84566 + .name = "ath6kl_wmi_roam_tbl_event_rx",
84567 + .param = PARAM3,
84568 +};
84569 +
84570 +struct size_overflow_hash _000733_hash = {
84571 + .next = NULL,
84572 + .name = "ath6kl_wmi_send_mgmt_cmd",
84573 + .param = PARAM7,
84574 +};
84575 +
84576 +struct size_overflow_hash _000734_hash = {
84577 + .next = NULL,
84578 + .name = "ath_descdma_setup",
84579 + .param = PARAM5,
84580 +};
84581 +
84582 +struct size_overflow_hash _000735_hash = {
84583 + .next = NULL,
84584 + .name = "ath_rx_edma_init",
84585 + .param = PARAM2,
84586 +};
84587 +
84588 +struct size_overflow_hash _000736_hash = {
84589 + .next = NULL,
84590 + .name = "ati_create_gatt_pages",
84591 + .param = PARAM1,
84592 +};
84593 +
84594 +struct size_overflow_hash _000737_hash = {
84595 + .next = NULL,
84596 + .name = "au0828_init_isoc",
84597 + .param = PARAM2|PARAM3,
84598 +};
84599 +
84600 +struct size_overflow_hash _000739_hash = {
84601 + .next = NULL,
84602 + .name = "audit_init_entry",
84603 + .param = PARAM1,
84604 +};
84605 +
84606 +struct size_overflow_hash _000740_hash = {
84607 + .next = NULL,
84608 + .name = "ax25_sendmsg",
84609 + .param = PARAM4,
84610 +};
84611 +
84612 +struct size_overflow_hash _000741_hash = {
84613 + .next = NULL,
84614 + .name = "b1_alloc_card",
84615 + .param = PARAM1,
84616 +};
84617 +
84618 +struct size_overflow_hash _000742_hash = {
84619 + .next = NULL,
84620 + .name = "b43_nphy_load_samples",
84621 + .param = PARAM3,
84622 +};
84623 +
84624 +struct size_overflow_hash _000743_hash = {
84625 + .next = NULL,
84626 + .name = "bio_copy_user_iov",
84627 + .param = PARAM4,
84628 +};
84629 +
84630 +struct size_overflow_hash _000744_hash = {
84631 + .next = NULL,
84632 + .name = "__bio_map_kern",
84633 + .param = PARAM2|PARAM3,
84634 +};
84635 +
84636 +struct size_overflow_hash _000746_hash = {
84637 + .next = NULL,
84638 + .name = "blk_register_region",
84639 + .param = PARAM1|PARAM2,
84640 +};
84641 +
84642 +struct size_overflow_hash _000748_hash = {
84643 + .next = NULL,
84644 + .name = "bm_entry_write",
84645 + .param = PARAM3,
84646 +};
84647 +
84648 +struct size_overflow_hash _000749_hash = {
84649 + .next = NULL,
84650 + .name = "bm_realloc_pages",
84651 + .param = PARAM2,
84652 +};
84653 +
84654 +struct size_overflow_hash _000750_hash = {
84655 + .next = &_000569_hash,
84656 + .name = "bm_register_write",
84657 + .param = PARAM3,
84658 +};
84659 +
84660 +struct size_overflow_hash _000751_hash = {
84661 + .next = NULL,
84662 + .name = "bm_status_write",
84663 + .param = PARAM3,
84664 +};
84665 +
84666 +struct size_overflow_hash _000752_hash = {
84667 + .next = NULL,
84668 + .name = "br_mdb_rehash",
84669 + .param = PARAM2,
84670 +};
84671 +
84672 +struct size_overflow_hash _000753_hash = {
84673 + .next = NULL,
84674 + .name = "btrfs_copy_from_user",
84675 + .param = PARAM3,
84676 +};
84677 +
84678 +struct size_overflow_hash _000754_hash = {
84679 + .next = NULL,
84680 + .name = "btrfs_insert_delayed_dir_index",
84681 + .param = PARAM4,
84682 +};
84683 +
84684 +struct size_overflow_hash _000755_hash = {
84685 + .next = NULL,
84686 + .name = "__btrfs_map_block",
84687 + .param = PARAM3,
84688 +};
84689 +
84690 +struct size_overflow_hash _000756_hash = {
84691 + .next = NULL,
84692 + .name = "__c4iw_init_resource_fifo",
84693 + .param = PARAM3,
84694 +};
84695 +
84696 +struct size_overflow_hash _000757_hash = {
84697 + .next = NULL,
84698 + .name = "cache_downcall",
84699 + .param = PARAM3,
84700 +};
84701 +
84702 +struct size_overflow_hash _000758_hash = {
84703 + .next = NULL,
84704 + .name = "cache_slow_downcall",
84705 + .param = PARAM2,
84706 +};
84707 +
84708 +struct size_overflow_hash _000759_hash = {
84709 + .next = NULL,
84710 + .name = "ca_extend",
84711 + .param = PARAM2,
84712 +};
84713 +
84714 +struct size_overflow_hash _000760_hash = {
84715 + .next = NULL,
84716 + .name = "caif_seqpkt_sendmsg",
84717 + .param = PARAM4,
84718 +};
84719 +
84720 +struct size_overflow_hash _000761_hash = {
84721 + .next = NULL,
84722 + .name = "caif_stream_sendmsg",
84723 + .param = PARAM4,
84724 +};
84725 +
84726 +struct size_overflow_hash _000762_hash = {
84727 + .next = NULL,
84728 + .name = "carl9170_cmd_buf",
84729 + .param = PARAM3,
84730 +};
84731 +
84732 +struct size_overflow_hash _000763_hash = {
84733 + .next = NULL,
84734 + .name = "cdev_add",
84735 + .param = PARAM2|PARAM3,
84736 +};
84737 +
84738 +struct size_overflow_hash _000765_hash = {
84739 + .next = NULL,
84740 + .name = "cdrom_read_cdda",
84741 + .param = PARAM4,
84742 +};
84743 +
84744 +struct size_overflow_hash _000766_hash = {
84745 + .next = NULL,
84746 + .name = "ceph_dns_resolve_name",
84747 + .param = PARAM1,
84748 +};
84749 +
84750 +struct size_overflow_hash _000767_hash = {
84751 + .next = NULL,
84752 + .name = "ceph_msgpool_get",
84753 + .param = PARAM2,
84754 +};
84755 +
84756 +struct size_overflow_hash _000768_hash = {
84757 + .next = NULL,
84758 + .name = "cfg80211_connect_result",
84759 + .param = PARAM4|PARAM6,
84760 +};
84761 +
84762 +struct size_overflow_hash _000770_hash = {
84763 + .next = NULL,
84764 + .name = "cfg80211_disconnected",
84765 + .param = PARAM4,
84766 +};
84767 +
84768 +struct size_overflow_hash _000771_hash = {
84769 + .next = NULL,
84770 + .name = "cfg80211_inform_bss",
84771 + .param = PARAM8,
84772 +};
84773 +
84774 +struct size_overflow_hash _000772_hash = {
84775 + .next = NULL,
84776 + .name = "cfg80211_inform_bss_frame",
84777 + .param = PARAM4,
84778 +};
84779 +
84780 +struct size_overflow_hash _000773_hash = {
84781 + .next = NULL,
84782 + .name = "cfg80211_mlme_register_mgmt",
84783 + .param = PARAM5,
84784 +};
84785 +
84786 +struct size_overflow_hash _000774_hash = {
84787 + .next = NULL,
84788 + .name = "cfg80211_roamed_bss",
84789 + .param = PARAM4|PARAM6,
84790 +};
84791 +
84792 +struct size_overflow_hash _000776_hash = {
84793 + .next = NULL,
84794 + .name = "cifs_readdata_alloc",
84795 + .param = PARAM1,
84796 +};
84797 +
84798 +struct size_overflow_hash _000777_hash = {
84799 + .next = NULL,
84800 + .name = "cifs_readv_from_socket",
84801 + .param = PARAM3,
84802 +};
84803 +
84804 +struct size_overflow_hash _000778_hash = {
84805 + .next = NULL,
84806 + .name = "cifs_writedata_alloc",
84807 + .param = PARAM1,
84808 +};
84809 +
84810 +struct size_overflow_hash _000779_hash = {
84811 + .next = NULL,
84812 + .name = "cnic_alloc_dma",
84813 + .param = PARAM3,
84814 +};
84815 +
84816 +struct size_overflow_hash _000780_hash = {
84817 + .next = NULL,
84818 + .name = "configfs_write_file",
84819 + .param = PARAM3,
84820 +};
84821 +
84822 +struct size_overflow_hash _000781_hash = {
84823 + .next = NULL,
84824 + .name = "construct_key",
84825 + .param = PARAM3,
84826 +};
84827 +
84828 +struct size_overflow_hash _000782_hash = {
84829 + .next = NULL,
84830 + .name = "context_alloc",
84831 + .param = PARAM3,
84832 +};
84833 +
84834 +struct size_overflow_hash _000783_hash = {
84835 + .next = NULL,
84836 + .name = "copy_to_user",
84837 + .param = PARAM3,
84838 +};
84839 +
84840 +struct size_overflow_hash _000784_hash = {
84841 + .next = NULL,
84842 + .name = "create_attr_set",
84843 + .param = PARAM1,
84844 +};
84845 +
84846 +struct size_overflow_hash _000785_hash = {
84847 + .next = NULL,
84848 + .name = "create_bounce_buffer",
84849 + .param = PARAM3,
84850 +};
84851 +
84852 +struct size_overflow_hash _000786_hash = {
84853 + .next = NULL,
84854 + .name = "create_gpadl_header",
84855 + .param = PARAM2,
84856 +};
84857 +
84858 +struct size_overflow_hash _000787_hash = {
84859 + .next = NULL,
84860 + .name = "_create_sg_bios",
84861 + .param = PARAM4,
84862 +};
84863 +
84864 +struct size_overflow_hash _000788_hash = {
84865 + .next = NULL,
84866 + .name = "cryptd_alloc_instance",
84867 + .param = PARAM2|PARAM3,
84868 +};
84869 +
84870 +struct size_overflow_hash _000790_hash = {
84871 + .next = NULL,
84872 + .name = "crypto_ahash_setkey",
84873 + .param = PARAM3,
84874 +};
84875 +
84876 +struct size_overflow_hash _000791_hash = {
84877 + .next = NULL,
84878 + .name = "crypto_alloc_instance2",
84879 + .param = PARAM3,
84880 +};
84881 +
84882 +struct size_overflow_hash _000792_hash = {
84883 + .next = NULL,
84884 + .name = "crypto_shash_setkey",
84885 + .param = PARAM3,
84886 +};
84887 +
84888 +struct size_overflow_hash _000793_hash = {
84889 + .next = NULL,
84890 + .name = "cx231xx_init_bulk",
84891 + .param = PARAM3|PARAM2,
84892 +};
84893 +
84894 +struct size_overflow_hash _000794_hash = {
84895 + .next = NULL,
84896 + .name = "cx231xx_init_isoc",
84897 + .param = PARAM2|PARAM3,
84898 +};
84899 +
84900 +struct size_overflow_hash _000796_hash = {
84901 + .next = NULL,
84902 + .name = "cx231xx_init_vbi_isoc",
84903 + .param = PARAM2|PARAM3,
84904 +};
84905 +
84906 +struct size_overflow_hash _000798_hash = {
84907 + .next = NULL,
84908 + .name = "cxgb_alloc_mem",
84909 + .param = PARAM1,
84910 +};
84911 +
84912 +struct size_overflow_hash _000799_hash = {
84913 + .next = NULL,
84914 + .name = "cxgbi_device_portmap_create",
84915 + .param = PARAM3,
84916 +};
84917 +
84918 +struct size_overflow_hash _000800_hash = {
84919 + .next = NULL,
84920 + .name = "cxgbi_device_register",
84921 + .param = PARAM1|PARAM2,
84922 +};
84923 +
84924 +struct size_overflow_hash _000802_hash = {
84925 + .next = NULL,
84926 + .name = "__cxio_init_resource_fifo",
84927 + .param = PARAM3,
84928 +};
84929 +
84930 +struct size_overflow_hash _000803_hash = {
84931 + .next = NULL,
84932 + .name = "dccp_sendmsg",
84933 + .param = PARAM4,
84934 +};
84935 +
84936 +struct size_overflow_hash _000804_hash = {
84937 + .next = NULL,
84938 + .name = "ddp_make_gl",
84939 + .param = PARAM1,
84940 +};
84941 +
84942 +struct size_overflow_hash _000805_hash = {
84943 + .next = NULL,
84944 + .name = "depth_write",
84945 + .param = PARAM3,
84946 +};
84947 +
84948 +struct size_overflow_hash _000806_hash = {
84949 + .next = NULL,
84950 + .name = "dev_irnet_write",
84951 + .param = PARAM3,
84952 +};
84953 +
84954 +struct size_overflow_hash _000807_hash = {
84955 + .next = NULL,
84956 + .name = "dev_set_alias",
84957 + .param = PARAM3,
84958 +};
84959 +
84960 +struct size_overflow_hash _000808_hash = {
84961 + .next = NULL,
84962 + .name = "dev_write",
84963 + .param = PARAM3,
84964 +};
84965 +
84966 +struct size_overflow_hash _000809_hash = {
84967 + .next = NULL,
84968 + .name = "dfs_global_file_write",
84969 + .param = PARAM3,
84970 +};
84971 +
84972 +struct size_overflow_hash _000810_hash = {
84973 + .next = NULL,
84974 + .name = "dgram_sendmsg",
84975 + .param = PARAM4,
84976 +};
84977 +
84978 +struct size_overflow_hash _000811_hash = {
84979 + .next = NULL,
84980 + .name = "disconnect",
84981 + .param = PARAM4,
84982 +};
84983 +
84984 +struct size_overflow_hash _000812_hash = {
84985 + .next = NULL,
84986 + .name = "dma_attach",
84987 + .param = PARAM6|PARAM7,
84988 +};
84989 +
84990 +struct size_overflow_hash _000814_hash = {
84991 + .next = NULL,
84992 + .name = "dn_sendmsg",
84993 + .param = PARAM4,
84994 +};
84995 +
84996 +struct size_overflow_hash _000815_hash = {
84997 + .next = NULL,
84998 + .name = "do_dccp_setsockopt",
84999 + .param = PARAM5,
85000 +};
85001 +
85002 +struct size_overflow_hash _000816_hash = {
85003 + .next = NULL,
85004 + .name = "do_jffs2_setxattr",
85005 + .param = PARAM5,
85006 +};
85007 +
85008 +struct size_overflow_hash _000817_hash = {
85009 + .next = NULL,
85010 + .name = "do_msgsnd",
85011 + .param = PARAM4,
85012 +};
85013 +
85014 +struct size_overflow_hash _000818_hash = {
85015 + .next = NULL,
85016 + .name = "do_raw_setsockopt",
85017 + .param = PARAM5,
85018 +};
85019 +
85020 +struct size_overflow_hash _000819_hash = {
85021 + .next = NULL,
85022 + .name = "do_readv_writev",
85023 + .param = PARAM4,
85024 +};
85025 +
85026 +struct size_overflow_hash _000820_hash = {
85027 + .next = NULL,
85028 + .name = "do_sync",
85029 + .param = PARAM1,
85030 +};
85031 +
85032 +struct size_overflow_hash _000821_hash = {
85033 + .next = NULL,
85034 + .name = "dup_array",
85035 + .param = PARAM3,
85036 +};
85037 +
85038 +struct size_overflow_hash _000822_hash = {
85039 + .next = NULL,
85040 + .name = "dvb_audio_write",
85041 + .param = PARAM3,
85042 +};
85043 +
85044 +struct size_overflow_hash _000823_hash = {
85045 + .next = NULL,
85046 + .name = "dvb_ca_en50221_init",
85047 + .param = PARAM4,
85048 +};
85049 +
85050 +struct size_overflow_hash _000824_hash = {
85051 + .next = NULL,
85052 + .name = "dvb_video_write",
85053 + .param = PARAM3,
85054 +};
85055 +
85056 +struct size_overflow_hash _000825_hash = {
85057 + .next = NULL,
85058 + .name = "econet_sendmsg",
85059 + .param = PARAM4,
85060 +};
85061 +
85062 +struct size_overflow_hash _000826_hash = {
85063 + .next = NULL,
85064 + .name = "ecryptfs_decode_and_decrypt_filename",
85065 + .param = PARAM5,
85066 +};
85067 +
85068 +struct size_overflow_hash _000827_hash = {
85069 + .next = NULL,
85070 + .name = "ecryptfs_encrypt_and_encode_filename",
85071 + .param = PARAM6,
85072 +};
85073 +
85074 +struct size_overflow_hash _000828_hash = {
85075 + .next = NULL,
85076 + .name = "ecryptfs_send_message_locked",
85077 + .param = PARAM2,
85078 +};
85079 +
85080 +struct size_overflow_hash _000829_hash = {
85081 + .next = NULL,
85082 + .name = "edac_device_alloc_ctl_info",
85083 + .param = PARAM1,
85084 +};
85085 +
85086 +struct size_overflow_hash _000830_hash = {
85087 + .next = NULL,
85088 + .name = "edac_mc_alloc",
85089 + .param = PARAM1,
85090 +};
85091 +
85092 +struct size_overflow_hash _000831_hash = {
85093 + .next = NULL,
85094 + .name = "edac_pci_alloc_ctl_info",
85095 + .param = PARAM1,
85096 +};
85097 +
85098 +struct size_overflow_hash _000832_hash = {
85099 + .next = NULL,
85100 + .name = "efivar_create_sysfs_entry",
85101 + .param = PARAM2,
85102 +};
85103 +
85104 +struct size_overflow_hash _000833_hash = {
85105 + .next = NULL,
85106 + .name = "em28xx_alloc_isoc",
85107 + .param = PARAM4,
85108 +};
85109 +
85110 +struct size_overflow_hash _000834_hash = {
85111 + .next = NULL,
85112 + .name = "enable_write",
85113 + .param = PARAM3,
85114 +};
85115 +
85116 +struct size_overflow_hash _000835_hash = {
85117 + .next = NULL,
85118 + .name = "enclosure_register",
85119 + .param = PARAM3,
85120 +};
85121 +
85122 +struct size_overflow_hash _000836_hash = {
85123 + .next = NULL,
85124 + .name = "ext4_kvzalloc",
85125 + .param = PARAM1,
85126 +};
85127 +
85128 +struct size_overflow_hash _000837_hash = {
85129 + .next = NULL,
85130 + .name = "extend_netdev_table",
85131 + .param = PARAM2,
85132 +};
85133 +
85134 +struct size_overflow_hash _000838_hash = {
85135 + .next = NULL,
85136 + .name = "__feat_register_sp",
85137 + .param = PARAM6,
85138 +};
85139 +
85140 +struct size_overflow_hash _000839_hash = {
85141 + .next = NULL,
85142 + .name = "__ffs_ep0_read_events",
85143 + .param = PARAM3,
85144 +};
85145 +
85146 +struct size_overflow_hash _000840_hash = {
85147 + .next = NULL,
85148 + .name = "ffs_ep0_write",
85149 + .param = PARAM3,
85150 +};
85151 +
85152 +struct size_overflow_hash _000841_hash = {
85153 + .next = NULL,
85154 + .name = "ffs_epfile_read",
85155 + .param = PARAM3,
85156 +};
85157 +
85158 +struct size_overflow_hash _000842_hash = {
85159 + .next = NULL,
85160 + .name = "ffs_epfile_write",
85161 + .param = PARAM3,
85162 +};
85163 +
85164 +struct size_overflow_hash _000843_hash = {
85165 + .next = NULL,
85166 + .name = "fib_info_hash_alloc",
85167 + .param = PARAM1,
85168 +};
85169 +
85170 +struct size_overflow_hash _000844_hash = {
85171 + .next = NULL,
85172 + .name = "fillonedir",
85173 + .param = PARAM3,
85174 +};
85175 +
85176 +struct size_overflow_hash _000845_hash = {
85177 + .next = NULL,
85178 + .name = "flexcop_device_kmalloc",
85179 + .param = PARAM1,
85180 +};
85181 +
85182 +struct size_overflow_hash _000846_hash = {
85183 + .next = NULL,
85184 + .name = "frame_alloc",
85185 + .param = PARAM4,
85186 +};
85187 +
85188 +struct size_overflow_hash _000847_hash = {
85189 + .next = NULL,
85190 + .name = "fw_node_create",
85191 + .param = PARAM2,
85192 +};
85193 +
85194 +struct size_overflow_hash _000848_hash = {
85195 + .next = NULL,
85196 + .name = "garmin_read_process",
85197 + .param = PARAM3,
85198 +};
85199 +
85200 +struct size_overflow_hash _000849_hash = {
85201 + .next = NULL,
85202 + .name = "garp_request_join",
85203 + .param = PARAM4,
85204 +};
85205 +
85206 +struct size_overflow_hash _000850_hash = {
85207 + .next = NULL,
85208 + .name = "get_derived_key",
85209 + .param = PARAM4,
85210 +};
85211 +
85212 +struct size_overflow_hash _000851_hash = {
85213 + .next = NULL,
85214 + .name = "get_entry",
85215 + .param = PARAM4,
85216 +};
85217 +
85218 +struct size_overflow_hash _000852_hash = {
85219 + .next = NULL,
85220 + .name = "get_free_de",
85221 + .param = PARAM2,
85222 +};
85223 +
85224 +struct size_overflow_hash _000853_hash = {
85225 + .next = NULL,
85226 + .name = "get_new_cssid",
85227 + .param = PARAM2,
85228 +};
85229 +
85230 +struct size_overflow_hash _000854_hash = {
85231 + .next = NULL,
85232 + .name = "getxattr",
85233 + .param = PARAM4,
85234 +};
85235 +
85236 +struct size_overflow_hash _000855_hash = {
85237 + .next = NULL,
85238 + .name = "gspca_dev_probe2",
85239 + .param = PARAM4,
85240 +};
85241 +
85242 +struct size_overflow_hash _000856_hash = {
85243 + .next = NULL,
85244 + .name = "hcd_alloc_coherent",
85245 + .param = PARAM5,
85246 +};
85247 +
85248 +struct size_overflow_hash _000857_hash = {
85249 + .next = NULL,
85250 + .name = "hci_sock_sendmsg",
85251 + .param = PARAM4,
85252 +};
85253 +
85254 +struct size_overflow_hash _000858_hash = {
85255 + .next = NULL,
85256 + .name = "hid_register_field",
85257 + .param = PARAM2|PARAM3,
85258 +};
85259 +
85260 +struct size_overflow_hash _000860_hash = {
85261 + .next = NULL,
85262 + .name = "hid_report_raw_event",
85263 + .param = PARAM4,
85264 +};
85265 +
85266 +struct size_overflow_hash _000861_hash = {
85267 + .next = NULL,
85268 + .name = "hpi_alloc_control_cache",
85269 + .param = PARAM1,
85270 +};
85271 +
85272 +struct size_overflow_hash _000862_hash = {
85273 + .next = NULL,
85274 + .name = "hugetlbfs_read_actor",
85275 + .param = PARAM2|PARAM5|PARAM4,
85276 +};
85277 +
85278 +struct size_overflow_hash _000865_hash = {
85279 + .next = NULL,
85280 + .name = "hvc_alloc",
85281 + .param = PARAM4,
85282 +};
85283 +
85284 +struct size_overflow_hash _000866_hash = {
85285 + .next = NULL,
85286 + .name = "__hwahc_dev_set_key",
85287 + .param = PARAM5,
85288 +};
85289 +
85290 +struct size_overflow_hash _000867_hash = {
85291 + .next = NULL,
85292 + .name = "i2400m_zrealloc_2x",
85293 + .param = PARAM3,
85294 +};
85295 +
85296 +struct size_overflow_hash _000868_hash = {
85297 + .next = NULL,
85298 + .name = "ib_alloc_device",
85299 + .param = PARAM1,
85300 +};
85301 +
85302 +struct size_overflow_hash _000869_hash = {
85303 + .next = NULL,
85304 + .name = "ib_create_send_mad",
85305 + .param = PARAM5,
85306 +};
85307 +
85308 +struct size_overflow_hash _000870_hash = {
85309 + .next = NULL,
85310 + .name = "ibmasm_new_command",
85311 + .param = PARAM2,
85312 +};
85313 +
85314 +struct size_overflow_hash _000871_hash = {
85315 + .next = NULL,
85316 + .name = "ib_send_cm_drep",
85317 + .param = PARAM3,
85318 +};
85319 +
85320 +struct size_overflow_hash _000872_hash = {
85321 + .next = NULL,
85322 + .name = "ib_send_cm_mra",
85323 + .param = PARAM4,
85324 +};
85325 +
85326 +struct size_overflow_hash _000873_hash = {
85327 + .next = NULL,
85328 + .name = "ib_send_cm_rtu",
85329 + .param = PARAM3,
85330 +};
85331 +
85332 +struct size_overflow_hash _000874_hash = {
85333 + .next = NULL,
85334 + .name = "ieee80211_key_alloc",
85335 + .param = PARAM3,
85336 +};
85337 +
85338 +struct size_overflow_hash _000875_hash = {
85339 + .next = NULL,
85340 + .name = "ieee80211_mgmt_tx",
85341 + .param = PARAM9,
85342 +};
85343 +
85344 +struct size_overflow_hash _000876_hash = {
85345 + .next = NULL,
85346 + .name = "ieee80211_send_probe_req",
85347 + .param = PARAM6,
85348 +};
85349 +
85350 +struct size_overflow_hash _000877_hash = {
85351 + .next = NULL,
85352 + .name = "if_writecmd",
85353 + .param = PARAM2,
85354 +};
85355 +
85356 +struct size_overflow_hash _000878_hash = {
85357 + .next = NULL,
85358 + .name = "init_bch",
85359 + .param = PARAM1|PARAM2,
85360 +};
85361 +
85362 +struct size_overflow_hash _000880_hash = {
85363 + .next = NULL,
85364 + .name = "init_ipath",
85365 + .param = PARAM1,
85366 +};
85367 +
85368 +struct size_overflow_hash _000881_hash = {
85369 + .next = NULL,
85370 + .name = "init_list_set",
85371 + .param = PARAM2|PARAM3,
85372 +};
85373 +
85374 +struct size_overflow_hash _000883_hash = {
85375 + .next = NULL,
85376 + .name = "init_q",
85377 + .param = PARAM4,
85378 +};
85379 +
85380 +struct size_overflow_hash _000884_hash = {
85381 + .next = NULL,
85382 + .name = "init_state",
85383 + .param = PARAM2,
85384 +};
85385 +
85386 +struct size_overflow_hash _000885_hash = {
85387 + .next = NULL,
85388 + .name = "init_tag_map",
85389 + .param = PARAM3,
85390 +};
85391 +
85392 +struct size_overflow_hash _000886_hash = {
85393 + .next = NULL,
85394 + .name = "input_ff_create",
85395 + .param = PARAM2,
85396 +};
85397 +
85398 +struct size_overflow_hash _000887_hash = {
85399 + .next = NULL,
85400 + .name = "input_mt_init_slots",
85401 + .param = PARAM2,
85402 +};
85403 +
85404 +struct size_overflow_hash _000888_hash = {
85405 + .next = NULL,
85406 + .name = "interfaces",
85407 + .param = PARAM2,
85408 +};
85409 +
85410 +struct size_overflow_hash _000889_hash = {
85411 + .next = NULL,
85412 + .name = "ioat2_alloc_ring",
85413 + .param = PARAM2,
85414 +};
85415 +
85416 +struct size_overflow_hash _000890_hash = {
85417 + .next = NULL,
85418 + .name = "ip_generic_getfrag",
85419 + .param = PARAM3|PARAM4,
85420 +};
85421 +
85422 +struct size_overflow_hash _000892_hash = {
85423 + .next = NULL,
85424 + .name = "ipr_alloc_ucode_buffer",
85425 + .param = PARAM1,
85426 +};
85427 +
85428 +struct size_overflow_hash _000893_hash = {
85429 + .next = NULL,
85430 + .name = "ip_set_alloc",
85431 + .param = PARAM1,
85432 +};
85433 +
85434 +struct size_overflow_hash _000894_hash = {
85435 + .next = NULL,
85436 + .name = "ipv6_flowlabel_opt",
85437 + .param = PARAM3,
85438 +};
85439 +
85440 +struct size_overflow_hash _000895_hash = {
85441 + .next = NULL,
85442 + .name = "ipv6_renew_options",
85443 + .param = PARAM5,
85444 +};
85445 +
85446 +struct size_overflow_hash _000896_hash = {
85447 + .next = NULL,
85448 + .name = "ipxrtr_route_packet",
85449 + .param = PARAM4,
85450 +};
85451 +
85452 +struct size_overflow_hash _000897_hash = {
85453 + .next = NULL,
85454 + .name = "irda_sendmsg",
85455 + .param = PARAM4,
85456 +};
85457 +
85458 +struct size_overflow_hash _000898_hash = {
85459 + .next = NULL,
85460 + .name = "irda_sendmsg_dgram",
85461 + .param = PARAM4,
85462 +};
85463 +
85464 +struct size_overflow_hash _000899_hash = {
85465 + .next = NULL,
85466 + .name = "irda_sendmsg_ultra",
85467 + .param = PARAM4,
85468 +};
85469 +
85470 +struct size_overflow_hash _000900_hash = {
85471 + .next = NULL,
85472 + .name = "irias_add_octseq_attrib",
85473 + .param = PARAM4,
85474 +};
85475 +
85476 +struct size_overflow_hash _000901_hash = {
85477 + .next = NULL,
85478 + .name = "irq_alloc_generic_chip",
85479 + .param = PARAM2,
85480 +};
85481 +
85482 +struct size_overflow_hash _000902_hash = {
85483 + .next = NULL,
85484 + .name = "irq_domain_add_linear",
85485 + .param = PARAM2,
85486 +};
85487 +
85488 +struct size_overflow_hash _000903_hash = {
85489 + .next = NULL,
85490 + .name = "iscsi_alloc_session",
85491 + .param = PARAM3,
85492 +};
85493 +
85494 +struct size_overflow_hash _000904_hash = {
85495 + .next = NULL,
85496 + .name = "iscsi_create_conn",
85497 + .param = PARAM2,
85498 +};
85499 +
85500 +struct size_overflow_hash _000905_hash = {
85501 + .next = NULL,
85502 + .name = "iscsi_create_endpoint",
85503 + .param = PARAM1,
85504 +};
85505 +
85506 +struct size_overflow_hash _000906_hash = {
85507 + .next = NULL,
85508 + .name = "iscsi_create_iface",
85509 + .param = PARAM5,
85510 +};
85511 +
85512 +struct size_overflow_hash _000907_hash = {
85513 + .next = NULL,
85514 + .name = "iscsi_decode_text_input",
85515 + .param = PARAM4,
85516 +};
85517 +
85518 +struct size_overflow_hash _000908_hash = {
85519 + .next = NULL,
85520 + .name = "iscsi_pool_init",
85521 + .param = PARAM2|PARAM4,
85522 +};
85523 +
85524 +struct size_overflow_hash _000910_hash = {
85525 + .next = NULL,
85526 + .name = "iscsit_dump_data_payload",
85527 + .param = PARAM2,
85528 +};
85529 +
85530 +struct size_overflow_hash _000911_hash = {
85531 + .next = NULL,
85532 + .name = "isdn_write",
85533 + .param = PARAM3,
85534 +};
85535 +
85536 +struct size_overflow_hash _000912_hash = {
85537 + .next = NULL,
85538 + .name = "isku_receive",
85539 + .param = PARAM4,
85540 +};
85541 +
85542 +struct size_overflow_hash _000913_hash = {
85543 + .next = NULL,
85544 + .name = "isku_send",
85545 + .param = PARAM4,
85546 +};
85547 +
85548 +struct size_overflow_hash _000914_hash = {
85549 + .next = NULL,
85550 + .name = "islpci_mgt_transaction",
85551 + .param = PARAM5,
85552 +};
85553 +
85554 +struct size_overflow_hash _000915_hash = {
85555 + .next = NULL,
85556 + .name = "iso_sched_alloc",
85557 + .param = PARAM1,
85558 +};
85559 +
85560 +struct size_overflow_hash _000916_hash = {
85561 + .next = NULL,
85562 + .name = "ivtv_v4l2_write",
85563 + .param = PARAM3,
85564 +};
85565 +
85566 +struct size_overflow_hash _000917_hash = {
85567 + .next = NULL,
85568 + .name = "iwl_trans_txq_alloc",
85569 + .param = PARAM3,
85570 +};
85571 +
85572 +struct size_overflow_hash _000918_hash = {
85573 + .next = NULL,
85574 + .name = "iwmct_fw_parser_init",
85575 + .param = PARAM4,
85576 +};
85577 +
85578 +struct size_overflow_hash _000919_hash = {
85579 + .next = NULL,
85580 + .name = "iwm_notif_send",
85581 + .param = PARAM6,
85582 +};
85583 +
85584 +struct size_overflow_hash _000920_hash = {
85585 + .next = NULL,
85586 + .name = "iwm_ntf_calib_res",
85587 + .param = PARAM3,
85588 +};
85589 +
85590 +struct size_overflow_hash _000921_hash = {
85591 + .next = NULL,
85592 + .name = "iwm_umac_set_config_var",
85593 + .param = PARAM4,
85594 +};
85595 +
85596 +struct size_overflow_hash _000922_hash = {
85597 + .next = NULL,
85598 + .name = "ixgbe_alloc_q_vector",
85599 + .param = PARAM3|PARAM5,
85600 +};
85601 +
85602 +struct size_overflow_hash _000924_hash = {
85603 + .next = NULL,
85604 + .name = "jbd2_journal_init_revoke",
85605 + .param = PARAM2,
85606 +};
85607 +
85608 +struct size_overflow_hash _000925_hash = {
85609 + .next = NULL,
85610 + .name = "jffs2_write_dirent",
85611 + .param = PARAM5,
85612 +};
85613 +
85614 +struct size_overflow_hash _000926_hash = {
85615 + .next = NULL,
85616 + .name = "journal_init_revoke",
85617 + .param = PARAM2,
85618 +};
85619 +
85620 +struct size_overflow_hash _000927_hash = {
85621 + .next = NULL,
85622 + .name = "keyctl_instantiate_key",
85623 + .param = PARAM3,
85624 +};
85625 +
85626 +struct size_overflow_hash _000928_hash = {
85627 + .next = NULL,
85628 + .name = "keyctl_instantiate_key_iov",
85629 + .param = PARAM3,
85630 +};
85631 +
85632 +struct size_overflow_hash _000929_hash = {
85633 + .next = NULL,
85634 + .name = "__kfifo_from_user",
85635 + .param = PARAM3,
85636 +};
85637 +
85638 +struct size_overflow_hash _000930_hash = {
85639 + .next = NULL,
85640 + .name = "kimage_crash_alloc",
85641 + .param = PARAM3,
85642 +};
85643 +
85644 +struct size_overflow_hash _000931_hash = {
85645 + .next = NULL,
85646 + .name = "kimage_normal_alloc",
85647 + .param = PARAM3,
85648 +};
85649 +
85650 +struct size_overflow_hash _000932_hash = {
85651 + .next = NULL,
85652 + .name = "kmem_realloc",
85653 + .param = PARAM2,
85654 +};
85655 +
85656 +struct size_overflow_hash _000933_hash = {
85657 + .next = NULL,
85658 + .name = "kmem_zalloc",
85659 + .param = PARAM1,
85660 +};
85661 +
85662 +struct size_overflow_hash _000934_hash = {
85663 + .next = NULL,
85664 + .name = "koneplus_send",
85665 + .param = PARAM4,
85666 +};
85667 +
85668 +struct size_overflow_hash _000935_hash = {
85669 + .next = NULL,
85670 + .name = "koneplus_sysfs_read",
85671 + .param = PARAM6,
85672 +};
85673 +
85674 +struct size_overflow_hash _000936_hash = {
85675 + .next = NULL,
85676 + .name = "kovaplus_send",
85677 + .param = PARAM4,
85678 +};
85679 +
85680 +struct size_overflow_hash _000937_hash = {
85681 + .next = NULL,
85682 + .name = "kvm_read_guest_page_mmu",
85683 + .param = PARAM6,
85684 +};
85685 +
85686 +struct size_overflow_hash _000938_hash = {
85687 + .next = NULL,
85688 + .name = "kvm_set_irq_routing",
85689 + .param = PARAM3,
85690 +};
85691 +
85692 +struct size_overflow_hash _000939_hash = {
85693 + .next = NULL,
85694 + .name = "kvm_write_guest_cached",
85695 + .param = PARAM4,
85696 +};
85697 +
85698 +struct size_overflow_hash _000940_hash = {
85699 + .next = NULL,
85700 + .name = "kvm_write_guest_page",
85701 + .param = PARAM5,
85702 +};
85703 +
85704 +struct size_overflow_hash _000941_hash = {
85705 + .next = NULL,
85706 + .name = "l2cap_skbuff_fromiovec",
85707 + .param = PARAM3|PARAM4,
85708 +};
85709 +
85710 +struct size_overflow_hash _000943_hash = {
85711 + .next = NULL,
85712 + .name = "l2tp_ip_sendmsg",
85713 + .param = PARAM4,
85714 +};
85715 +
85716 +struct size_overflow_hash _000944_hash = {
85717 + .next = NULL,
85718 + .name = "l2tp_session_create",
85719 + .param = PARAM1,
85720 +};
85721 +
85722 +struct size_overflow_hash _000945_hash = {
85723 + .next = NULL,
85724 + .name = "lc_create",
85725 + .param = PARAM3,
85726 +};
85727 +
85728 +struct size_overflow_hash _000946_hash = {
85729 + .next = NULL,
85730 + .name = "leaf_dealloc",
85731 + .param = PARAM3,
85732 +};
85733 +
85734 +struct size_overflow_hash _000947_hash = {
85735 + .next = NULL,
85736 + .name = "linear_conf",
85737 + .param = PARAM2,
85738 +};
85739 +
85740 +struct size_overflow_hash _000948_hash = {
85741 + .next = NULL,
85742 + .name = "lirc_buffer_init",
85743 + .param = PARAM2|PARAM3,
85744 +};
85745 +
85746 +struct size_overflow_hash _000950_hash = {
85747 + .next = NULL,
85748 + .name = "llc_ui_sendmsg",
85749 + .param = PARAM4,
85750 +};
85751 +
85752 +struct size_overflow_hash _000951_hash = {
85753 + .next = NULL,
85754 + .name = "lpfc_sli4_queue_alloc",
85755 + .param = PARAM3,
85756 +};
85757 +
85758 +struct size_overflow_hash _000952_hash = {
85759 + .next = NULL,
85760 + .name = "mce_request_packet",
85761 + .param = PARAM3,
85762 +};
85763 +
85764 +struct size_overflow_hash _000953_hash = {
85765 + .next = NULL,
85766 + .name = "mdiobus_alloc_size",
85767 + .param = PARAM1,
85768 +};
85769 +
85770 +struct size_overflow_hash _000954_hash = {
85771 + .next = NULL,
85772 + .name = "media_entity_init",
85773 + .param = PARAM2|PARAM4,
85774 +};
85775 +
85776 +struct size_overflow_hash _000956_hash = {
85777 + .next = NULL,
85778 + .name = "memstick_alloc_host",
85779 + .param = PARAM1,
85780 +};
85781 +
85782 +struct size_overflow_hash _000957_hash = {
85783 + .next = NULL,
85784 + .name = "mesh_table_alloc",
85785 + .param = PARAM1,
85786 +};
85787 +
85788 +struct size_overflow_hash _000958_hash = {
85789 + .next = NULL,
85790 + .name = "mfd_add_devices",
85791 + .param = PARAM4,
85792 +};
85793 +
85794 +struct size_overflow_hash _000959_hash = {
85795 + .next = NULL,
85796 + .name = "mISDN_sock_sendmsg",
85797 + .param = PARAM4,
85798 +};
85799 +
85800 +struct size_overflow_hash _000960_hash = {
85801 + .next = NULL,
85802 + .name = "mmc_alloc_host",
85803 + .param = PARAM1,
85804 +};
85805 +
85806 +struct size_overflow_hash _000961_hash = {
85807 + .next = NULL,
85808 + .name = "mmc_test_alloc_mem",
85809 + .param = PARAM3,
85810 +};
85811 +
85812 +struct size_overflow_hash _000962_hash = {
85813 + .next = NULL,
85814 + .name = "mpi_alloc",
85815 + .param = PARAM1,
85816 +};
85817 +
85818 +struct size_overflow_hash _000963_hash = {
85819 + .next = NULL,
85820 + .name = "mpihelp_mul_karatsuba_case",
85821 + .param = PARAM5|PARAM3,
85822 +};
85823 +
85824 +struct size_overflow_hash _000964_hash = {
85825 + .next = NULL,
85826 + .name = "mpihelp_mul_n",
85827 + .param = PARAM4,
85828 +};
85829 +
85830 +struct size_overflow_hash _000965_hash = {
85831 + .next = NULL,
85832 + .name = "mpi_set_bit",
85833 + .param = PARAM2,
85834 +};
85835 +
85836 +struct size_overflow_hash _000966_hash = {
85837 + .next = NULL,
85838 + .name = "mpi_set_highbit",
85839 + .param = PARAM2,
85840 +};
85841 +
85842 +struct size_overflow_hash _000967_hash = {
85843 + .next = NULL,
85844 + .name = "mtd_concat_create",
85845 + .param = PARAM2,
85846 +};
85847 +
85848 +struct size_overflow_hash _000968_hash = {
85849 + .next = NULL,
85850 + .name = "mvumi_alloc_mem_resource",
85851 + .param = PARAM3,
85852 +};
85853 +
85854 +struct size_overflow_hash _000969_hash = {
85855 + .next = NULL,
85856 + .name = "mwifiex_11n_create_rx_reorder_tbl",
85857 + .param = PARAM4,
85858 +};
85859 +
85860 +struct size_overflow_hash _000970_hash = {
85861 + .next = NULL,
85862 + .name = "mwifiex_alloc_sdio_mpa_buffers",
85863 + .param = PARAM2|PARAM3,
85864 +};
85865 +
85866 +struct size_overflow_hash _000972_hash = {
85867 + .next = NULL,
85868 + .name = "mwl8k_cmd_set_beacon",
85869 + .param = PARAM4,
85870 +};
85871 +
85872 +struct size_overflow_hash _000973_hash = {
85873 + .next = NULL,
85874 + .name = "neigh_hash_alloc",
85875 + .param = PARAM1,
85876 +};
85877 +
85878 +struct size_overflow_hash _000974_hash = {
85879 + .next = NULL,
85880 + .name = "netlink_sendmsg",
85881 + .param = PARAM4,
85882 +};
85883 +
85884 +struct size_overflow_hash _000975_hash = {
85885 + .next = NULL,
85886 + .name = "netxen_alloc_sds_rings",
85887 + .param = PARAM2,
85888 +};
85889 +
85890 +struct size_overflow_hash _000976_hash = {
85891 + .next = NULL,
85892 + .name = "new_bind_ctl",
85893 + .param = PARAM2,
85894 +};
85895 +
85896 +struct size_overflow_hash _000977_hash = {
85897 + .next = NULL,
85898 + .name = "new_dir",
85899 + .param = PARAM3,
85900 +};
85901 +
85902 +struct size_overflow_hash _000978_hash = {
85903 + .next = NULL,
85904 + .name = "new_tape_buffer",
85905 + .param = PARAM2,
85906 +};
85907 +
85908 +struct size_overflow_hash _000979_hash = {
85909 + .next = NULL,
85910 + .name = "nfc_llcp_build_tlv",
85911 + .param = PARAM3,
85912 +};
85913 +
85914 +struct size_overflow_hash _000980_hash = {
85915 + .next = NULL,
85916 + .name = "nfc_llcp_send_i_frame",
85917 + .param = PARAM3,
85918 +};
85919 +
85920 +struct size_overflow_hash _000981_hash = {
85921 + .next = NULL,
85922 + .name = "nfs4_alloc_slots",
85923 + .param = PARAM1,
85924 +};
85925 +
85926 +struct size_overflow_hash _000982_hash = {
85927 + .next = NULL,
85928 + .name = "nfsctl_transaction_write",
85929 + .param = PARAM3,
85930 +};
85931 +
85932 +struct size_overflow_hash _000983_hash = {
85933 + .next = NULL,
85934 + .name = "nfs_idmap_request_key",
85935 + .param = PARAM3,
85936 +};
85937 +
85938 +struct size_overflow_hash _000984_hash = {
85939 + .next = NULL,
85940 + .name = "nfs_readdata_alloc",
85941 + .param = PARAM1,
85942 +};
85943 +
85944 +struct size_overflow_hash _000985_hash = {
85945 + .next = NULL,
85946 + .name = "nfs_writedata_alloc",
85947 + .param = PARAM1,
85948 +};
85949 +
85950 +struct size_overflow_hash _000986_hash = {
85951 + .next = NULL,
85952 + .name = "nl_pid_hash_zalloc",
85953 + .param = PARAM1,
85954 +};
85955 +
85956 +struct size_overflow_hash _000987_hash = {
85957 + .next = NULL,
85958 + .name = "nr_sendmsg",
85959 + .param = PARAM4,
85960 +};
85961 +
85962 +struct size_overflow_hash _000988_hash = {
85963 + .next = NULL,
85964 + .name = "nsm_create_handle",
85965 + .param = PARAM4,
85966 +};
85967 +
85968 +struct size_overflow_hash _000989_hash = {
85969 + .next = NULL,
85970 + .name = "ntfs_copy_from_user_iovec",
85971 + .param = PARAM3|PARAM6,
85972 +};
85973 +
85974 +struct size_overflow_hash _000991_hash = {
85975 + .next = NULL,
85976 + .name = "ntfs_file_buffered_write",
85977 + .param = PARAM4|PARAM6,
85978 +};
85979 +
85980 +struct size_overflow_hash _000993_hash = {
85981 + .next = NULL,
85982 + .name = "__ntfs_malloc",
85983 + .param = PARAM1,
85984 +};
85985 +
85986 +struct size_overflow_hash _000994_hash = {
85987 + .next = NULL,
85988 + .name = "nvme_alloc_queue",
85989 + .param = PARAM3,
85990 +};
85991 +
85992 +struct size_overflow_hash _000995_hash = {
85993 + .next = NULL,
85994 + .name = "ocfs2_acl_from_xattr",
85995 + .param = PARAM2,
85996 +};
85997 +
85998 +struct size_overflow_hash _000996_hash = {
85999 + .next = NULL,
86000 + .name = "ocfs2_control_message",
86001 + .param = PARAM3,
86002 +};
86003 +
86004 +struct size_overflow_hash _000997_hash = {
86005 + .next = NULL,
86006 + .name = "opera1_usb_i2c_msgxfer",
86007 + .param = PARAM4,
86008 +};
86009 +
86010 +struct size_overflow_hash _000998_hash = {
86011 + .next = NULL,
86012 + .name = "_ore_get_io_state",
86013 + .param = PARAM3,
86014 +};
86015 +
86016 +struct size_overflow_hash _000999_hash = {
86017 + .next = NULL,
86018 + .name = "orig_hash_add_if",
86019 + .param = PARAM2,
86020 +};
86021 +
86022 +struct size_overflow_hash _001000_hash = {
86023 + .next = NULL,
86024 + .name = "orig_hash_del_if",
86025 + .param = PARAM2,
86026 +};
86027 +
86028 +struct size_overflow_hash _001001_hash = {
86029 + .next = NULL,
86030 + .name = "orinoco_set_key",
86031 + .param = PARAM5|PARAM7,
86032 +};
86033 +
86034 +struct size_overflow_hash _001003_hash = {
86035 + .next = NULL,
86036 + .name = "osdmap_set_max_osd",
86037 + .param = PARAM2,
86038 +};
86039 +
86040 +struct size_overflow_hash _001004_hash = {
86041 + .next = NULL,
86042 + .name = "_osd_realloc_seg",
86043 + .param = PARAM3,
86044 +};
86045 +
86046 +struct size_overflow_hash _001005_hash = {
86047 + .next = NULL,
86048 + .name = "OSDSetBlock",
86049 + .param = PARAM2|PARAM4,
86050 +};
86051 +
86052 +struct size_overflow_hash _001007_hash = {
86053 + .next = NULL,
86054 + .name = "osst_execute",
86055 + .param = PARAM7|PARAM6,
86056 +};
86057 +
86058 +struct size_overflow_hash _001008_hash = {
86059 + .next = NULL,
86060 + .name = "osst_write",
86061 + .param = PARAM3,
86062 +};
86063 +
86064 +struct size_overflow_hash _001009_hash = {
86065 + .next = NULL,
86066 + .name = "otp_read",
86067 + .param = PARAM2|PARAM5|PARAM4,
86068 +};
86069 +
86070 +struct size_overflow_hash _001012_hash = {
86071 + .next = NULL,
86072 + .name = "ovs_vport_alloc",
86073 + .param = PARAM1,
86074 +};
86075 +
86076 +struct size_overflow_hash _001013_hash = {
86077 + .next = NULL,
86078 + .name = "packet_sendmsg_spkt",
86079 + .param = PARAM4,
86080 +};
86081 +
86082 +struct size_overflow_hash _001014_hash = {
86083 + .next = NULL,
86084 + .name = "pair_device",
86085 + .param = PARAM4,
86086 +};
86087 +
86088 +struct size_overflow_hash _001015_hash = {
86089 + .next = NULL,
86090 + .name = "pccard_store_cis",
86091 + .param = PARAM6,
86092 +};
86093 +
86094 +struct size_overflow_hash _001016_hash = {
86095 + .next = NULL,
86096 + .name = "pci_add_cap_save_buffer",
86097 + .param = PARAM3,
86098 +};
86099 +
86100 +struct size_overflow_hash _001017_hash = {
86101 + .next = NULL,
86102 + .name = "pcnet32_realloc_rx_ring",
86103 + .param = PARAM3,
86104 +};
86105 +
86106 +struct size_overflow_hash _001018_hash = {
86107 + .next = NULL,
86108 + .name = "pcnet32_realloc_tx_ring",
86109 + .param = PARAM3,
86110 +};
86111 +
86112 +struct size_overflow_hash _001019_hash = {
86113 + .next = NULL,
86114 + .name = "pcpu_mem_zalloc",
86115 + .param = PARAM1,
86116 +};
86117 +
86118 +struct size_overflow_hash _001020_hash = {
86119 + .next = NULL,
86120 + .name = "pep_sendmsg",
86121 + .param = PARAM4,
86122 +};
86123 +
86124 +struct size_overflow_hash _001021_hash = {
86125 + .next = NULL,
86126 + .name = "pfkey_sendmsg",
86127 + .param = PARAM4,
86128 +};
86129 +
86130 +struct size_overflow_hash _001022_hash = {
86131 + .next = NULL,
86132 + .name = "pidlist_resize",
86133 + .param = PARAM2,
86134 +};
86135 +
86136 +struct size_overflow_hash _001023_hash = {
86137 + .next = NULL,
86138 + .name = "pin_code_reply",
86139 + .param = PARAM4,
86140 +};
86141 +
86142 +struct size_overflow_hash _001024_hash = {
86143 + .next = NULL,
86144 + .name = "ping_getfrag",
86145 + .param = PARAM3|PARAM4,
86146 +};
86147 +
86148 +struct size_overflow_hash _001026_hash = {
86149 + .next = NULL,
86150 + .name = "pipe_set_size",
86151 + .param = PARAM2,
86152 +};
86153 +
86154 +struct size_overflow_hash _001027_hash = {
86155 + .next = NULL,
86156 + .name = "pkt_bio_alloc",
86157 + .param = PARAM1,
86158 +};
86159 +
86160 +struct size_overflow_hash _001028_hash = {
86161 + .next = NULL,
86162 + .name = "platform_create_bundle",
86163 + .param = PARAM4|PARAM6,
86164 +};
86165 +
86166 +struct size_overflow_hash _001030_hash = {
86167 + .next = NULL,
86168 + .name = "play_iframe",
86169 + .param = PARAM3,
86170 +};
86171 +
86172 +struct size_overflow_hash _001031_hash = {
86173 + .next = NULL,
86174 + .name = "pm8001_store_update_fw",
86175 + .param = PARAM4,
86176 +};
86177 +
86178 +struct size_overflow_hash _001032_hash = {
86179 + .next = NULL,
86180 + .name = "pmcraid_alloc_sglist",
86181 + .param = PARAM1,
86182 +};
86183 +
86184 +struct size_overflow_hash _001033_hash = {
86185 + .next = NULL,
86186 + .name = "pn533_dep_link_up",
86187 + .param = PARAM5,
86188 +};
86189 +
86190 +struct size_overflow_hash _001034_hash = {
86191 + .next = NULL,
86192 + .name = "pnp_alloc",
86193 + .param = PARAM1,
86194 +};
86195 +
86196 +struct size_overflow_hash _001035_hash = {
86197 + .next = NULL,
86198 + .name = "pn_sendmsg",
86199 + .param = PARAM4,
86200 +};
86201 +
86202 +struct size_overflow_hash _001036_hash = {
86203 + .next = NULL,
86204 + .name = "pppoe_sendmsg",
86205 + .param = PARAM4,
86206 +};
86207 +
86208 +struct size_overflow_hash _001037_hash = {
86209 + .next = NULL,
86210 + .name = "pppol2tp_sendmsg",
86211 + .param = PARAM4,
86212 +};
86213 +
86214 +struct size_overflow_hash _001038_hash = {
86215 + .next = NULL,
86216 + .name = "process_vm_rw",
86217 + .param = PARAM3|PARAM5,
86218 +};
86219 +
86220 +struct size_overflow_hash _001040_hash = {
86221 + .next = NULL,
86222 + .name = "process_vm_rw_single_vec",
86223 + .param = PARAM1|PARAM2,
86224 +};
86225 +
86226 +struct size_overflow_hash _001042_hash = {
86227 + .next = NULL,
86228 + .name = "proc_write",
86229 + .param = PARAM3,
86230 +};
86231 +
86232 +struct size_overflow_hash _001043_hash = {
86233 + .next = NULL,
86234 + .name = "profile_load",
86235 + .param = PARAM3,
86236 +};
86237 +
86238 +struct size_overflow_hash _001044_hash = {
86239 + .next = NULL,
86240 + .name = "profile_remove",
86241 + .param = PARAM3,
86242 +};
86243 +
86244 +struct size_overflow_hash _001045_hash = {
86245 + .next = NULL,
86246 + .name = "profile_replace",
86247 + .param = PARAM3,
86248 +};
86249 +
86250 +struct size_overflow_hash _001046_hash = {
86251 + .next = NULL,
86252 + .name = "pscsi_get_bio",
86253 + .param = PARAM1,
86254 +};
86255 +
86256 +struct size_overflow_hash _001047_hash = {
86257 + .next = NULL,
86258 + .name = "pyra_send",
86259 + .param = PARAM4,
86260 +};
86261 +
86262 +struct size_overflow_hash _001048_hash = {
86263 + .next = NULL,
86264 + .name = "qc_capture",
86265 + .param = PARAM3,
86266 +};
86267 +
86268 +struct size_overflow_hash _001049_hash = {
86269 + .next = NULL,
86270 + .name = "qla4xxx_alloc_work",
86271 + .param = PARAM2,
86272 +};
86273 +
86274 +struct size_overflow_hash _001050_hash = {
86275 + .next = NULL,
86276 + .name = "qlcnic_alloc_msix_entries",
86277 + .param = PARAM2,
86278 +};
86279 +
86280 +struct size_overflow_hash _001051_hash = {
86281 + .next = NULL,
86282 + .name = "qlcnic_alloc_sds_rings",
86283 + .param = PARAM2,
86284 +};
86285 +
86286 +struct size_overflow_hash _001052_hash = {
86287 + .next = NULL,
86288 + .name = "queue_received_packet",
86289 + .param = PARAM5,
86290 +};
86291 +
86292 +struct size_overflow_hash _001053_hash = {
86293 + .next = NULL,
86294 + .name = "raw_send_hdrinc",
86295 + .param = PARAM4,
86296 +};
86297 +
86298 +struct size_overflow_hash _001054_hash = {
86299 + .next = &_000022_hash,
86300 + .name = "raw_sendmsg",
86301 + .param = PARAM4,
86302 +};
86303 +
86304 +struct size_overflow_hash _001055_hash = {
86305 + .next = NULL,
86306 + .name = "rawsock_sendmsg",
86307 + .param = PARAM4,
86308 +};
86309 +
86310 +struct size_overflow_hash _001056_hash = {
86311 + .next = NULL,
86312 + .name = "rawv6_send_hdrinc",
86313 + .param = PARAM3,
86314 +};
86315 +
86316 +struct size_overflow_hash _001057_hash = {
86317 + .next = NULL,
86318 + .name = "rb_alloc",
86319 + .param = PARAM1,
86320 +};
86321 +
86322 +struct size_overflow_hash _001058_hash = {
86323 + .next = NULL,
86324 + .name = "rbd_alloc_coll",
86325 + .param = PARAM1,
86326 +};
86327 +
86328 +struct size_overflow_hash _001059_hash = {
86329 + .next = NULL,
86330 + .name = "rbd_create_rw_ops",
86331 + .param = PARAM2,
86332 +};
86333 +
86334 +struct size_overflow_hash _001060_hash = {
86335 + .next = NULL,
86336 + .name = "rds_ib_inc_copy_to_user",
86337 + .param = PARAM3,
86338 +};
86339 +
86340 +struct size_overflow_hash _001061_hash = {
86341 + .next = NULL,
86342 + .name = "rds_iw_inc_copy_to_user",
86343 + .param = PARAM3,
86344 +};
86345 +
86346 +struct size_overflow_hash _001062_hash = {
86347 + .next = NULL,
86348 + .name = "rds_message_alloc",
86349 + .param = PARAM1,
86350 +};
86351 +
86352 +struct size_overflow_hash _001063_hash = {
86353 + .next = NULL,
86354 + .name = "rds_message_copy_from_user",
86355 + .param = PARAM3,
86356 +};
86357 +
86358 +struct size_overflow_hash _001064_hash = {
86359 + .next = NULL,
86360 + .name = "rds_message_inc_copy_to_user",
86361 + .param = PARAM3,
86362 +};
86363 +
86364 +struct size_overflow_hash _001065_hash = {
86365 + .next = NULL,
86366 + .name = "redrat3_transmit_ir",
86367 + .param = PARAM3,
86368 +};
86369 +
86370 +struct size_overflow_hash _001066_hash = {
86371 + .next = NULL,
86372 + .name = "regcache_rbtree_insert_to_block",
86373 + .param = PARAM5,
86374 +};
86375 +
86376 +struct size_overflow_hash _001067_hash = {
86377 + .next = NULL,
86378 + .name = "_regmap_raw_write",
86379 + .param = PARAM4,
86380 +};
86381 +
86382 +struct size_overflow_hash _001068_hash = {
86383 + .next = NULL,
86384 + .name = "regmap_register_patch",
86385 + .param = PARAM3,
86386 +};
86387 +
86388 +struct size_overflow_hash _001069_hash = {
86389 + .next = NULL,
86390 + .name = "relay_alloc_page_array",
86391 + .param = PARAM1,
86392 +};
86393 +
86394 +struct size_overflow_hash _001070_hash = {
86395 + .next = NULL,
86396 + .name = "remove_uuid",
86397 + .param = PARAM4,
86398 +};
86399 +
86400 +struct size_overflow_hash _001071_hash = {
86401 + .next = NULL,
86402 + .name = "reshape_ring",
86403 + .param = PARAM2,
86404 +};
86405 +
86406 +struct size_overflow_hash _001072_hash = {
86407 + .next = NULL,
86408 + .name = "RESIZE_IF_NEEDED",
86409 + .param = PARAM2,
86410 +};
86411 +
86412 +struct size_overflow_hash _001073_hash = {
86413 + .next = NULL,
86414 + .name = "resize_stripes",
86415 + .param = PARAM2,
86416 +};
86417 +
86418 +struct size_overflow_hash _001074_hash = {
86419 + .next = NULL,
86420 + .name = "rfcomm_sock_sendmsg",
86421 + .param = PARAM4,
86422 +};
86423 +
86424 +struct size_overflow_hash _001075_hash = {
86425 + .next = NULL,
86426 + .name = "rose_sendmsg",
86427 + .param = PARAM4,
86428 +};
86429 +
86430 +struct size_overflow_hash _001076_hash = {
86431 + .next = NULL,
86432 + .name = "rxrpc_send_data",
86433 + .param = PARAM5,
86434 +};
86435 +
86436 +struct size_overflow_hash _001077_hash = {
86437 + .next = NULL,
86438 + .name = "rxrpc_setsockopt",
86439 + .param = PARAM5,
86440 +};
86441 +
86442 +struct size_overflow_hash _001078_hash = {
86443 + .next = NULL,
86444 + .name = "saa7146_vmalloc_build_pgtable",
86445 + .param = PARAM2,
86446 +};
86447 +
86448 +struct size_overflow_hash _001079_hash = {
86449 + .next = NULL,
86450 + .name = "saa7164_buffer_alloc_user",
86451 + .param = PARAM2,
86452 +};
86453 +
86454 +struct size_overflow_hash _001081_hash = {
86455 + .next = NULL,
86456 + .name = "sco_send_frame",
86457 + .param = PARAM3,
86458 +};
86459 +
86460 +struct size_overflow_hash _001082_hash = {
86461 + .next = NULL,
86462 + .name = "scsi_host_alloc",
86463 + .param = PARAM2,
86464 +};
86465 +
86466 +struct size_overflow_hash _001083_hash = {
86467 + .next = NULL,
86468 + .name = "scsi_tgt_kspace_exec",
86469 + .param = PARAM8,
86470 +};
86471 +
86472 +struct size_overflow_hash _001084_hash = {
86473 + .next = NULL,
86474 + .name = "sctp_sendmsg",
86475 + .param = PARAM4,
86476 +};
86477 +
86478 +struct size_overflow_hash _001085_hash = {
86479 + .next = NULL,
86480 + .name = "sctp_setsockopt",
86481 + .param = PARAM5,
86482 +};
86483 +
86484 +struct size_overflow_hash _001086_hash = {
86485 + .next = NULL,
86486 + .name = "sctp_setsockopt_connectx",
86487 + .param = PARAM3,
86488 +};
86489 +
86490 +struct size_overflow_hash _001087_hash = {
86491 + .next = NULL,
86492 + .name = "sctp_setsockopt_connectx_old",
86493 + .param = PARAM3,
86494 +};
86495 +
86496 +struct size_overflow_hash _001088_hash = {
86497 + .next = NULL,
86498 + .name = "sctp_tsnmap_init",
86499 + .param = PARAM2,
86500 +};
86501 +
86502 +struct size_overflow_hash _001089_hash = {
86503 + .next = NULL,
86504 + .name = "sctp_user_addto_chunk",
86505 + .param = PARAM2|PARAM3,
86506 +};
86507 +
86508 +struct size_overflow_hash _001091_hash = {
86509 + .next = NULL,
86510 + .name = "security_context_to_sid",
86511 + .param = PARAM2,
86512 +};
86513 +
86514 +struct size_overflow_hash _001092_hash = {
86515 + .next = NULL,
86516 + .name = "security_context_to_sid_default",
86517 + .param = PARAM2,
86518 +};
86519 +
86520 +struct size_overflow_hash _001093_hash = {
86521 + .next = NULL,
86522 + .name = "security_context_to_sid_force",
86523 + .param = PARAM2,
86524 +};
86525 +
86526 +struct size_overflow_hash _001094_hash = {
86527 + .next = NULL,
86528 + .name = "selinux_transaction_write",
86529 + .param = PARAM3,
86530 +};
86531 +
86532 +struct size_overflow_hash _001095_hash = {
86533 + .next = NULL,
86534 + .name = "sel_write_access",
86535 + .param = PARAM3,
86536 +};
86537 +
86538 +struct size_overflow_hash _001096_hash = {
86539 + .next = NULL,
86540 + .name = "sel_write_create",
86541 + .param = PARAM3,
86542 +};
86543 +
86544 +struct size_overflow_hash _001097_hash = {
86545 + .next = NULL,
86546 + .name = "sel_write_member",
86547 + .param = PARAM3,
86548 +};
86549 +
86550 +struct size_overflow_hash _001098_hash = {
86551 + .next = NULL,
86552 + .name = "sel_write_relabel",
86553 + .param = PARAM3,
86554 +};
86555 +
86556 +struct size_overflow_hash _001099_hash = {
86557 + .next = NULL,
86558 + .name = "sel_write_user",
86559 + .param = PARAM3,
86560 +};
86561 +
86562 +struct size_overflow_hash _001100_hash = {
86563 + .next = NULL,
86564 + .name = "__seq_open_private",
86565 + .param = PARAM3,
86566 +};
86567 +
86568 +struct size_overflow_hash _001101_hash = {
86569 + .next = NULL,
86570 + .name = "serverworks_create_gatt_pages",
86571 + .param = PARAM1,
86572 +};
86573 +
86574 +struct size_overflow_hash _001102_hash = {
86575 + .next = NULL,
86576 + .name = "set_connectable",
86577 + .param = PARAM4,
86578 +};
86579 +
86580 +struct size_overflow_hash _001103_hash = {
86581 + .next = NULL,
86582 + .name = "set_dev_class",
86583 + .param = PARAM4,
86584 +};
86585 +
86586 +struct size_overflow_hash _001104_hash = {
86587 + .next = NULL,
86588 + .name = "set_discoverable",
86589 + .param = PARAM4,
86590 +};
86591 +
86592 +struct size_overflow_hash _001105_hash = {
86593 + .next = NULL,
86594 + .name = "setkey",
86595 + .param = PARAM3,
86596 +};
86597 +
86598 +struct size_overflow_hash _001106_hash = {
86599 + .next = NULL,
86600 + .name = "set_le",
86601 + .param = PARAM4,
86602 +};
86603 +
86604 +struct size_overflow_hash _001107_hash = {
86605 + .next = NULL,
86606 + .name = "set_link_security",
86607 + .param = PARAM4,
86608 +};
86609 +
86610 +struct size_overflow_hash _001108_hash = {
86611 + .next = NULL,
86612 + .name = "set_local_name",
86613 + .param = PARAM4,
86614 +};
86615 +
86616 +struct size_overflow_hash _001109_hash = {
86617 + .next = NULL,
86618 + .name = "set_powered",
86619 + .param = PARAM4,
86620 +};
86621 +
86622 +struct size_overflow_hash _001110_hash = {
86623 + .next = NULL,
86624 + .name = "set_ssp",
86625 + .param = PARAM4,
86626 +};
86627 +
86628 +struct size_overflow_hash _001111_hash = {
86629 + .next = &_000305_hash,
86630 + .name = "sg_build_sgat",
86631 + .param = PARAM3,
86632 +};
86633 +
86634 +struct size_overflow_hash _001112_hash = {
86635 + .next = NULL,
86636 + .name = "sg_read_oxfer",
86637 + .param = PARAM3,
86638 +};
86639 +
86640 +struct size_overflow_hash _001113_hash = {
86641 + .next = NULL,
86642 + .name = "shmem_xattr_set",
86643 + .param = PARAM4,
86644 +};
86645 +
86646 +struct size_overflow_hash _001114_hash = {
86647 + .next = NULL,
86648 + .name = "simple_alloc_urb",
86649 + .param = PARAM3,
86650 +};
86651 +
86652 +struct size_overflow_hash _001115_hash = {
86653 + .next = NULL,
86654 + .name = "sisusb_send_bridge_packet",
86655 + .param = PARAM2,
86656 +};
86657 +
86658 +struct size_overflow_hash _001116_hash = {
86659 + .next = NULL,
86660 + .name = "sisusb_send_packet",
86661 + .param = PARAM2,
86662 +};
86663 +
86664 +struct size_overflow_hash _001117_hash = {
86665 + .next = NULL,
86666 + .name = "skb_add_data_nocache",
86667 + .param = PARAM4,
86668 +};
86669 +
86670 +struct size_overflow_hash _001118_hash = {
86671 + .next = NULL,
86672 + .name = "skb_copy_datagram_from_iovec",
86673 + .param = PARAM2|PARAM5|PARAM4,
86674 +};
86675 +
86676 +struct size_overflow_hash _001121_hash = {
86677 + .next = NULL,
86678 + .name = "skb_copy_to_page_nocache",
86679 + .param = PARAM6,
86680 +};
86681 +
86682 +struct size_overflow_hash _001122_hash = {
86683 + .next = NULL,
86684 + .name = "sk_chk_filter",
86685 + .param = PARAM2,
86686 +};
86687 +
86688 +struct size_overflow_hash _001123_hash = {
86689 + .next = NULL,
86690 + .name = "skcipher_sendmsg",
86691 + .param = PARAM4,
86692 +};
86693 +
86694 +struct size_overflow_hash _001124_hash = {
86695 + .next = NULL,
86696 + .name = "sl_change_mtu",
86697 + .param = PARAM2,
86698 +};
86699 +
86700 +struct size_overflow_hash _001125_hash = {
86701 + .next = &_000894_hash,
86702 + .name = "slhc_init",
86703 + .param = PARAM1|PARAM2,
86704 +};
86705 +
86706 +struct size_overflow_hash _001127_hash = {
86707 + .next = NULL,
86708 + .name = "sm501_create_subdev",
86709 + .param = PARAM3|PARAM4,
86710 +};
86711 +
86712 +struct size_overflow_hash _001129_hash = {
86713 + .next = NULL,
86714 + .name = "smk_write_access",
86715 + .param = PARAM3,
86716 +};
86717 +
86718 +struct size_overflow_hash _001130_hash = {
86719 + .next = NULL,
86720 + .name = "snapshot_write",
86721 + .param = PARAM3,
86722 +};
86723 +
86724 +struct size_overflow_hash _001131_hash = {
86725 + .next = NULL,
86726 + .name = "snd_ac97_pcm_assign",
86727 + .param = PARAM2,
86728 +};
86729 +
86730 +struct size_overflow_hash _001132_hash = {
86731 + .next = NULL,
86732 + .name = "snd_card_create",
86733 + .param = PARAM4,
86734 +};
86735 +
86736 +struct size_overflow_hash _001133_hash = {
86737 + .next = NULL,
86738 + .name = "snd_emux_create_port",
86739 + .param = PARAM3,
86740 +};
86741 +
86742 +struct size_overflow_hash _001134_hash = {
86743 + .next = NULL,
86744 + .name = "snd_gus_dram_write",
86745 + .param = PARAM4,
86746 +};
86747 +
86748 +struct size_overflow_hash _001135_hash = {
86749 + .next = NULL,
86750 + .name = "snd_midi_channel_alloc_set",
86751 + .param = PARAM1,
86752 +};
86753 +
86754 +struct size_overflow_hash _001136_hash = {
86755 + .next = NULL,
86756 + .name = "_snd_pcm_lib_alloc_vmalloc_buffer",
86757 + .param = PARAM2,
86758 +};
86759 +
86760 +struct size_overflow_hash _001137_hash = {
86761 + .next = NULL,
86762 + .name = "snd_pcm_oss_sync1",
86763 + .param = PARAM2,
86764 +};
86765 +
86766 +struct size_overflow_hash _001138_hash = {
86767 + .next = NULL,
86768 + .name = "snd_pcm_oss_write",
86769 + .param = PARAM3,
86770 +};
86771 +
86772 +struct size_overflow_hash _001139_hash = {
86773 + .next = NULL,
86774 + .name = "snd_pcm_plugin_build",
86775 + .param = PARAM5,
86776 +};
86777 +
86778 +struct size_overflow_hash _001140_hash = {
86779 + .next = NULL,
86780 + .name = "snd_rawmidi_kernel_write",
86781 + .param = PARAM3,
86782 +};
86783 +
86784 +struct size_overflow_hash _001141_hash = {
86785 + .next = NULL,
86786 + .name = "snd_rawmidi_write",
86787 + .param = PARAM3,
86788 +};
86789 +
86790 +struct size_overflow_hash _001142_hash = {
86791 + .next = NULL,
86792 + .name = "snd_rme32_playback_copy",
86793 + .param = PARAM5,
86794 +};
86795 +
86796 +struct size_overflow_hash _001143_hash = {
86797 + .next = NULL,
86798 + .name = "snd_rme96_playback_copy",
86799 + .param = PARAM5,
86800 +};
86801 +
86802 +struct size_overflow_hash _001144_hash = {
86803 + .next = NULL,
86804 + .name = "snd_seq_device_new",
86805 + .param = PARAM4,
86806 +};
86807 +
86808 +struct size_overflow_hash _001145_hash = {
86809 + .next = NULL,
86810 + .name = "snd_seq_oss_readq_new",
86811 + .param = PARAM2,
86812 +};
86813 +
86814 +struct size_overflow_hash _001146_hash = {
86815 + .next = NULL,
86816 + .name = "snd_vx_create",
86817 + .param = PARAM4,
86818 +};
86819 +
86820 +struct size_overflow_hash _001147_hash = {
86821 + .next = NULL,
86822 + .name = "sock_setsockopt",
86823 + .param = PARAM5,
86824 +};
86825 +
86826 +struct size_overflow_hash _001148_hash = {
86827 + .next = NULL,
86828 + .name = "sound_write",
86829 + .param = PARAM3,
86830 +};
86831 +
86832 +struct size_overflow_hash _001149_hash = {
86833 + .next = NULL,
86834 + .name = "_sp2d_alloc",
86835 + .param = PARAM1,
86836 +};
86837 +
86838 +struct size_overflow_hash _001150_hash = {
86839 + .next = NULL,
86840 + .name = "spi_alloc_master",
86841 + .param = PARAM2,
86842 +};
86843 +
86844 +struct size_overflow_hash _001151_hash = {
86845 + .next = NULL,
86846 + .name = "spidev_message",
86847 + .param = PARAM3,
86848 +};
86849 +
86850 +struct size_overflow_hash _001152_hash = {
86851 + .next = NULL,
86852 + .name = "spi_register_board_info",
86853 + .param = PARAM2,
86854 +};
86855 +
86856 +struct size_overflow_hash _001153_hash = {
86857 + .next = NULL,
86858 + .name = "squashfs_cache_init",
86859 + .param = PARAM2,
86860 +};
86861 +
86862 +struct size_overflow_hash _001154_hash = {
86863 + .next = NULL,
86864 + .name = "squashfs_read_data",
86865 + .param = PARAM6,
86866 +};
86867 +
86868 +struct size_overflow_hash _001155_hash = {
86869 + .next = NULL,
86870 + .name = "srp_alloc_iu",
86871 + .param = PARAM2,
86872 +};
86873 +
86874 +struct size_overflow_hash _001156_hash = {
86875 + .next = NULL,
86876 + .name = "srp_iu_pool_alloc",
86877 + .param = PARAM2,
86878 +};
86879 +
86880 +struct size_overflow_hash _001157_hash = {
86881 + .next = NULL,
86882 + .name = "srp_ring_alloc",
86883 + .param = PARAM2,
86884 +};
86885 +
86886 +struct size_overflow_hash _001159_hash = {
86887 + .next = NULL,
86888 + .name = "start_isoc_chain",
86889 + .param = PARAM2,
86890 +};
86891 +
86892 +struct size_overflow_hash _001160_hash = {
86893 + .next = NULL,
86894 + .name = "stk_prepare_sio_buffers",
86895 + .param = PARAM2,
86896 +};
86897 +
86898 +struct size_overflow_hash _001161_hash = {
86899 + .next = NULL,
86900 + .name = "store_iwmct_log_level",
86901 + .param = PARAM4,
86902 +};
86903 +
86904 +struct size_overflow_hash _001162_hash = {
86905 + .next = NULL,
86906 + .name = "store_iwmct_log_level_fw",
86907 + .param = PARAM4,
86908 +};
86909 +
86910 +struct size_overflow_hash _001163_hash = {
86911 + .next = NULL,
86912 + .name = "st_write",
86913 + .param = PARAM3,
86914 +};
86915 +
86916 +struct size_overflow_hash _001164_hash = {
86917 + .next = NULL,
86918 + .name = "svc_pool_map_alloc_arrays",
86919 + .param = PARAM2,
86920 +};
86921 +
86922 +struct size_overflow_hash _001165_hash = {
86923 + .next = NULL,
86924 + .name = "symtab_init",
86925 + .param = PARAM2,
86926 +};
86927 +
86928 +struct size_overflow_hash _001166_hash = {
86929 + .next = NULL,
86930 + .name = "sys_bind",
86931 + .param = PARAM3,
86932 +};
86933 +
86934 +struct size_overflow_hash _001167_hash = {
86935 + .next = NULL,
86936 + .name = "sys_connect",
86937 + .param = PARAM3,
86938 +};
86939 +
86940 +struct size_overflow_hash _001168_hash = {
86941 + .next = NULL,
86942 + .name = "sys_flistxattr",
86943 + .param = PARAM3,
86944 +};
86945 +
86946 +struct size_overflow_hash _001169_hash = {
86947 + .next = NULL,
86948 + .name = "sys_fsetxattr",
86949 + .param = PARAM4,
86950 +};
86951 +
86952 +struct size_overflow_hash _001170_hash = {
86953 + .next = NULL,
86954 + .name = "sysfs_write_file",
86955 + .param = PARAM3,
86956 +};
86957 +
86958 +struct size_overflow_hash _001171_hash = {
86959 + .next = NULL,
86960 + .name = "sys_ipc",
86961 + .param = PARAM3,
86962 +};
86963 +
86964 +struct size_overflow_hash _001172_hash = {
86965 + .next = &_000974_hash,
86966 + .name = "sys_keyctl",
86967 + .param = PARAM4,
86968 +};
86969 +
86970 +struct size_overflow_hash _001173_hash = {
86971 + .next = NULL,
86972 + .name = "sys_listxattr",
86973 + .param = PARAM3,
86974 +};
86975 +
86976 +struct size_overflow_hash _001174_hash = {
86977 + .next = NULL,
86978 + .name = "sys_llistxattr",
86979 + .param = PARAM3,
86980 +};
86981 +
86982 +struct size_overflow_hash _001175_hash = {
86983 + .next = NULL,
86984 + .name = "sys_lsetxattr",
86985 + .param = PARAM4,
86986 +};
86987 +
86988 +struct size_overflow_hash _001176_hash = {
86989 + .next = NULL,
86990 + .name = "sys_mq_timedsend",
86991 + .param = PARAM3,
86992 +};
86993 +
86994 +struct size_overflow_hash _001177_hash = {
86995 + .next = NULL,
86996 + .name = "sys_sched_setaffinity",
86997 + .param = PARAM2,
86998 +};
86999 +
87000 +struct size_overflow_hash _001178_hash = {
87001 + .next = NULL,
87002 + .name = "sys_semop",
87003 + .param = PARAM3,
87004 +};
87005 +
87006 +struct size_overflow_hash _001179_hash = {
87007 + .next = NULL,
87008 + .name = "sys_sendto",
87009 + .param = PARAM6,
87010 +};
87011 +
87012 +struct size_overflow_hash _001180_hash = {
87013 + .next = NULL,
87014 + .name = "sys_setxattr",
87015 + .param = PARAM4,
87016 +};
87017 +
87018 +struct size_overflow_hash _001181_hash = {
87019 + .next = NULL,
87020 + .name = "t4_alloc_mem",
87021 + .param = PARAM1,
87022 +};
87023 +
87024 +struct size_overflow_hash _001182_hash = {
87025 + .next = NULL,
87026 + .name = "tcf_hash_create",
87027 + .param = PARAM4,
87028 +};
87029 +
87030 +struct size_overflow_hash _001183_hash = {
87031 + .next = NULL,
87032 + .name = "__team_options_register",
87033 + .param = PARAM3,
87034 +};
87035 +
87036 +struct size_overflow_hash _001184_hash = {
87037 + .next = NULL,
87038 + .name = "test_unaligned_bulk",
87039 + .param = PARAM3,
87040 +};
87041 +
87042 +struct size_overflow_hash _001185_hash = {
87043 + .next = NULL,
87044 + .name = "tifm_alloc_adapter",
87045 + .param = PARAM1,
87046 +};
87047 +
87048 +struct size_overflow_hash _001186_hash = {
87049 + .next = NULL,
87050 + .name = "timeout_write",
87051 + .param = PARAM3,
87052 +};
87053 +
87054 +struct size_overflow_hash _001187_hash = {
87055 + .next = NULL,
87056 + .name = "tipc_link_send_sections_fast",
87057 + .param = PARAM4,
87058 +};
87059 +
87060 +struct size_overflow_hash _001188_hash = {
87061 + .next = NULL,
87062 + .name = "tipc_subseq_alloc",
87063 + .param = PARAM1,
87064 +};
87065 +
87066 +struct size_overflow_hash _001189_hash = {
87067 + .next = NULL,
87068 + .name = "tm6000_read_write_usb",
87069 + .param = PARAM7,
87070 +};
87071 +
87072 +struct size_overflow_hash _001190_hash = {
87073 + .next = NULL,
87074 + .name = "tnode_alloc",
87075 + .param = PARAM1,
87076 +};
87077 +
87078 +struct size_overflow_hash _001191_hash = {
87079 + .next = NULL,
87080 + .name = "tomoyo_commit_ok",
87081 + .param = PARAM2,
87082 +};
87083 +
87084 +struct size_overflow_hash _001192_hash = {
87085 + .next = NULL,
87086 + .name = "tomoyo_scan_bprm",
87087 + .param = PARAM2|PARAM4,
87088 +};
87089 +
87090 +struct size_overflow_hash _001194_hash = {
87091 + .next = NULL,
87092 + .name = "tps65910_i2c_write",
87093 + .param = PARAM3,
87094 +};
87095 +
87096 +struct size_overflow_hash _001195_hash = {
87097 + .next = NULL,
87098 + .name = "ts_write",
87099 + .param = PARAM3,
87100 +};
87101 +
87102 +struct size_overflow_hash _001196_hash = {
87103 + .next = NULL,
87104 + .name = "ttusb2_msg",
87105 + .param = PARAM4,
87106 +};
87107 +
87108 +struct size_overflow_hash _001197_hash = {
87109 + .next = NULL,
87110 + .name = "tty_write",
87111 + .param = PARAM3,
87112 +};
87113 +
87114 +struct size_overflow_hash _001198_hash = {
87115 + .next = NULL,
87116 + .name = "ubi_dbg_check_all_ff",
87117 + .param = PARAM4,
87118 +};
87119 +
87120 +struct size_overflow_hash _001199_hash = {
87121 + .next = NULL,
87122 + .name = "ubi_dbg_check_write",
87123 + .param = PARAM5,
87124 +};
87125 +
87126 +struct size_overflow_hash _001200_hash = {
87127 + .next = NULL,
87128 + .name = "ubifs_setxattr",
87129 + .param = PARAM4,
87130 +};
87131 +
87132 +struct size_overflow_hash _001201_hash = {
87133 + .next = NULL,
87134 + .name = "udf_sb_alloc_partition_maps",
87135 + .param = PARAM2,
87136 +};
87137 +
87138 +struct size_overflow_hash _001202_hash = {
87139 + .next = NULL,
87140 + .name = "udplite_getfrag",
87141 + .param = PARAM3|PARAM4,
87142 +};
87143 +
87144 +struct size_overflow_hash _001204_hash = {
87145 + .next = NULL,
87146 + .name = "ulong_write_file",
87147 + .param = PARAM3,
87148 +};
87149 +
87150 +struct size_overflow_hash _001205_hash = {
87151 + .next = NULL,
87152 + .name = "unix_dgram_sendmsg",
87153 + .param = PARAM4,
87154 +};
87155 +
87156 +struct size_overflow_hash _001206_hash = {
87157 + .next = NULL,
87158 + .name = "unix_stream_sendmsg",
87159 + .param = PARAM4,
87160 +};
87161 +
87162 +struct size_overflow_hash _001207_hash = {
87163 + .next = NULL,
87164 + .name = "unlink_queued",
87165 + .param = PARAM3|PARAM4,
87166 +};
87167 +
87168 +struct size_overflow_hash _001208_hash = {
87169 + .next = NULL,
87170 + .name = "update_pmkid",
87171 + .param = PARAM4,
87172 +};
87173 +
87174 +struct size_overflow_hash _001209_hash = {
87175 + .next = NULL,
87176 + .name = "usb_alloc_coherent",
87177 + .param = PARAM2,
87178 +};
87179 +
87180 +struct size_overflow_hash _001210_hash = {
87181 + .next = NULL,
87182 + .name = "uvc_alloc_buffers",
87183 + .param = PARAM2,
87184 +};
87185 +
87186 +struct size_overflow_hash _001211_hash = {
87187 + .next = NULL,
87188 + .name = "uvc_alloc_entity",
87189 + .param = PARAM3,
87190 +};
87191 +
87192 +struct size_overflow_hash _001212_hash = {
87193 + .next = NULL,
87194 + .name = "v4l2_ctrl_new",
87195 + .param = PARAM7,
87196 +};
87197 +
87198 +struct size_overflow_hash _001213_hash = {
87199 + .next = NULL,
87200 + .name = "v4l2_event_subscribe",
87201 + .param = PARAM3,
87202 +};
87203 +
87204 +struct size_overflow_hash _001214_hash = {
87205 + .next = NULL,
87206 + .name = "vb2_read",
87207 + .param = PARAM3,
87208 +};
87209 +
87210 +struct size_overflow_hash _001215_hash = {
87211 + .next = NULL,
87212 + .name = "vb2_write",
87213 + .param = PARAM3,
87214 +};
87215 +
87216 +struct size_overflow_hash _001216_hash = {
87217 + .next = NULL,
87218 + .name = "vc_resize",
87219 + .param = PARAM2|PARAM3,
87220 +};
87221 +
87222 +struct size_overflow_hash _001218_hash = {
87223 + .next = NULL,
87224 + .name = "__vhost_add_used_n",
87225 + .param = PARAM3,
87226 +};
87227 +
87228 +struct size_overflow_hash _001219_hash = {
87229 + .next = NULL,
87230 + .name = "__videobuf_alloc_vb",
87231 + .param = PARAM1,
87232 +};
87233 +
87234 +struct size_overflow_hash _001220_hash = {
87235 + .next = NULL,
87236 + .name = "videobuf_dma_init_kernel",
87237 + .param = PARAM3,
87238 +};
87239 +
87240 +struct size_overflow_hash _001221_hash = {
87241 + .next = NULL,
87242 + .name = "virtqueue_add_buf",
87243 + .param = PARAM3|PARAM4,
87244 +};
87245 +
87246 +struct size_overflow_hash _001223_hash = {
87247 + .next = NULL,
87248 + .name = "vmalloc",
87249 + .param = PARAM1,
87250 +};
87251 +
87252 +struct size_overflow_hash _001224_hash = {
87253 + .next = NULL,
87254 + .name = "vmalloc_to_sg",
87255 + .param = PARAM2,
87256 +};
87257 +
87258 +struct size_overflow_hash _001225_hash = {
87259 + .next = NULL,
87260 + .name = "vol_cdev_write",
87261 + .param = PARAM3,
87262 +};
87263 +
87264 +struct size_overflow_hash _001226_hash = {
87265 + .next = NULL,
87266 + .name = "vxge_device_register",
87267 + .param = PARAM4,
87268 +};
87269 +
87270 +struct size_overflow_hash _001227_hash = {
87271 + .next = NULL,
87272 + .name = "__vxge_hw_channel_allocate",
87273 + .param = PARAM3,
87274 +};
87275 +
87276 +struct size_overflow_hash _001228_hash = {
87277 + .next = NULL,
87278 + .name = "vzalloc",
87279 + .param = PARAM1,
87280 +};
87281 +
87282 +struct size_overflow_hash _001229_hash = {
87283 + .next = NULL,
87284 + .name = "vzalloc_node",
87285 + .param = PARAM1,
87286 +};
87287 +
87288 +struct size_overflow_hash _001230_hash = {
87289 + .next = NULL,
87290 + .name = "wa_nep_queue",
87291 + .param = PARAM2,
87292 +};
87293 +
87294 +struct size_overflow_hash _001231_hash = {
87295 + .next = NULL,
87296 + .name = "__wa_xfer_setup_segs",
87297 + .param = PARAM2,
87298 +};
87299 +
87300 +struct size_overflow_hash _001232_hash = {
87301 + .next = NULL,
87302 + .name = "wiphy_new",
87303 + .param = PARAM2,
87304 +};
87305 +
87306 +struct size_overflow_hash _001233_hash = {
87307 + .next = NULL,
87308 + .name = "wpan_phy_alloc",
87309 + .param = PARAM1,
87310 +};
87311 +
87312 +struct size_overflow_hash _001234_hash = {
87313 + .next = NULL,
87314 + .name = "wusb_ccm_mac",
87315 + .param = PARAM7,
87316 +};
87317 +
87318 +struct size_overflow_hash _001235_hash = {
87319 + .next = NULL,
87320 + .name = "x25_sendmsg",
87321 + .param = PARAM4,
87322 +};
87323 +
87324 +struct size_overflow_hash _001236_hash = {
87325 + .next = NULL,
87326 + .name = "xfrm_hash_alloc",
87327 + .param = PARAM1,
87328 +};
87329 +
87330 +struct size_overflow_hash _001237_hash = {
87331 + .next = NULL,
87332 + .name = "_xfs_buf_get_pages",
87333 + .param = PARAM2,
87334 +};
87335 +
87336 +struct size_overflow_hash _001238_hash = {
87337 + .next = NULL,
87338 + .name = "xfs_da_buf_make",
87339 + .param = PARAM1,
87340 +};
87341 +
87342 +struct size_overflow_hash _001239_hash = {
87343 + .next = NULL,
87344 + .name = "xfs_da_grow_inode_int",
87345 + .param = PARAM3,
87346 +};
87347 +
87348 +struct size_overflow_hash _001240_hash = {
87349 + .next = NULL,
87350 + .name = "xfs_dir_cilookup_result",
87351 + .param = PARAM3,
87352 +};
87353 +
87354 +struct size_overflow_hash _001241_hash = {
87355 + .next = NULL,
87356 + .name = "xfs_iext_add_indirect_multi",
87357 + .param = PARAM3,
87358 +};
87359 +
87360 +struct size_overflow_hash _001242_hash = {
87361 + .next = NULL,
87362 + .name = "xfs_iext_inline_to_direct",
87363 + .param = PARAM2,
87364 +};
87365 +
87366 +struct size_overflow_hash _001243_hash = {
87367 + .next = NULL,
87368 + .name = "xfs_iroot_realloc",
87369 + .param = PARAM2,
87370 +};
87371 +
87372 +struct size_overflow_hash _001244_hash = {
87373 + .next = NULL,
87374 + .name = "xhci_alloc_stream_info",
87375 + .param = PARAM3,
87376 +};
87377 +
87378 +struct size_overflow_hash _001245_hash = {
87379 + .next = NULL,
87380 + .name = "xlog_recover_add_to_trans",
87381 + .param = PARAM4,
87382 +};
87383 +
87384 +struct size_overflow_hash _001246_hash = {
87385 + .next = NULL,
87386 + .name = "xprt_alloc",
87387 + .param = PARAM2,
87388 +};
87389 +
87390 +struct size_overflow_hash _001247_hash = {
87391 + .next = NULL,
87392 + .name = "xt_alloc_table_info",
87393 + .param = PARAM1,
87394 +};
87395 +
87396 +struct size_overflow_hash _001248_hash = {
87397 + .next = NULL,
87398 + .name = "_zd_iowrite32v_async_locked",
87399 + .param = PARAM3,
87400 +};
87401 +
87402 +struct size_overflow_hash _001249_hash = {
87403 + .next = NULL,
87404 + .name = "zd_usb_iowrite16v",
87405 + .param = PARAM3,
87406 +};
87407 +
87408 +struct size_overflow_hash _001250_hash = {
87409 + .next = NULL,
87410 + .name = "acpi_ds_build_internal_package_obj",
87411 + .param = PARAM3,
87412 +};
87413 +
87414 +struct size_overflow_hash _001251_hash = {
87415 + .next = NULL,
87416 + .name = "acpi_system_read_event",
87417 + .param = PARAM3,
87418 +};
87419 +
87420 +struct size_overflow_hash _001252_hash = {
87421 + .next = NULL,
87422 + .name = "acpi_ut_create_buffer_object",
87423 + .param = PARAM1,
87424 +};
87425 +
87426 +struct size_overflow_hash _001253_hash = {
87427 + .next = NULL,
87428 + .name = "acpi_ut_create_package_object",
87429 + .param = PARAM1,
87430 +};
87431 +
87432 +struct size_overflow_hash _001254_hash = {
87433 + .next = NULL,
87434 + .name = "acpi_ut_create_string_object",
87435 + .param = PARAM1,
87436 +};
87437 +
87438 +struct size_overflow_hash _001255_hash = {
87439 + .next = NULL,
87440 + .name = "ad7879_spi_multi_read",
87441 + .param = PARAM3,
87442 +};
87443 +
87444 +struct size_overflow_hash _001256_hash = {
87445 + .next = NULL,
87446 + .name = "add_child",
87447 + .param = PARAM4,
87448 +};
87449 +
87450 +struct size_overflow_hash _001257_hash = {
87451 + .next = NULL,
87452 + .name = "add_port",
87453 + .param = PARAM2,
87454 +};
87455 +
87456 +struct size_overflow_hash _001258_hash = {
87457 + .next = NULL,
87458 + .name = "adu_read",
87459 + .param = PARAM3,
87460 +};
87461 +
87462 +struct size_overflow_hash _001259_hash = {
87463 + .next = NULL,
87464 + .name = "afs_cell_create",
87465 + .param = PARAM2,
87466 +};
87467 +
87468 +struct size_overflow_hash _001260_hash = {
87469 + .next = NULL,
87470 + .name = "agp_generic_alloc_user",
87471 + .param = PARAM1,
87472 +};
87473 +
87474 +struct size_overflow_hash _001261_hash = {
87475 + .next = NULL,
87476 + .name = "alloc_agpphysmem_i8xx",
87477 + .param = PARAM1,
87478 +};
87479 +
87480 +struct size_overflow_hash _001262_hash = {
87481 + .next = NULL,
87482 + .name = "allocate_cnodes",
87483 + .param = PARAM1,
87484 +};
87485 +
87486 +struct size_overflow_hash _001263_hash = {
87487 + .next = NULL,
87488 + .name = "___alloc_bootmem",
87489 + .param = PARAM1,
87490 +};
87491 +
87492 +struct size_overflow_hash _001264_hash = {
87493 + .next = NULL,
87494 + .name = "__alloc_bootmem_nopanic",
87495 + .param = PARAM1,
87496 +};
87497 +
87498 +struct size_overflow_hash _001265_hash = {
87499 + .next = NULL,
87500 + .name = "alloc_bulk_urbs_generic",
87501 + .param = PARAM5,
87502 +};
87503 +
87504 +struct size_overflow_hash _001266_hash = {
87505 + .next = NULL,
87506 + .name = "alloc_candev",
87507 + .param = PARAM1|PARAM2,
87508 +};
87509 +
87510 +struct size_overflow_hash _001268_hash = {
87511 + .next = NULL,
87512 + .name = "____alloc_ei_netdev",
87513 + .param = PARAM1,
87514 +};
87515 +
87516 +struct size_overflow_hash _001269_hash = {
87517 + .next = NULL,
87518 + .name = "alloc_etherdev_mqs",
87519 + .param = PARAM1,
87520 +};
87521 +
87522 +struct size_overflow_hash _001270_hash = {
87523 + .next = NULL,
87524 + .name = "alloc_extent_buffer",
87525 + .param = PARAM3,
87526 +};
87527 +
87528 +struct size_overflow_hash _001271_hash = {
87529 + .next = NULL,
87530 + .name = "alloc_fcdev",
87531 + .param = PARAM1,
87532 +};
87533 +
87534 +struct size_overflow_hash _001272_hash = {
87535 + .next = NULL,
87536 + .name = "alloc_fddidev",
87537 + .param = PARAM1,
87538 +};
87539 +
87540 +struct size_overflow_hash _001273_hash = {
87541 + .next = NULL,
87542 + .name = "alloc_hippi_dev",
87543 + .param = PARAM1,
87544 +};
87545 +
87546 +struct size_overflow_hash _001274_hash = {
87547 + .next = NULL,
87548 + .name = "alloc_irdadev",
87549 + .param = PARAM1,
87550 +};
87551 +
87552 +struct size_overflow_hash _001275_hash = {
87553 + .next = NULL,
87554 + .name = "alloc_ltalkdev",
87555 + .param = PARAM1,
87556 +};
87557 +
87558 +struct size_overflow_hash _001276_hash = {
87559 + .next = NULL,
87560 + .name = "alloc_one_pg_vec_page",
87561 + .param = PARAM1,
87562 +};
87563 +
87564 +struct size_overflow_hash _001277_hash = {
87565 + .next = NULL,
87566 + .name = "alloc_orinocodev",
87567 + .param = PARAM1,
87568 +};
87569 +
87570 +struct size_overflow_hash _001279_hash = {
87571 + .next = NULL,
87572 + .name = "alloc_trdev",
87573 + .param = PARAM1,
87574 +};
87575 +
87576 +struct size_overflow_hash _001280_hash = {
87577 + .next = NULL,
87578 + .name = "async_setkey",
87579 + .param = PARAM3,
87580 +};
87581 +
87582 +struct size_overflow_hash _001281_hash = {
87583 + .next = NULL,
87584 + .name = "ata_host_alloc_pinfo",
87585 + .param = PARAM3,
87586 +};
87587 +
87588 +struct size_overflow_hash _001284_hash = {
87589 + .next = NULL,
87590 + .name = "ath6kl_connect_event",
87591 + .param = PARAM7|PARAM9|PARAM8,
87592 +};
87593 +
87594 +struct size_overflow_hash _001285_hash = {
87595 + .next = NULL,
87596 + .name = "ath6kl_fwlog_block_read",
87597 + .param = PARAM3,
87598 +};
87599 +
87600 +struct size_overflow_hash _001286_hash = {
87601 + .next = NULL,
87602 + .name = "ath6kl_fwlog_read",
87603 + .param = PARAM3,
87604 +};
87605 +
87606 +struct size_overflow_hash _001287_hash = {
87607 + .next = NULL,
87608 + .name = "ath_rx_init",
87609 + .param = PARAM2,
87610 +};
87611 +
87612 +struct size_overflow_hash _001288_hash = {
87613 + .next = NULL,
87614 + .name = "ath_tx_init",
87615 + .param = PARAM2,
87616 +};
87617 +
87618 +struct size_overflow_hash _001289_hash = {
87619 + .next = NULL,
87620 + .name = "atm_get_addr",
87621 + .param = PARAM3,
87622 +};
87623 +
87624 +struct size_overflow_hash _001290_hash = {
87625 + .next = NULL,
87626 + .name = "av7110_ipack_init",
87627 + .param = PARAM2,
87628 +};
87629 +
87630 +struct size_overflow_hash _001291_hash = {
87631 + .next = NULL,
87632 + .name = "bdx_rxdb_create",
87633 + .param = PARAM1,
87634 +};
87635 +
87636 +struct size_overflow_hash _001292_hash = {
87637 + .next = NULL,
87638 + .name = "bdx_tx_db_init",
87639 + .param = PARAM2,
87640 +};
87641 +
87642 +struct size_overflow_hash _001293_hash = {
87643 + .next = NULL,
87644 + .name = "bio_map_kern",
87645 + .param = PARAM3,
87646 +};
87647 +
87648 +struct size_overflow_hash _001294_hash = {
87649 + .next = NULL,
87650 + .name = "bits_to_user",
87651 + .param = PARAM3,
87652 +};
87653 +
87654 +struct size_overflow_hash _001295_hash = {
87655 + .next = NULL,
87656 + .name = "__blk_queue_init_tags",
87657 + .param = PARAM2,
87658 +};
87659 +
87660 +struct size_overflow_hash _001296_hash = {
87661 + .next = NULL,
87662 + .name = "blk_queue_resize_tags",
87663 + .param = PARAM2,
87664 +};
87665 +
87666 +struct size_overflow_hash _001297_hash = {
87667 + .next = NULL,
87668 + .name = "blk_rq_map_user_iov",
87669 + .param = PARAM5,
87670 +};
87671 +
87672 +struct size_overflow_hash _001298_hash = {
87673 + .next = NULL,
87674 + .name = "bm_init",
87675 + .param = PARAM2,
87676 +};
87677 +
87678 +struct size_overflow_hash _001299_hash = {
87679 + .next = NULL,
87680 + .name = "brcmf_alloc_wdev",
87681 + .param = PARAM1,
87682 +};
87683 +
87684 +struct size_overflow_hash _001300_hash = {
87685 + .next = NULL,
87686 + .name = "btrfs_insert_dir_item",
87687 + .param = PARAM4,
87688 +};
87689 +
87690 +struct size_overflow_hash _001301_hash = {
87691 + .next = NULL,
87692 + .name = "btrfs_map_block",
87693 + .param = PARAM3,
87694 +};
87695 +
87696 +struct size_overflow_hash _001302_hash = {
87697 + .next = NULL,
87698 + .name = "c4_add_card",
87699 + .param = PARAM3,
87700 +};
87701 +
87702 +struct size_overflow_hash _001303_hash = {
87703 + .next = NULL,
87704 + .name = "cache_read",
87705 + .param = PARAM3,
87706 +};
87707 +
87708 +struct size_overflow_hash _001304_hash = {
87709 + .next = NULL,
87710 + .name = "cache_write",
87711 + .param = PARAM3,
87712 +};
87713 +
87714 +struct size_overflow_hash _001305_hash = {
87715 + .next = NULL,
87716 + .name = "calc_hmac",
87717 + .param = PARAM3,
87718 +};
87719 +
87720 +struct size_overflow_hash _001306_hash = {
87721 + .next = NULL,
87722 + .name = "ccid_getsockopt_builtin_ccids",
87723 + .param = PARAM2,
87724 +};
87725 +
87726 +struct size_overflow_hash _001307_hash = {
87727 + .next = NULL,
87728 + .name = "ceph_copy_page_vector_to_user",
87729 + .param = PARAM4,
87730 +};
87731 +
87732 +struct size_overflow_hash _001308_hash = {
87733 + .next = NULL,
87734 + .name = "ceph_read_dir",
87735 + .param = PARAM3,
87736 +};
87737 +
87738 +struct size_overflow_hash _001309_hash = {
87739 + .next = NULL,
87740 + .name = "cfg80211_roamed",
87741 + .param = PARAM5|PARAM7,
87742 +};
87743 +
87744 +struct size_overflow_hash _001311_hash = {
87745 + .next = NULL,
87746 + .name = "ci_ll_init",
87747 + .param = PARAM3,
87748 +};
87749 +
87750 +struct size_overflow_hash _001312_hash = {
87751 + .next = NULL,
87752 + .name = "coda_psdev_read",
87753 + .param = PARAM3,
87754 +};
87755 +
87756 +struct size_overflow_hash _001313_hash = {
87757 + .next = NULL,
87758 + .name = "construct_key_and_link",
87759 + .param = PARAM4,
87760 +};
87761 +
87762 +struct size_overflow_hash _001314_hash = {
87763 + .next = NULL,
87764 + .name = "copy_counters_to_user",
87765 + .param = PARAM5,
87766 +};
87767 +
87768 +struct size_overflow_hash _001315_hash = {
87769 + .next = NULL,
87770 + .name = "copy_entries_to_user",
87771 + .param = PARAM1,
87772 +};
87773 +
87774 +struct size_overflow_hash _001316_hash = {
87775 + .next = NULL,
87776 + .name = "copy_from_buf",
87777 + .param = PARAM4,
87778 +};
87779 +
87780 +struct size_overflow_hash _001317_hash = {
87781 + .next = NULL,
87782 + .name = "copy_oldmem_page",
87783 + .param = PARAM3,
87784 +};
87785 +
87786 +struct size_overflow_hash _001318_hash = {
87787 + .next = NULL,
87788 + .name = "copy_to_user_fromio",
87789 + .param = PARAM3,
87790 +};
87791 +
87792 +struct size_overflow_hash _001319_hash = {
87793 + .next = NULL,
87794 + .name = "cryptd_hash_setkey",
87795 + .param = PARAM3,
87796 +};
87797 +
87798 +struct size_overflow_hash _001320_hash = {
87799 + .next = NULL,
87800 + .name = "crypto_authenc_esn_setkey",
87801 + .param = PARAM3,
87802 +};
87803 +
87804 +struct size_overflow_hash _001321_hash = {
87805 + .next = NULL,
87806 + .name = "crypto_authenc_setkey",
87807 + .param = PARAM3,
87808 +};
87809 +
87810 +struct size_overflow_hash _001322_hash = {
87811 + .next = NULL,
87812 + .name = "cx18_copy_buf_to_user",
87813 + .param = PARAM4,
87814 +};
87815 +
87816 +struct size_overflow_hash _001324_hash = {
87817 + .next = NULL,
87818 + .name = "cxgbi_ddp_reserve",
87819 + .param = PARAM4,
87820 +};
87821 +
87822 +struct size_overflow_hash _001325_hash = {
87823 + .next = NULL,
87824 + .name = "datablob_hmac_append",
87825 + .param = PARAM3,
87826 +};
87827 +
87828 +struct size_overflow_hash _001326_hash = {
87829 + .next = NULL,
87830 + .name = "datablob_hmac_verify",
87831 + .param = PARAM4,
87832 +};
87833 +
87834 +struct size_overflow_hash _001327_hash = {
87835 + .next = NULL,
87836 + .name = "dataflash_read_fact_otp",
87837 + .param = PARAM3|PARAM2,
87838 +};
87839 +
87840 +struct size_overflow_hash _001328_hash = {
87841 + .next = &_000201_hash,
87842 + .name = "dataflash_read_user_otp",
87843 + .param = PARAM3|PARAM2,
87844 +};
87845 +
87846 +struct size_overflow_hash _001329_hash = {
87847 + .next = NULL,
87848 + .name = "dccp_feat_register_sp",
87849 + .param = PARAM5,
87850 +};
87851 +
87852 +struct size_overflow_hash _001330_hash = {
87853 + .next = NULL,
87854 + .name = "ddb_input_read",
87855 + .param = PARAM3,
87856 +};
87857 +
87858 +struct size_overflow_hash _001331_hash = {
87859 + .next = NULL,
87860 + .name = "dev_read",
87861 + .param = PARAM3,
87862 +};
87863 +
87864 +struct size_overflow_hash _001332_hash = {
87865 + .next = NULL,
87866 + .name = "diva_os_copy_to_user",
87867 + .param = PARAM4,
87868 +};
87869 +
87870 +struct size_overflow_hash _001333_hash = {
87871 + .next = NULL,
87872 + .name = "diva_os_malloc",
87873 + .param = PARAM2,
87874 +};
87875 +
87876 +struct size_overflow_hash _001334_hash = {
87877 + .next = NULL,
87878 + .name = "dlm_dir_lookup",
87879 + .param = PARAM4,
87880 +};
87881 +
87882 +struct size_overflow_hash _001335_hash = {
87883 + .next = NULL,
87884 + .name = "dm_vcalloc",
87885 + .param = PARAM1|PARAM2,
87886 +};
87887 +
87888 +struct size_overflow_hash _001337_hash = {
87889 + .next = NULL,
87890 + .name = "do_proc_readlink",
87891 + .param = PARAM3,
87892 +};
87893 +
87894 +struct size_overflow_hash _001338_hash = {
87895 + .next = NULL,
87896 + .name = "do_readlink",
87897 + .param = PARAM2,
87898 +};
87899 +
87900 +struct size_overflow_hash _001339_hash = {
87901 + .next = NULL,
87902 + .name = "__do_replace",
87903 + .param = PARAM5,
87904 +};
87905 +
87906 +struct size_overflow_hash _001340_hash = {
87907 + .next = NULL,
87908 + .name = "do_sigpending",
87909 + .param = PARAM2,
87910 +};
87911 +
87912 +struct size_overflow_hash _001341_hash = {
87913 + .next = &_000371_hash,
87914 + .name = "drbd_setsockopt",
87915 + .param = PARAM5,
87916 +};
87917 +
87918 +struct size_overflow_hash _001342_hash = {
87919 + .next = NULL,
87920 + .name = "dsp_buffer_alloc",
87921 + .param = PARAM2,
87922 +};
87923 +
87924 +struct size_overflow_hash _001343_hash = {
87925 + .next = NULL,
87926 + .name = "dump_midi",
87927 + .param = PARAM3,
87928 +};
87929 +
87930 +struct size_overflow_hash _001344_hash = {
87931 + .next = NULL,
87932 + .name = "dvb_dmxdev_set_buffer_size",
87933 + .param = PARAM2,
87934 +};
87935 +
87936 +struct size_overflow_hash _001345_hash = {
87937 + .next = NULL,
87938 + .name = "dvb_dvr_set_buffer_size",
87939 + .param = PARAM2,
87940 +};
87941 +
87942 +struct size_overflow_hash _001346_hash = {
87943 + .next = NULL,
87944 + .name = "dvb_ringbuffer_pkt_read_user",
87945 + .param = PARAM3|PARAM5,
87946 +};
87947 +
87948 +struct size_overflow_hash _001348_hash = {
87949 + .next = NULL,
87950 + .name = "dvb_ringbuffer_read_user",
87951 + .param = PARAM3,
87952 +};
87953 +
87954 +struct size_overflow_hash _001349_hash = {
87955 + .next = NULL,
87956 + .name = "ecryptfs_filldir",
87957 + .param = PARAM3,
87958 +};
87959 +
87960 +struct size_overflow_hash _001350_hash = {
87961 + .next = NULL,
87962 + .name = "ecryptfs_readlink",
87963 + .param = PARAM3,
87964 +};
87965 +
87966 +struct size_overflow_hash _001351_hash = {
87967 + .next = NULL,
87968 + .name = "ecryptfs_send_message",
87969 + .param = PARAM2,
87970 +};
87971 +
87972 +struct size_overflow_hash _001352_hash = {
87973 + .next = &_000721_hash,
87974 + .name = "em28xx_init_isoc",
87975 + .param = PARAM4,
87976 +};
87977 +
87978 +struct size_overflow_hash _001353_hash = {
87979 + .next = NULL,
87980 + .name = "et61x251_read",
87981 + .param = PARAM3,
87982 +};
87983 +
87984 +struct size_overflow_hash _001354_hash = {
87985 + .next = NULL,
87986 + .name = "ext4_add_new_descs",
87987 + .param = PARAM3,
87988 +};
87989 +
87990 +struct size_overflow_hash _001355_hash = {
87991 + .next = NULL,
87992 + .name = "fat_ioctl_filldir",
87993 + .param = PARAM3,
87994 +};
87995 +
87996 +struct size_overflow_hash _001356_hash = {
87997 + .next = NULL,
87998 + .name = "fd_copyout",
87999 + .param = PARAM3,
88000 +};
88001 +
88002 +struct size_overflow_hash _001357_hash = {
88003 + .next = NULL,
88004 + .name = "f_hidg_read",
88005 + .param = PARAM3,
88006 +};
88007 +
88008 +struct size_overflow_hash _001358_hash = {
88009 + .next = NULL,
88010 + .name = "filldir",
88011 + .param = PARAM3,
88012 +};
88013 +
88014 +struct size_overflow_hash _001359_hash = {
88015 + .next = NULL,
88016 + .name = "filldir64",
88017 + .param = PARAM3,
88018 +};
88019 +
88020 +struct size_overflow_hash _001360_hash = {
88021 + .next = NULL,
88022 + .name = "fops_read",
88023 + .param = PARAM3,
88024 +};
88025 +
88026 +struct size_overflow_hash _001361_hash = {
88027 + .next = NULL,
88028 + .name = "from_buffer",
88029 + .param = PARAM3,
88030 +};
88031 +
88032 +struct size_overflow_hash _001362_hash = {
88033 + .next = NULL,
88034 + .name = "fsm_init",
88035 + .param = PARAM2,
88036 +};
88037 +
88038 +struct size_overflow_hash _001363_hash = {
88039 + .next = NULL,
88040 + .name = "get_subdir",
88041 + .param = PARAM3,
88042 +};
88043 +
88044 +struct size_overflow_hash _001364_hash = {
88045 + .next = NULL,
88046 + .name = "gspca_dev_probe",
88047 + .param = PARAM4,
88048 +};
88049 +
88050 +struct size_overflow_hash _001365_hash = {
88051 + .next = NULL,
88052 + .name = "handle_received_packet",
88053 + .param = PARAM3,
88054 +};
88055 +
88056 +struct size_overflow_hash _001366_hash = {
88057 + .next = NULL,
88058 + .name = "hash_setkey",
88059 + .param = PARAM3,
88060 +};
88061 +
88062 +struct size_overflow_hash _001367_hash = {
88063 + .next = NULL,
88064 + .name = "hdlcdrv_register",
88065 + .param = PARAM2,
88066 +};
88067 +
88068 +struct size_overflow_hash _001368_hash = {
88069 + .next = NULL,
88070 + .name = "hdpvr_read",
88071 + .param = PARAM3,
88072 +};
88073 +
88074 +struct size_overflow_hash _001369_hash = {
88075 + .next = NULL,
88076 + .name = "hid_input_report",
88077 + .param = PARAM4,
88078 +};
88079 +
88080 +struct size_overflow_hash _001370_hash = {
88081 + .next = &_001200_hash,
88082 + .name = "hidraw_read",
88083 + .param = PARAM3,
88084 +};
88085 +
88086 +struct size_overflow_hash _001371_hash = {
88087 + .next = NULL,
88088 + .name = "HiSax_readstatus",
88089 + .param = PARAM2,
88090 +};
88091 +
88092 +struct size_overflow_hash _001373_hash = {
88093 + .next = NULL,
88094 + .name = "__hwahc_op_set_gtk",
88095 + .param = PARAM4,
88096 +};
88097 +
88098 +struct size_overflow_hash _001374_hash = {
88099 + .next = NULL,
88100 + .name = "__hwahc_op_set_ptk",
88101 + .param = PARAM5,
88102 +};
88103 +
88104 +struct size_overflow_hash _001375_hash = {
88105 + .next = NULL,
88106 + .name = "ib_copy_to_udata",
88107 + .param = PARAM3,
88108 +};
88109 +
88110 +struct size_overflow_hash _001376_hash = {
88111 + .next = NULL,
88112 + .name = "idetape_chrdev_read",
88113 + .param = PARAM3,
88114 +};
88115 +
88116 +struct size_overflow_hash _001377_hash = {
88117 + .next = NULL,
88118 + .name = "ieee80211_alloc_hw",
88119 + .param = PARAM1,
88120 +};
88121 +
88122 +struct size_overflow_hash _001378_hash = {
88123 + .next = NULL,
88124 + .name = "ieee80211_bss_info_update",
88125 + .param = PARAM4,
88126 +};
88127 +
88128 +struct size_overflow_hash _001379_hash = {
88129 + .next = NULL,
88130 + .name = "ilo_read",
88131 + .param = PARAM3,
88132 +};
88133 +
88134 +struct size_overflow_hash _001380_hash = {
88135 + .next = NULL,
88136 + .name = "init_map_ipmac",
88137 + .param = PARAM3|PARAM4,
88138 +};
88139 +
88140 +struct size_overflow_hash _001382_hash = {
88141 + .next = NULL,
88142 + .name = "init_tid_tabs",
88143 + .param = PARAM2|PARAM4|PARAM3,
88144 +};
88145 +
88146 +struct size_overflow_hash _001385_hash = {
88147 + .next = NULL,
88148 + .name = "iowarrior_read",
88149 + .param = PARAM3,
88150 +};
88151 +
88152 +struct size_overflow_hash _001386_hash = {
88153 + .next = NULL,
88154 + .name = "ipv6_getsockopt_sticky",
88155 + .param = PARAM5,
88156 +};
88157 +
88158 +struct size_overflow_hash _001387_hash = {
88159 + .next = NULL,
88160 + .name = "ipwireless_send_packet",
88161 + .param = PARAM4,
88162 +};
88163 +
88164 +struct size_overflow_hash _001388_hash = {
88165 + .next = NULL,
88166 + .name = "ipx_sendmsg",
88167 + .param = PARAM4,
88168 +};
88169 +
88170 +struct size_overflow_hash _001389_hash = {
88171 + .next = NULL,
88172 + .name = "iscsi_conn_setup",
88173 + .param = PARAM2,
88174 +};
88175 +
88176 +struct size_overflow_hash _001390_hash = {
88177 + .next = NULL,
88178 + .name = "iscsi_create_session",
88179 + .param = PARAM3,
88180 +};
88181 +
88182 +struct size_overflow_hash _001391_hash = {
88183 + .next = NULL,
88184 + .name = "iscsi_host_alloc",
88185 + .param = PARAM2,
88186 +};
88187 +
88188 +struct size_overflow_hash _001392_hash = {
88189 + .next = NULL,
88190 + .name = "iscsi_session_setup",
88191 + .param = PARAM4|PARAM5,
88192 +};
88193 +
88194 +struct size_overflow_hash _001394_hash = {
88195 + .next = NULL,
88196 + .name = "iscsit_find_cmd_from_itt_or_dump",
88197 + .param = PARAM3,
88198 +};
88199 +
88200 +struct size_overflow_hash _001395_hash = {
88201 + .next = NULL,
88202 + .name = "isdn_ppp_read",
88203 + .param = PARAM4,
88204 +};
88205 +
88206 +struct size_overflow_hash _001396_hash = {
88207 + .next = NULL,
88208 + .name = "isku_sysfs_read",
88209 + .param = PARAM6,
88210 +};
88211 +
88212 +struct size_overflow_hash _001397_hash = {
88213 + .next = NULL,
88214 + .name = "isku_sysfs_write",
88215 + .param = PARAM6,
88216 +};
88217 +
88218 +struct size_overflow_hash _001398_hash = {
88219 + .next = NULL,
88220 + .name = "iso_alloc_urb",
88221 + .param = PARAM4|PARAM5,
88222 +};
88223 +
88224 +struct size_overflow_hash _001400_hash = {
88225 + .next = NULL,
88226 + .name = "ivtv_copy_buf_to_user",
88227 + .param = PARAM4,
88228 +};
88229 +
88230 +struct size_overflow_hash _001401_hash = {
88231 + .next = NULL,
88232 + .name = "iwm_rx_handle",
88233 + .param = PARAM3,
88234 +};
88235 +
88236 +struct size_overflow_hash _001402_hash = {
88237 + .next = NULL,
88238 + .name = "iwm_wdev_alloc",
88239 + .param = PARAM1,
88240 +};
88241 +
88242 +struct size_overflow_hash _001403_hash = {
88243 + .next = NULL,
88244 + .name = "jbd2_alloc",
88245 + .param = PARAM1,
88246 +};
88247 +
88248 +struct size_overflow_hash _001404_hash = {
88249 + .next = NULL,
88250 + .name = "jffs2_do_link",
88251 + .param = PARAM6,
88252 +};
88253 +
88254 +struct size_overflow_hash _001405_hash = {
88255 + .next = NULL,
88256 + .name = "jffs2_do_unlink",
88257 + .param = PARAM4,
88258 +};
88259 +
88260 +struct size_overflow_hash _001406_hash = {
88261 + .next = NULL,
88262 + .name = "jffs2_security_setxattr",
88263 + .param = PARAM4,
88264 +};
88265 +
88266 +struct size_overflow_hash _001407_hash = {
88267 + .next = NULL,
88268 + .name = "jffs2_trusted_setxattr",
88269 + .param = PARAM4,
88270 +};
88271 +
88272 +struct size_overflow_hash _001408_hash = {
88273 + .next = NULL,
88274 + .name = "jffs2_user_setxattr",
88275 + .param = PARAM4,
88276 +};
88277 +
88278 +struct size_overflow_hash _001409_hash = {
88279 + .next = NULL,
88280 + .name = "kernel_setsockopt",
88281 + .param = PARAM5,
88282 +};
88283 +
88284 +struct size_overflow_hash _001410_hash = {
88285 + .next = NULL,
88286 + .name = "keyctl_describe_key",
88287 + .param = PARAM3,
88288 +};
88289 +
88290 +struct size_overflow_hash _001411_hash = {
88291 + .next = &_001132_hash,
88292 + .name = "keyctl_get_security",
88293 + .param = PARAM3,
88294 +};
88295 +
88296 +struct size_overflow_hash _001412_hash = {
88297 + .next = NULL,
88298 + .name = "keyring_read",
88299 + .param = PARAM3,
88300 +};
88301 +
88302 +struct size_overflow_hash _001413_hash = {
88303 + .next = NULL,
88304 + .name = "kfifo_copy_to_user",
88305 + .param = PARAM3,
88306 +};
88307 +
88308 +struct size_overflow_hash _001414_hash = {
88309 + .next = NULL,
88310 + .name = "kmem_zalloc_large",
88311 + .param = PARAM1,
88312 +};
88313 +
88314 +struct size_overflow_hash _001415_hash = {
88315 + .next = NULL,
88316 + .name = "kmp_init",
88317 + .param = PARAM2,
88318 +};
88319 +
88320 +struct size_overflow_hash _001416_hash = {
88321 + .next = NULL,
88322 + .name = "koneplus_sysfs_write",
88323 + .param = PARAM6,
88324 +};
88325 +
88326 +struct size_overflow_hash _001417_hash = {
88327 + .next = NULL,
88328 + .name = "kvm_clear_guest_page",
88329 + .param = PARAM4,
88330 +};
88331 +
88332 +struct size_overflow_hash _001418_hash = {
88333 + .next = NULL,
88334 + .name = "kvm_read_nested_guest_page",
88335 + .param = PARAM5,
88336 +};
88337 +
88338 +struct size_overflow_hash _001419_hash = {
88339 + .next = &_001034_hash,
88340 + .name = "l2cap_create_basic_pdu",
88341 + .param = PARAM3,
88342 +};
88343 +
88344 +struct size_overflow_hash _001420_hash = {
88345 + .next = &_000966_hash,
88346 + .name = "l2cap_create_connless_pdu",
88347 + .param = PARAM3,
88348 +};
88349 +
88350 +struct size_overflow_hash _001421_hash = {
88351 + .next = NULL,
88352 + .name = "l2cap_create_iframe_pdu",
88353 + .param = PARAM3,
88354 +};
88355 +
88356 +struct size_overflow_hash _001422_hash = {
88357 + .next = NULL,
88358 + .name = "__lgwrite",
88359 + .param = PARAM4,
88360 +};
88361 +
88362 +struct size_overflow_hash _001423_hash = {
88363 + .next = NULL,
88364 + .name = "libfc_host_alloc",
88365 + .param = PARAM2,
88366 +};
88367 +
88368 +struct size_overflow_hash _001424_hash = {
88369 + .next = NULL,
88370 + .name = "llcp_sock_sendmsg",
88371 + .param = PARAM4,
88372 +};
88373 +
88374 +struct size_overflow_hash _001425_hash = {
88375 + .next = NULL,
88376 + .name = "macvtap_get_user",
88377 + .param = PARAM4,
88378 +};
88379 +
88380 +struct size_overflow_hash _001426_hash = {
88381 + .next = NULL,
88382 + .name = "mcam_v4l_read",
88383 + .param = PARAM3,
88384 +};
88385 +
88386 +struct size_overflow_hash _001427_hash = {
88387 + .next = NULL,
88388 + .name = "mce_async_out",
88389 + .param = PARAM3,
88390 +};
88391 +
88392 +struct size_overflow_hash _001428_hash = {
88393 + .next = NULL,
88394 + .name = "mce_flush_rx_buffer",
88395 + .param = PARAM2,
88396 +};
88397 +
88398 +struct size_overflow_hash _001429_hash = {
88399 + .next = NULL,
88400 + .name = "mdc800_device_read",
88401 + .param = PARAM3,
88402 +};
88403 +
88404 +struct size_overflow_hash _001430_hash = {
88405 + .next = &_000867_hash,
88406 + .name = "memcpy_toiovec",
88407 + .param = PARAM3,
88408 +};
88409 +
88410 +struct size_overflow_hash _001431_hash = {
88411 + .next = NULL,
88412 + .name = "memcpy_toiovecend",
88413 + .param = PARAM3|PARAM4,
88414 +};
88415 +
88416 +struct size_overflow_hash _001433_hash = {
88417 + .next = NULL,
88418 + .name = "mgt_set_varlen",
88419 + .param = PARAM4,
88420 +};
88421 +
88422 +struct size_overflow_hash _001434_hash = {
88423 + .next = NULL,
88424 + .name = "mlx4_en_create_rx_ring",
88425 + .param = PARAM3,
88426 +};
88427 +
88428 +struct size_overflow_hash _001435_hash = {
88429 + .next = NULL,
88430 + .name = "mlx4_en_create_tx_ring",
88431 + .param = PARAM4,
88432 +};
88433 +
88434 +struct size_overflow_hash _001436_hash = {
88435 + .next = NULL,
88436 + .name = "mon_bin_get_event",
88437 + .param = PARAM4,
88438 +};
88439 +
88440 +struct size_overflow_hash _001437_hash = {
88441 + .next = NULL,
88442 + .name = "mousedev_read",
88443 + .param = PARAM3,
88444 +};
88445 +
88446 +struct size_overflow_hash _001438_hash = {
88447 + .next = NULL,
88448 + .name = "move_addr_to_user",
88449 + .param = PARAM2,
88450 +};
88451 +
88452 +struct size_overflow_hash _001439_hash = {
88453 + .next = NULL,
88454 + .name = "mpihelp_mul",
88455 + .param = PARAM5|PARAM3,
88456 +};
88457 +
88458 +struct size_overflow_hash _001441_hash = {
88459 + .next = NULL,
88460 + .name = "mpi_lshift_limbs",
88461 + .param = PARAM2,
88462 +};
88463 +
88464 +struct size_overflow_hash _001442_hash = {
88465 + .next = NULL,
88466 + .name = "msnd_fifo_alloc",
88467 + .param = PARAM2,
88468 +};
88469 +
88470 +struct size_overflow_hash _001443_hash = {
88471 + .next = NULL,
88472 + .name = "mtdswap_init",
88473 + .param = PARAM2,
88474 +};
88475 +
88476 +struct size_overflow_hash _001444_hash = {
88477 + .next = NULL,
88478 + .name = "neigh_hash_grow",
88479 + .param = PARAM2,
88480 +};
88481 +
88482 +struct size_overflow_hash _001445_hash = {
88483 + .next = NULL,
88484 + .name = "nfs4_realloc_slot_table",
88485 + .param = PARAM2,
88486 +};
88487 +
88488 +struct size_overflow_hash _001446_hash = {
88489 + .next = NULL,
88490 + .name = "nfs_idmap_get_key",
88491 + .param = PARAM2,
88492 +};
88493 +
88494 +struct size_overflow_hash _001447_hash = {
88495 + .next = NULL,
88496 + .name = "nsm_get_handle",
88497 + .param = PARAM4,
88498 +};
88499 +
88500 +struct size_overflow_hash _001448_hash = {
88501 + .next = NULL,
88502 + .name = "ntfs_malloc_nofs",
88503 + .param = PARAM1,
88504 +};
88505 +
88506 +struct size_overflow_hash _001449_hash = {
88507 + .next = NULL,
88508 + .name = "ntfs_malloc_nofs_nofail",
88509 + .param = PARAM1,
88510 +};
88511 +
88512 +struct size_overflow_hash _001450_hash = {
88513 + .next = NULL,
88514 + .name = "nvme_create_queue",
88515 + .param = PARAM3,
88516 +};
88517 +
88518 +struct size_overflow_hash _001451_hash = {
88519 + .next = NULL,
88520 + .name = "ocfs2_control_write",
88521 + .param = PARAM3,
88522 +};
88523 +
88524 +struct size_overflow_hash _001452_hash = {
88525 + .next = NULL,
88526 + .name = "orinoco_add_extscan_result",
88527 + .param = PARAM3,
88528 +};
88529 +
88530 +struct size_overflow_hash _001454_hash = {
88531 + .next = NULL,
88532 + .name = "override_release",
88533 + .param = PARAM2,
88534 +};
88535 +
88536 +struct size_overflow_hash _001455_hash = {
88537 + .next = NULL,
88538 + .name = "packet_snd",
88539 + .param = PARAM3,
88540 +};
88541 +
88542 +struct size_overflow_hash _001456_hash = {
88543 + .next = NULL,
88544 + .name = "pcbit_stat",
88545 + .param = PARAM2,
88546 +};
88547 +
88548 +struct size_overflow_hash _001457_hash = {
88549 + .next = NULL,
88550 + .name = "pcpu_extend_area_map",
88551 + .param = PARAM2,
88552 +};
88553 +
88554 +struct size_overflow_hash _001458_hash = {
88555 + .next = NULL,
88556 + .name = "pg_read",
88557 + .param = PARAM3,
88558 +};
88559 +
88560 +struct size_overflow_hash _001459_hash = {
88561 + .next = NULL,
88562 + .name = "picolcd_debug_eeprom_read",
88563 + .param = PARAM3,
88564 +};
88565 +
88566 +struct size_overflow_hash _001460_hash = {
88567 + .next = NULL,
88568 + .name = "pkt_alloc_packet_data",
88569 + .param = PARAM1,
88570 +};
88571 +
88572 +struct size_overflow_hash _001461_hash = {
88573 + .next = NULL,
88574 + .name = "pmcraid_build_passthrough_ioadls",
88575 + .param = PARAM2,
88576 +};
88577 +
88578 +struct size_overflow_hash _001462_hash = {
88579 + .next = NULL,
88580 + .name = "pms_capture",
88581 + .param = PARAM4,
88582 +};
88583 +
88584 +struct size_overflow_hash _001463_hash = {
88585 + .next = NULL,
88586 + .name = "posix_clock_register",
88587 + .param = PARAM2,
88588 +};
88589 +
88590 +struct size_overflow_hash _001464_hash = {
88591 + .next = NULL,
88592 + .name = "printer_read",
88593 + .param = PARAM3,
88594 +};
88595 +
88596 +struct size_overflow_hash _001465_hash = {
88597 + .next = NULL,
88598 + .name = "__proc_file_read",
88599 + .param = PARAM3,
88600 +};
88601 +
88602 +struct size_overflow_hash _001466_hash = {
88603 + .next = NULL,
88604 + .name = "pt_read",
88605 + .param = PARAM3,
88606 +};
88607 +
88608 +struct size_overflow_hash _001467_hash = {
88609 + .next = NULL,
88610 + .name = "put_cmsg",
88611 + .param = PARAM4,
88612 +};
88613 +
88614 +struct size_overflow_hash _001468_hash = {
88615 + .next = NULL,
88616 + .name = "pvr2_ioread_read",
88617 + .param = PARAM3,
88618 +};
88619 +
88620 +struct size_overflow_hash _001469_hash = {
88621 + .next = NULL,
88622 + .name = "pwc_video_read",
88623 + .param = PARAM3,
88624 +};
88625 +
88626 +struct size_overflow_hash _001470_hash = {
88627 + .next = NULL,
88628 + .name = "px_raw_event",
88629 + .param = PARAM4,
88630 +};
88631 +
88632 +struct size_overflow_hash _001471_hash = {
88633 + .next = NULL,
88634 + .name = "qcam_read",
88635 + .param = PARAM3,
88636 +};
88637 +
88638 +struct size_overflow_hash _001472_hash = {
88639 + .next = NULL,
88640 + .name = "rawv6_sendmsg",
88641 + .param = PARAM4,
88642 +};
88643 +
88644 +struct size_overflow_hash _001473_hash = {
88645 + .next = NULL,
88646 + .name = "rds_sendmsg",
88647 + .param = PARAM4,
88648 +};
88649 +
88650 +struct size_overflow_hash _001474_hash = {
88651 + .next = NULL,
88652 + .name = "read_flush",
88653 + .param = PARAM3,
88654 +};
88655 +
88656 +struct size_overflow_hash _001475_hash = {
88657 + .next = NULL,
88658 + .name = "read_profile",
88659 + .param = PARAM3,
88660 +};
88661 +
88662 +struct size_overflow_hash _001476_hash = {
88663 + .next = NULL,
88664 + .name = "read_vmcore",
88665 + .param = PARAM3,
88666 +};
88667 +
88668 +struct size_overflow_hash _001477_hash = {
88669 + .next = NULL,
88670 + .name = "redirected_tty_write",
88671 + .param = PARAM3,
88672 +};
88673 +
88674 +struct size_overflow_hash _001478_hash = {
88675 + .next = NULL,
88676 + .name = "__register_chrdev",
88677 + .param = PARAM2|PARAM3,
88678 +};
88679 +
88680 +struct size_overflow_hash _001480_hash = {
88681 + .next = NULL,
88682 + .name = "regmap_raw_write",
88683 + .param = PARAM4,
88684 +};
88685 +
88686 +struct size_overflow_hash _001481_hash = {
88687 + .next = NULL,
88688 + .name = "reiserfs_allocate_list_bitmaps",
88689 + .param = PARAM3,
88690 +};
88691 +
88692 +struct size_overflow_hash _001482_hash = {
88693 + .next = NULL,
88694 + .name = "reiserfs_resize",
88695 + .param = PARAM2,
88696 +};
88697 +
88698 +struct size_overflow_hash _001483_hash = {
88699 + .next = NULL,
88700 + .name = "request_key_auth_read",
88701 + .param = PARAM3,
88702 +};
88703 +
88704 +struct size_overflow_hash _001484_hash = {
88705 + .next = NULL,
88706 + .name = "rfkill_fop_read",
88707 + .param = PARAM3,
88708 +};
88709 +
88710 +struct size_overflow_hash _001485_hash = {
88711 + .next = NULL,
88712 + .name = "rng_dev_read",
88713 + .param = PARAM3,
88714 +};
88715 +
88716 +struct size_overflow_hash _001486_hash = {
88717 + .next = NULL,
88718 + .name = "roccat_read",
88719 + .param = PARAM3,
88720 +};
88721 +
88722 +struct size_overflow_hash _001487_hash = {
88723 + .next = NULL,
88724 + .name = "sco_sock_sendmsg",
88725 + .param = PARAM4,
88726 +};
88727 +
88728 +struct size_overflow_hash _001488_hash = {
88729 + .next = NULL,
88730 + .name = "scsi_register",
88731 + .param = PARAM2,
88732 +};
88733 +
88734 +struct size_overflow_hash _001489_hash = {
88735 + .next = NULL,
88736 + .name = "sctp_getsockopt_events",
88737 + .param = PARAM2,
88738 +};
88739 +
88740 +struct size_overflow_hash _001490_hash = {
88741 + .next = NULL,
88742 + .name = "sctp_getsockopt_maxburst",
88743 + .param = PARAM2,
88744 +};
88745 +
88746 +struct size_overflow_hash _001491_hash = {
88747 + .next = NULL,
88748 + .name = "sctp_getsockopt_maxseg",
88749 + .param = PARAM2,
88750 +};
88751 +
88752 +struct size_overflow_hash _001492_hash = {
88753 + .next = NULL,
88754 + .name = "sctpprobe_read",
88755 + .param = PARAM3,
88756 +};
88757 +
88758 +struct size_overflow_hash _001493_hash = {
88759 + .next = NULL,
88760 + .name = "sdhci_alloc_host",
88761 + .param = PARAM2,
88762 +};
88763 +
88764 +struct size_overflow_hash _001494_hash = {
88765 + .next = NULL,
88766 + .name = "selinux_inode_post_setxattr",
88767 + .param = PARAM4,
88768 +};
88769 +
88770 +struct size_overflow_hash _001495_hash = {
88771 + .next = NULL,
88772 + .name = "selinux_inode_setsecurity",
88773 + .param = PARAM4,
88774 +};
88775 +
88776 +struct size_overflow_hash _001496_hash = {
88777 + .next = NULL,
88778 + .name = "selinux_inode_setxattr",
88779 + .param = PARAM4,
88780 +};
88781 +
88782 +struct size_overflow_hash _001497_hash = {
88783 + .next = NULL,
88784 + .name = "selinux_secctx_to_secid",
88785 + .param = PARAM2,
88786 +};
88787 +
88788 +struct size_overflow_hash _001498_hash = {
88789 + .next = NULL,
88790 + .name = "selinux_setprocattr",
88791 + .param = PARAM4,
88792 +};
88793 +
88794 +struct size_overflow_hash _001499_hash = {
88795 + .next = NULL,
88796 + .name = "sel_write_context",
88797 + .param = PARAM3,
88798 +};
88799 +
88800 +struct size_overflow_hash _001500_hash = {
88801 + .next = NULL,
88802 + .name = "seq_copy_in_user",
88803 + .param = PARAM3,
88804 +};
88805 +
88806 +struct size_overflow_hash _001501_hash = {
88807 + .next = NULL,
88808 + .name = "seq_open_net",
88809 + .param = PARAM4,
88810 +};
88811 +
88812 +struct size_overflow_hash _001502_hash = {
88813 + .next = NULL,
88814 + .name = "seq_open_private",
88815 + .param = PARAM3,
88816 +};
88817 +
88818 +struct size_overflow_hash _001503_hash = {
88819 + .next = NULL,
88820 + .name = "set_arg",
88821 + .param = PARAM3,
88822 +};
88823 +
88824 +struct size_overflow_hash _001504_hash = {
88825 + .next = NULL,
88826 + .name = "sg_read",
88827 + .param = PARAM3,
88828 +};
88829 +
88830 +struct size_overflow_hash _001505_hash = {
88831 + .next = &_001468_hash,
88832 + .name = "shash_async_setkey",
88833 + .param = PARAM3,
88834 +};
88835 +
88836 +struct size_overflow_hash _001506_hash = {
88837 + .next = NULL,
88838 + .name = "shash_compat_setkey",
88839 + .param = PARAM3,
88840 +};
88841 +
88842 +struct size_overflow_hash _001507_hash = {
88843 + .next = NULL,
88844 + .name = "shmem_setxattr",
88845 + .param = PARAM4,
88846 +};
88847 +
88848 +struct size_overflow_hash _001508_hash = {
88849 + .next = NULL,
88850 + .name = "simple_read_from_buffer",
88851 + .param = PARAM2|PARAM5,
88852 +};
88853 +
88854 +struct size_overflow_hash _001511_hash = {
88855 + .next = NULL,
88856 + .name = "sm_checker_extend",
88857 + .param = PARAM2,
88858 +};
88859 +
88860 +struct size_overflow_hash _001512_hash = {
88861 + .next = NULL,
88862 + .name = "sn9c102_read",
88863 + .param = PARAM3,
88864 +};
88865 +
88866 +struct size_overflow_hash _001513_hash = {
88867 + .next = NULL,
88868 + .name = "snd_es1938_capture_copy",
88869 + .param = PARAM5,
88870 +};
88871 +
88872 +struct size_overflow_hash _001514_hash = {
88873 + .next = NULL,
88874 + .name = "snd_gus_dram_peek",
88875 + .param = PARAM4,
88876 +};
88877 +
88878 +struct size_overflow_hash _001515_hash = {
88879 + .next = NULL,
88880 + .name = "snd_hdsp_capture_copy",
88881 + .param = PARAM5,
88882 +};
88883 +
88884 +struct size_overflow_hash _001516_hash = {
88885 + .next = NULL,
88886 + .name = "snd_korg1212_copy_to",
88887 + .param = PARAM6,
88888 +};
88889 +
88890 +struct size_overflow_hash _001517_hash = {
88891 + .next = NULL,
88892 + .name = "snd_opl4_mem_proc_read",
88893 + .param = PARAM5,
88894 +};
88895 +
88896 +struct size_overflow_hash _001518_hash = {
88897 + .next = NULL,
88898 + .name = "snd_pcm_alloc_vmalloc_buffer",
88899 + .param = PARAM2,
88900 +};
88901 +
88902 +struct size_overflow_hash _001519_hash = {
88903 + .next = NULL,
88904 + .name = "snd_pcm_oss_read1",
88905 + .param = PARAM3,
88906 +};
88907 +
88908 +struct size_overflow_hash _001520_hash = {
88909 + .next = NULL,
88910 + .name = "snd_rawmidi_kernel_read1",
88911 + .param = PARAM4,
88912 +};
88913 +
88914 +struct size_overflow_hash _001521_hash = {
88915 + .next = NULL,
88916 + .name = "snd_rme9652_capture_copy",
88917 + .param = PARAM5,
88918 +};
88919 +
88920 +struct size_overflow_hash _001522_hash = {
88921 + .next = NULL,
88922 + .name = "srp_target_alloc",
88923 + .param = PARAM3,
88924 +};
88925 +
88926 +struct size_overflow_hash _001523_hash = {
88927 + .next = NULL,
88928 + .name = "stk_allocate_buffers",
88929 + .param = PARAM2,
88930 +};
88931 +
88932 +struct size_overflow_hash _001524_hash = {
88933 + .next = NULL,
88934 + .name = "store_ifalias",
88935 + .param = PARAM4,
88936 +};
88937 +
88938 +struct size_overflow_hash _001525_hash = {
88939 + .next = NULL,
88940 + .name = "store_msg",
88941 + .param = PARAM3,
88942 +};
88943 +
88944 +struct size_overflow_hash _001526_hash = {
88945 + .next = NULL,
88946 + .name = "str_to_user",
88947 + .param = PARAM2,
88948 +};
88949 +
88950 +struct size_overflow_hash _001527_hash = {
88951 + .next = NULL,
88952 + .name = "subbuf_read_actor",
88953 + .param = PARAM3,
88954 +};
88955 +
88956 +struct size_overflow_hash _001528_hash = {
88957 + .next = NULL,
88958 + .name = "sys_fgetxattr",
88959 + .param = PARAM4,
88960 +};
88961 +
88962 +struct size_overflow_hash _001529_hash = {
88963 + .next = NULL,
88964 + .name = "sys_gethostname",
88965 + .param = PARAM2,
88966 +};
88967 +
88968 +struct size_overflow_hash _001530_hash = {
88969 + .next = NULL,
88970 + .name = "sys_getxattr",
88971 + .param = PARAM4,
88972 +};
88973 +
88974 +struct size_overflow_hash _001531_hash = {
88975 + .next = NULL,
88976 + .name = "sys_kexec_load",
88977 + .param = PARAM2,
88978 +};
88979 +
88980 +struct size_overflow_hash _001532_hash = {
88981 + .next = &_000129_hash,
88982 + .name = "sys_msgsnd",
88983 + .param = PARAM3,
88984 +};
88985 +
88986 +struct size_overflow_hash _001533_hash = {
88987 + .next = NULL,
88988 + .name = "sys_process_vm_readv",
88989 + .param = PARAM3|PARAM5,
88990 +};
88991 +
88992 +struct size_overflow_hash _001535_hash = {
88993 + .next = NULL,
88994 + .name = "sys_process_vm_writev",
88995 + .param = PARAM3|PARAM5,
88996 +};
88997 +
88998 +struct size_overflow_hash _001537_hash = {
88999 + .next = NULL,
89000 + .name = "sys_sched_getaffinity",
89001 + .param = PARAM2,
89002 +};
89003 +
89004 +struct size_overflow_hash _001538_hash = {
89005 + .next = NULL,
89006 + .name = "sys_setsockopt",
89007 + .param = PARAM5,
89008 +};
89009 +
89010 +struct size_overflow_hash _001539_hash = {
89011 + .next = NULL,
89012 + .name = "t3_init_l2t",
89013 + .param = PARAM1,
89014 +};
89015 +
89016 +struct size_overflow_hash _001540_hash = {
89017 + .next = NULL,
89018 + .name = "team_options_register",
89019 + .param = PARAM3,
89020 +};
89021 +
89022 +struct size_overflow_hash _001541_hash = {
89023 + .next = NULL,
89024 + .name = "tipc_send2name",
89025 + .param = PARAM6,
89026 +};
89027 +
89028 +struct size_overflow_hash _001542_hash = {
89029 + .next = NULL,
89030 + .name = "tipc_send2port",
89031 + .param = PARAM5,
89032 +};
89033 +
89034 +struct size_overflow_hash _001543_hash = {
89035 + .next = NULL,
89036 + .name = "tipc_send",
89037 + .param = PARAM4,
89038 +};
89039 +
89040 +struct size_overflow_hash _001544_hash = {
89041 + .next = NULL,
89042 + .name = "tm6000_i2c_recv_regs16",
89043 + .param = PARAM5,
89044 +};
89045 +
89046 +struct size_overflow_hash _001545_hash = {
89047 + .next = NULL,
89048 + .name = "tm6000_i2c_recv_regs",
89049 + .param = PARAM5,
89050 +};
89051 +
89052 +struct size_overflow_hash _001546_hash = {
89053 + .next = NULL,
89054 + .name = "tm6000_i2c_send_regs",
89055 + .param = PARAM5,
89056 +};
89057 +
89058 +struct size_overflow_hash _001547_hash = {
89059 + .next = NULL,
89060 + .name = "tnode_new",
89061 + .param = PARAM3,
89062 +};
89063 +
89064 +struct size_overflow_hash _001548_hash = {
89065 + .next = NULL,
89066 + .name = "tomoyo_read_self",
89067 + .param = PARAM3,
89068 +};
89069 +
89070 +struct size_overflow_hash _001549_hash = {
89071 + .next = NULL,
89072 + .name = "tomoyo_update_domain",
89073 + .param = PARAM2,
89074 +};
89075 +
89076 +struct size_overflow_hash _001550_hash = {
89077 + .next = NULL,
89078 + .name = "tomoyo_update_policy",
89079 + .param = PARAM2,
89080 +};
89081 +
89082 +struct size_overflow_hash _001551_hash = {
89083 + .next = NULL,
89084 + .name = "tpm_read",
89085 + .param = PARAM3,
89086 +};
89087 +
89088 +struct size_overflow_hash _001552_hash = {
89089 + .next = NULL,
89090 + .name = "TSS_rawhmac",
89091 + .param = PARAM3,
89092 +};
89093 +
89094 +struct size_overflow_hash _001553_hash = {
89095 + .next = NULL,
89096 + .name = "tt3650_ci_msg",
89097 + .param = PARAM4,
89098 +};
89099 +
89100 +struct size_overflow_hash _001554_hash = {
89101 + .next = NULL,
89102 + .name = "tun_get_user",
89103 + .param = PARAM3,
89104 +};
89105 +
89106 +struct size_overflow_hash _001555_hash = {
89107 + .next = NULL,
89108 + .name = "ubi_dbg_dump_flash",
89109 + .param = PARAM4,
89110 +};
89111 +
89112 +struct size_overflow_hash _001556_hash = {
89113 + .next = &_000954_hash,
89114 + .name = "ubi_io_write",
89115 + .param = PARAM4|PARAM5,
89116 +};
89117 +
89118 +struct size_overflow_hash _001558_hash = {
89119 + .next = NULL,
89120 + .name = "uio_read",
89121 + .param = PARAM3,
89122 +};
89123 +
89124 +struct size_overflow_hash _001559_hash = {
89125 + .next = NULL,
89126 + .name = "unix_seqpacket_sendmsg",
89127 + .param = PARAM4,
89128 +};
89129 +
89130 +struct size_overflow_hash _001560_hash = {
89131 + .next = NULL,
89132 + .name = "unlink1",
89133 + .param = PARAM3,
89134 +};
89135 +
89136 +struct size_overflow_hash _001562_hash = {
89137 + .next = NULL,
89138 + .name = "usb_allocate_stream_buffers",
89139 + .param = PARAM3,
89140 +};
89141 +
89142 +struct size_overflow_hash _001563_hash = {
89143 + .next = NULL,
89144 + .name = "usbdev_read",
89145 + .param = PARAM3,
89146 +};
89147 +
89148 +struct size_overflow_hash _001564_hash = {
89149 + .next = NULL,
89150 + .name = "usblp_read",
89151 + .param = PARAM3,
89152 +};
89153 +
89154 +struct size_overflow_hash _001565_hash = {
89155 + .next = NULL,
89156 + .name = "usbtmc_read",
89157 + .param = PARAM3,
89158 +};
89159 +
89160 +struct size_overflow_hash _001566_hash = {
89161 + .next = NULL,
89162 + .name = "usbvision_v4l2_read",
89163 + .param = PARAM3,
89164 +};
89165 +
89166 +struct size_overflow_hash _001567_hash = {
89167 + .next = NULL,
89168 + .name = "_usb_writeN_sync",
89169 + .param = PARAM4,
89170 +};
89171 +
89172 +struct size_overflow_hash _001568_hash = {
89173 + .next = NULL,
89174 + .name = "user_read",
89175 + .param = PARAM3,
89176 +};
89177 +
89178 +struct size_overflow_hash _001569_hash = {
89179 + .next = NULL,
89180 + .name = "v4l_stk_read",
89181 + .param = PARAM3,
89182 +};
89183 +
89184 +struct size_overflow_hash _001570_hash = {
89185 + .next = NULL,
89186 + .name = "vcs_read",
89187 + .param = PARAM3,
89188 +};
89189 +
89190 +struct size_overflow_hash _001571_hash = {
89191 + .next = NULL,
89192 + .name = "vdma_mem_alloc",
89193 + .param = PARAM1,
89194 +};
89195 +
89196 +struct size_overflow_hash _001572_hash = {
89197 + .next = NULL,
89198 + .name = "venus_create",
89199 + .param = PARAM4,
89200 +};
89201 +
89202 +struct size_overflow_hash _001573_hash = {
89203 + .next = NULL,
89204 + .name = "venus_link",
89205 + .param = PARAM5,
89206 +};
89207 +
89208 +struct size_overflow_hash _001574_hash = {
89209 + .next = NULL,
89210 + .name = "venus_lookup",
89211 + .param = PARAM4,
89212 +};
89213 +
89214 +struct size_overflow_hash _001575_hash = {
89215 + .next = NULL,
89216 + .name = "venus_mkdir",
89217 + .param = PARAM4,
89218 +};
89219 +
89220 +struct size_overflow_hash _001576_hash = {
89221 + .next = NULL,
89222 + .name = "venus_remove",
89223 + .param = PARAM4,
89224 +};
89225 +
89226 +struct size_overflow_hash _001577_hash = {
89227 + .next = NULL,
89228 + .name = "venus_rename",
89229 + .param = PARAM4|PARAM5,
89230 +};
89231 +
89232 +struct size_overflow_hash _001579_hash = {
89233 + .next = NULL,
89234 + .name = "venus_rmdir",
89235 + .param = PARAM4,
89236 +};
89237 +
89238 +struct size_overflow_hash _001580_hash = {
89239 + .next = NULL,
89240 + .name = "venus_symlink",
89241 + .param = PARAM4|PARAM6,
89242 +};
89243 +
89244 +struct size_overflow_hash _001582_hash = {
89245 + .next = NULL,
89246 + .name = "vfs_readlink",
89247 + .param = PARAM3,
89248 +};
89249 +
89250 +struct size_overflow_hash _001583_hash = {
89251 + .next = NULL,
89252 + .name = "vfs_readv",
89253 + .param = PARAM3,
89254 +};
89255 +
89256 +struct size_overflow_hash _001584_hash = {
89257 + .next = NULL,
89258 + .name = "vfs_writev",
89259 + .param = PARAM3,
89260 +};
89261 +
89262 +struct size_overflow_hash _001585_hash = {
89263 + .next = NULL,
89264 + .name = "vga_arb_read",
89265 + .param = PARAM3,
89266 +};
89267 +
89268 +struct size_overflow_hash _001586_hash = {
89269 + .next = NULL,
89270 + .name = "vhci_put_user",
89271 + .param = PARAM4,
89272 +};
89273 +
89274 +struct size_overflow_hash _001587_hash = {
89275 + .next = NULL,
89276 + .name = "vhost_add_used_n",
89277 + .param = PARAM3,
89278 +};
89279 +
89280 +struct size_overflow_hash _001588_hash = {
89281 + .next = NULL,
89282 + .name = "__videobuf_copy_to_user",
89283 + .param = PARAM4,
89284 +};
89285 +
89286 +struct size_overflow_hash _001589_hash = {
89287 + .next = NULL,
89288 + .name = "videobuf_pages_to_sg",
89289 + .param = PARAM2,
89290 +};
89291 +
89292 +struct size_overflow_hash _001590_hash = {
89293 + .next = NULL,
89294 + .name = "videobuf_vmalloc_to_sg",
89295 + .param = PARAM2,
89296 +};
89297 +
89298 +struct size_overflow_hash _001591_hash = {
89299 + .next = NULL,
89300 + .name = "virtnet_send_command",
89301 + .param = PARAM5|PARAM6,
89302 +};
89303 +
89304 +struct size_overflow_hash _001593_hash = {
89305 + .next = NULL,
89306 + .name = "vmbus_establish_gpadl",
89307 + .param = PARAM3,
89308 +};
89309 +
89310 +struct size_overflow_hash _001594_hash = {
89311 + .next = &_001501_hash,
89312 + .name = "vol_cdev_read",
89313 + .param = PARAM3,
89314 +};
89315 +
89316 +struct size_overflow_hash _001595_hash = {
89317 + .next = NULL,
89318 + .name = "w9966_v4l_read",
89319 + .param = PARAM3,
89320 +};
89321 +
89322 +struct size_overflow_hash _001596_hash = {
89323 + .next = NULL,
89324 + .name = "wdm_read",
89325 + .param = PARAM3,
89326 +};
89327 +
89328 +struct size_overflow_hash _001597_hash = {
89329 + .next = &_000063_hash,
89330 + .name = "wusb_prf",
89331 + .param = PARAM7,
89332 +};
89333 +
89334 +struct size_overflow_hash _001598_hash = {
89335 + .next = NULL,
89336 + .name = "xdi_copy_to_user",
89337 + .param = PARAM4,
89338 +};
89339 +
89340 +struct size_overflow_hash _001599_hash = {
89341 + .next = NULL,
89342 + .name = "xfs_buf_get_uncached",
89343 + .param = PARAM2,
89344 +};
89345 +
89346 +struct size_overflow_hash _001600_hash = {
89347 + .next = NULL,
89348 + .name = "xfs_efd_init",
89349 + .param = PARAM3,
89350 +};
89351 +
89352 +struct size_overflow_hash _001601_hash = {
89353 + .next = NULL,
89354 + .name = "xfs_efi_init",
89355 + .param = PARAM2,
89356 +};
89357 +
89358 +struct size_overflow_hash _001602_hash = {
89359 + .next = NULL,
89360 + .name = "xfs_iext_realloc_direct",
89361 + .param = PARAM2,
89362 +};
89363 +
89364 +struct size_overflow_hash _001603_hash = {
89365 + .next = NULL,
89366 + .name = "xfs_iext_realloc_indirect",
89367 + .param = PARAM2,
89368 +};
89369 +
89370 +struct size_overflow_hash _001604_hash = {
89371 + .next = NULL,
89372 + .name = "xfs_inumbers_fmt",
89373 + .param = PARAM3,
89374 +};
89375 +
89376 +struct size_overflow_hash _001605_hash = {
89377 + .next = NULL,
89378 + .name = "xlog_recover_add_to_cont_trans",
89379 + .param = PARAM4,
89380 +};
89381 +
89382 +struct size_overflow_hash _001606_hash = {
89383 + .next = NULL,
89384 + .name = "xz_dec_lzma2_create",
89385 + .param = PARAM2,
89386 +};
89387 +
89388 +struct size_overflow_hash _001607_hash = {
89389 + .next = NULL,
89390 + .name = "_zd_iowrite32v_locked",
89391 + .param = PARAM3,
89392 +};
89393 +
89394 +struct size_overflow_hash _001608_hash = {
89395 + .next = NULL,
89396 + .name = "aat2870_reg_read_file",
89397 + .param = PARAM3,
89398 +};
89399 +
89400 +struct size_overflow_hash _001609_hash = {
89401 + .next = NULL,
89402 + .name = "add_sctp_bind_addr",
89403 + .param = PARAM3,
89404 +};
89405 +
89406 +struct size_overflow_hash _001610_hash = {
89407 + .next = NULL,
89408 + .name = "aes_decrypt_fail_read",
89409 + .param = PARAM3,
89410 +};
89411 +
89412 +struct size_overflow_hash _001611_hash = {
89413 + .next = NULL,
89414 + .name = "aes_decrypt_interrupt_read",
89415 + .param = PARAM3,
89416 +};
89417 +
89418 +struct size_overflow_hash _001612_hash = {
89419 + .next = NULL,
89420 + .name = "aes_decrypt_packets_read",
89421 + .param = PARAM3,
89422 +};
89423 +
89424 +struct size_overflow_hash _001613_hash = {
89425 + .next = NULL,
89426 + .name = "aes_encrypt_fail_read",
89427 + .param = PARAM3,
89428 +};
89429 +
89430 +struct size_overflow_hash _001614_hash = {
89431 + .next = NULL,
89432 + .name = "aes_encrypt_interrupt_read",
89433 + .param = PARAM3,
89434 +};
89435 +
89436 +struct size_overflow_hash _001615_hash = {
89437 + .next = NULL,
89438 + .name = "aes_encrypt_packets_read",
89439 + .param = PARAM3,
89440 +};
89441 +
89442 +struct size_overflow_hash _001616_hash = {
89443 + .next = NULL,
89444 + .name = "afs_cell_lookup",
89445 + .param = PARAM2,
89446 +};
89447 +
89448 +struct size_overflow_hash _001617_hash = {
89449 + .next = NULL,
89450 + .name = "agp_allocate_memory",
89451 + .param = PARAM2,
89452 +};
89453 +
89454 +struct size_overflow_hash _001618_hash = {
89455 + .next = NULL,
89456 + .name = "__alloc_bootmem",
89457 + .param = PARAM1,
89458 +};
89459 +
89460 +struct size_overflow_hash _001619_hash = {
89461 + .next = NULL,
89462 + .name = "__alloc_bootmem_low",
89463 + .param = PARAM1,
89464 +};
89465 +
89466 +struct size_overflow_hash _001620_hash = {
89467 + .next = NULL,
89468 + .name = "__alloc_bootmem_node_nopanic",
89469 + .param = PARAM2,
89470 +};
89471 +
89472 +struct size_overflow_hash _001621_hash = {
89473 + .next = NULL,
89474 + .name = "alloc_cc770dev",
89475 + .param = PARAM1,
89476 +};
89477 +
89478 +struct size_overflow_hash _001622_hash = {
89479 + .next = NULL,
89480 + .name = "__alloc_ei_netdev",
89481 + .param = PARAM1,
89482 +};
89483 +
89484 +struct size_overflow_hash _001623_hash = {
89485 + .next = NULL,
89486 + .name = "__alloc_eip_netdev",
89487 + .param = PARAM1,
89488 +};
89489 +
89490 +struct size_overflow_hash _001624_hash = {
89491 + .next = NULL,
89492 + .name = "alloc_libipw",
89493 + .param = PARAM1,
89494 +};
89495 +
89496 +struct size_overflow_hash _001625_hash = {
89497 + .next = NULL,
89498 + .name = "alloc_pg_vec",
89499 + .param = PARAM2,
89500 +};
89501 +
89502 +struct size_overflow_hash _001626_hash = {
89503 + .next = NULL,
89504 + .name = "alloc_sja1000dev",
89505 + .param = PARAM1,
89506 +};
89507 +
89508 +struct size_overflow_hash _001627_hash = {
89509 + .next = NULL,
89510 + .name = "alloc_targets",
89511 + .param = PARAM2,
89512 +};
89513 +
89514 +struct size_overflow_hash _001630_hash = {
89515 + .next = NULL,
89516 + .name = "ath6kl_disconnect_timeout_read",
89517 + .param = PARAM3,
89518 +};
89519 +
89520 +struct size_overflow_hash _001631_hash = {
89521 + .next = NULL,
89522 + .name = "ath6kl_endpoint_stats_read",
89523 + .param = PARAM3,
89524 +};
89525 +
89526 +struct size_overflow_hash _001632_hash = {
89527 + .next = NULL,
89528 + .name = "ath6kl_fwlog_mask_read",
89529 + .param = PARAM3,
89530 +};
89531 +
89532 +struct size_overflow_hash _001633_hash = {
89533 + .next = NULL,
89534 + .name = "ath6kl_keepalive_read",
89535 + .param = PARAM3,
89536 +};
89537 +
89538 +struct size_overflow_hash _001634_hash = {
89539 + .next = NULL,
89540 + .name = "ath6kl_listen_int_read",
89541 + .param = PARAM3,
89542 +};
89543 +
89544 +struct size_overflow_hash _001635_hash = {
89545 + .next = NULL,
89546 + .name = "ath6kl_lrssi_roam_read",
89547 + .param = PARAM3,
89548 +};
89549 +
89550 +struct size_overflow_hash _001636_hash = {
89551 + .next = NULL,
89552 + .name = "ath6kl_regdump_read",
89553 + .param = PARAM3,
89554 +};
89555 +
89556 +struct size_overflow_hash _001637_hash = {
89557 + .next = NULL,
89558 + .name = "ath6kl_regread_read",
89559 + .param = PARAM3,
89560 +};
89561 +
89562 +struct size_overflow_hash _001638_hash = {
89563 + .next = NULL,
89564 + .name = "ath6kl_regwrite_read",
89565 + .param = PARAM3,
89566 +};
89567 +
89568 +struct size_overflow_hash _001639_hash = {
89569 + .next = NULL,
89570 + .name = "ath6kl_roam_table_read",
89571 + .param = PARAM3,
89572 +};
89573 +
89574 +struct size_overflow_hash _001640_hash = {
89575 + .next = NULL,
89576 + .name = "ath9k_debugfs_read_buf",
89577 + .param = PARAM3,
89578 +};
89579 +
89580 +struct size_overflow_hash _001641_hash = {
89581 + .next = NULL,
89582 + .name = "atk_debugfs_ggrp_read",
89583 + .param = PARAM3,
89584 +};
89585 +
89586 +struct size_overflow_hash _001642_hash = {
89587 + .next = NULL,
89588 + .name = "b43_debugfs_read",
89589 + .param = PARAM3,
89590 +};
89591 +
89592 +struct size_overflow_hash _001643_hash = {
89593 + .next = NULL,
89594 + .name = "b43legacy_debugfs_read",
89595 + .param = PARAM3,
89596 +};
89597 +
89598 +struct size_overflow_hash _001644_hash = {
89599 + .next = NULL,
89600 + .name = "bcm_recvmsg",
89601 + .param = PARAM4,
89602 +};
89603 +
89604 +struct size_overflow_hash _001645_hash = {
89605 + .next = NULL,
89606 + .name = "bfad_debugfs_read",
89607 + .param = PARAM3,
89608 +};
89609 +
89610 +struct size_overflow_hash _001646_hash = {
89611 + .next = NULL,
89612 + .name = "bfad_debugfs_read_regrd",
89613 + .param = PARAM3,
89614 +};
89615 +
89616 +struct size_overflow_hash _001647_hash = {
89617 + .next = NULL,
89618 + .name = "blk_init_tags",
89619 + .param = PARAM1,
89620 +};
89621 +
89622 +struct size_overflow_hash _001648_hash = {
89623 + .next = NULL,
89624 + .name = "blk_queue_init_tags",
89625 + .param = PARAM2,
89626 +};
89627 +
89628 +struct size_overflow_hash _001649_hash = {
89629 + .next = NULL,
89630 + .name = "blk_rq_map_kern",
89631 + .param = PARAM4,
89632 +};
89633 +
89634 +struct size_overflow_hash _001650_hash = {
89635 + .next = NULL,
89636 + .name = "bm_entry_read",
89637 + .param = PARAM3,
89638 +};
89639 +
89640 +struct size_overflow_hash _001651_hash = {
89641 + .next = NULL,
89642 + .name = "bm_status_read",
89643 + .param = PARAM3,
89644 +};
89645 +
89646 +struct size_overflow_hash _001652_hash = {
89647 + .next = NULL,
89648 + .name = "bnad_debugfs_read",
89649 + .param = PARAM3,
89650 +};
89651 +
89652 +struct size_overflow_hash _001653_hash = {
89653 + .next = NULL,
89654 + .name = "bnad_debugfs_read_regrd",
89655 + .param = PARAM3,
89656 +};
89657 +
89658 +struct size_overflow_hash _001654_hash = {
89659 + .next = NULL,
89660 + .name = "btmrvl_curpsmode_read",
89661 + .param = PARAM3,
89662 +};
89663 +
89664 +struct size_overflow_hash _001655_hash = {
89665 + .next = NULL,
89666 + .name = "btmrvl_gpiogap_read",
89667 + .param = PARAM3,
89668 +};
89669 +
89670 +struct size_overflow_hash _001656_hash = {
89671 + .next = NULL,
89672 + .name = "btmrvl_hscfgcmd_read",
89673 + .param = PARAM3,
89674 +};
89675 +
89676 +struct size_overflow_hash _001657_hash = {
89677 + .next = NULL,
89678 + .name = "btmrvl_hscmd_read",
89679 + .param = PARAM3,
89680 +};
89681 +
89682 +struct size_overflow_hash _001658_hash = {
89683 + .next = NULL,
89684 + .name = "btmrvl_hsmode_read",
89685 + .param = PARAM3,
89686 +};
89687 +
89688 +struct size_overflow_hash _001659_hash = {
89689 + .next = NULL,
89690 + .name = "btmrvl_hsstate_read",
89691 + .param = PARAM3,
89692 +};
89693 +
89694 +struct size_overflow_hash _001660_hash = {
89695 + .next = NULL,
89696 + .name = "btmrvl_pscmd_read",
89697 + .param = PARAM3,
89698 +};
89699 +
89700 +struct size_overflow_hash _001661_hash = {
89701 + .next = NULL,
89702 + .name = "btmrvl_psmode_read",
89703 + .param = PARAM3,
89704 +};
89705 +
89706 +struct size_overflow_hash _001662_hash = {
89707 + .next = NULL,
89708 + .name = "btmrvl_psstate_read",
89709 + .param = PARAM3,
89710 +};
89711 +
89712 +struct size_overflow_hash _001663_hash = {
89713 + .next = NULL,
89714 + .name = "btmrvl_txdnldready_read",
89715 + .param = PARAM3,
89716 +};
89717 +
89718 +struct size_overflow_hash _001664_hash = {
89719 + .next = NULL,
89720 + .name = "btrfs_add_link",
89721 + .param = PARAM5,
89722 +};
89723 +
89724 +struct size_overflow_hash _001665_hash = {
89725 + .next = NULL,
89726 + .name = "btrfs_discard_extent",
89727 + .param = PARAM2,
89728 +};
89729 +
89730 +struct size_overflow_hash _001666_hash = {
89731 + .next = NULL,
89732 + .name = "btrfs_find_create_tree_block",
89733 + .param = PARAM3,
89734 +};
89735 +
89736 +struct size_overflow_hash _001667_hash = {
89737 + .next = NULL,
89738 + .name = "btrfsic_map_block",
89739 + .param = PARAM2,
89740 +};
89741 +
89742 +struct size_overflow_hash _001668_hash = {
89743 + .next = NULL,
89744 + .name = "caif_stream_recvmsg",
89745 + .param = PARAM4,
89746 +};
89747 +
89748 +struct size_overflow_hash _001669_hash = {
89749 + .next = NULL,
89750 + .name = "carl9170_alloc",
89751 + .param = PARAM1,
89752 +};
89753 +
89754 +struct size_overflow_hash _001670_hash = {
89755 + .next = NULL,
89756 + .name = "carl9170_debugfs_read",
89757 + .param = PARAM3,
89758 +};
89759 +
89760 +struct size_overflow_hash _001671_hash = {
89761 + .next = NULL,
89762 + .name = "cgroup_read_s64",
89763 + .param = PARAM5,
89764 +};
89765 +
89766 +struct size_overflow_hash _001672_hash = {
89767 + .next = NULL,
89768 + .name = "cgroup_read_u64",
89769 + .param = PARAM5,
89770 +};
89771 +
89772 +struct size_overflow_hash _001673_hash = {
89773 + .next = NULL,
89774 + .name = "channel_type_read",
89775 + .param = PARAM3,
89776 +};
89777 +
89778 +struct size_overflow_hash _001674_hash = {
89779 + .next = NULL,
89780 + .name = "codec_list_read_file",
89781 + .param = PARAM3,
89782 +};
89783 +
89784 +struct size_overflow_hash _001675_hash = {
89785 + .next = NULL,
89786 + .name = "configfs_read_file",
89787 + .param = PARAM3,
89788 +};
89789 +
89790 +struct size_overflow_hash _001676_hash = {
89791 + .next = NULL,
89792 + .name = "cpuset_common_file_read",
89793 + .param = PARAM5,
89794 +};
89795 +
89796 +struct size_overflow_hash _001677_hash = {
89797 + .next = NULL,
89798 + .name = "create_subvol",
89799 + .param = PARAM4,
89800 +};
89801 +
89802 +struct size_overflow_hash _001678_hash = {
89803 + .next = NULL,
89804 + .name = "cx18_copy_mdl_to_user",
89805 + .param = PARAM4,
89806 +};
89807 +
89808 +struct size_overflow_hash _001679_hash = {
89809 + .next = NULL,
89810 + .name = "dai_list_read_file",
89811 + .param = PARAM3,
89812 +};
89813 +
89814 +struct size_overflow_hash _001680_hash = {
89815 + .next = NULL,
89816 + .name = "dapm_bias_read_file",
89817 + .param = PARAM3,
89818 +};
89819 +
89820 +struct size_overflow_hash _001681_hash = {
89821 + .next = NULL,
89822 + .name = "dapm_widget_power_read_file",
89823 + .param = PARAM3,
89824 +};
89825 +
89826 +struct size_overflow_hash _001684_hash = {
89827 + .next = NULL,
89828 + .name = "dbgfs_frame",
89829 + .param = PARAM3,
89830 +};
89831 +
89832 +struct size_overflow_hash _001685_hash = {
89833 + .next = NULL,
89834 + .name = "dbgfs_state",
89835 + .param = PARAM3,
89836 +};
89837 +
89838 +struct size_overflow_hash _001686_hash = {
89839 + .next = NULL,
89840 + .name = "debugfs_read",
89841 + .param = PARAM3,
89842 +};
89843 +
89844 +struct size_overflow_hash _001687_hash = {
89845 + .next = NULL,
89846 + .name = "debug_output",
89847 + .param = PARAM3,
89848 +};
89849 +
89850 +struct size_overflow_hash _001688_hash = {
89851 + .next = NULL,
89852 + .name = "debug_read",
89853 + .param = PARAM3,
89854 +};
89855 +
89856 +struct size_overflow_hash _001689_hash = {
89857 + .next = NULL,
89858 + .name = "dfs_file_read",
89859 + .param = PARAM3,
89860 +};
89861 +
89862 +struct size_overflow_hash _001690_hash = {
89863 + .next = NULL,
89864 + .name = "dma_memcpy_pg_to_iovec",
89865 + .param = PARAM6,
89866 +};
89867 +
89868 +struct size_overflow_hash _001691_hash = {
89869 + .next = NULL,
89870 + .name = "dma_memcpy_to_iovec",
89871 + .param = PARAM5,
89872 +};
89873 +
89874 +struct size_overflow_hash _001692_hash = {
89875 + .next = NULL,
89876 + .name = "dma_rx_errors_read",
89877 + .param = PARAM3,
89878 +};
89879 +
89880 +struct size_overflow_hash _001693_hash = {
89881 + .next = NULL,
89882 + .name = "dma_rx_requested_read",
89883 + .param = PARAM3,
89884 +};
89885 +
89886 +struct size_overflow_hash _001694_hash = {
89887 + .next = NULL,
89888 + .name = "dma_show_regs",
89889 + .param = PARAM3,
89890 +};
89891 +
89892 +struct size_overflow_hash _001695_hash = {
89893 + .next = NULL,
89894 + .name = "dma_tx_errors_read",
89895 + .param = PARAM3,
89896 +};
89897 +
89898 +struct size_overflow_hash _001696_hash = {
89899 + .next = NULL,
89900 + .name = "dma_tx_requested_read",
89901 + .param = PARAM3,
89902 +};
89903 +
89904 +struct size_overflow_hash _001697_hash = {
89905 + .next = &_001103_hash,
89906 + .name = "dm_exception_table_init",
89907 + .param = PARAM2,
89908 +};
89909 +
89910 +struct size_overflow_hash _001698_hash = {
89911 + .next = NULL,
89912 + .name = "dn_recvmsg",
89913 + .param = PARAM4,
89914 +};
89915 +
89916 +struct size_overflow_hash _001699_hash = {
89917 + .next = NULL,
89918 + .name = "dns_resolver_read",
89919 + .param = PARAM3,
89920 +};
89921 +
89922 +struct size_overflow_hash _001700_hash = {
89923 + .next = NULL,
89924 + .name = "do_msgrcv",
89925 + .param = PARAM4,
89926 +};
89927 +
89928 +struct size_overflow_hash _001701_hash = {
89929 + .next = &_001394_hash,
89930 + .name = "driver_state_read",
89931 + .param = PARAM3,
89932 +};
89933 +
89934 +struct size_overflow_hash _001702_hash = {
89935 + .next = NULL,
89936 + .name = "dvb_demux_do_ioctl",
89937 + .param = PARAM3,
89938 +};
89939 +
89940 +struct size_overflow_hash _001703_hash = {
89941 + .next = NULL,
89942 + .name = "dvb_dmxdev_buffer_read",
89943 + .param = PARAM4,
89944 +};
89945 +
89946 +struct size_overflow_hash _001704_hash = {
89947 + .next = NULL,
89948 + .name = "dvb_dvr_do_ioctl",
89949 + .param = PARAM3,
89950 +};
89951 +
89952 +struct size_overflow_hash _001705_hash = {
89953 + .next = NULL,
89954 + .name = "econet_recvmsg",
89955 + .param = PARAM4,
89956 +};
89957 +
89958 +struct size_overflow_hash _001706_hash = {
89959 + .next = NULL,
89960 + .name = "event_calibration_read",
89961 + .param = PARAM3,
89962 +};
89963 +
89964 +struct size_overflow_hash _001707_hash = {
89965 + .next = NULL,
89966 + .name = "event_heart_beat_read",
89967 + .param = PARAM3,
89968 +};
89969 +
89970 +struct size_overflow_hash _001708_hash = {
89971 + .next = &_001014_hash,
89972 + .name = "event_oom_late_read",
89973 + .param = PARAM3,
89974 +};
89975 +
89976 +struct size_overflow_hash _001709_hash = {
89977 + .next = NULL,
89978 + .name = "event_phy_transmit_error_read",
89979 + .param = PARAM3,
89980 +};
89981 +
89982 +struct size_overflow_hash _001710_hash = {
89983 + .next = NULL,
89984 + .name = "event_rx_mem_empty_read",
89985 + .param = PARAM3,
89986 +};
89987 +
89988 +struct size_overflow_hash _001711_hash = {
89989 + .next = NULL,
89990 + .name = "event_rx_mismatch_read",
89991 + .param = PARAM3,
89992 +};
89993 +
89994 +struct size_overflow_hash _001712_hash = {
89995 + .next = NULL,
89996 + .name = "event_rx_pool_read",
89997 + .param = PARAM3,
89998 +};
89999 +
90000 +struct size_overflow_hash _001713_hash = {
90001 + .next = NULL,
90002 + .name = "event_tx_stuck_read",
90003 + .param = PARAM3,
90004 +};
90005 +
90006 +struct size_overflow_hash _001714_hash = {
90007 + .next = NULL,
90008 + .name = "excessive_retries_read",
90009 + .param = PARAM3,
90010 +};
90011 +
90012 +struct size_overflow_hash _001715_hash = {
90013 + .next = NULL,
90014 + .name = "fallback_on_nodma_alloc",
90015 + .param = PARAM2,
90016 +};
90017 +
90018 +struct size_overflow_hash _001716_hash = {
90019 + .next = NULL,
90020 + .name = "filter_read",
90021 + .param = PARAM3,
90022 +};
90023 +
90024 +struct size_overflow_hash _001717_hash = {
90025 + .next = NULL,
90026 + .name = "format_devstat_counter",
90027 + .param = PARAM3,
90028 +};
90029 +
90030 +struct size_overflow_hash _001718_hash = {
90031 + .next = NULL,
90032 + .name = "fragmentation_threshold_read",
90033 + .param = PARAM3,
90034 +};
90035 +
90036 +struct size_overflow_hash _001719_hash = {
90037 + .next = NULL,
90038 + .name = "fuse_conn_limit_read",
90039 + .param = PARAM3,
90040 +};
90041 +
90042 +struct size_overflow_hash _001720_hash = {
90043 + .next = NULL,
90044 + .name = "fuse_conn_waiting_read",
90045 + .param = PARAM3,
90046 +};
90047 +
90048 +struct size_overflow_hash _001721_hash = {
90049 + .next = NULL,
90050 + .name = "generic_readlink",
90051 + .param = PARAM3,
90052 +};
90053 +
90054 +struct size_overflow_hash _001722_hash = {
90055 + .next = NULL,
90056 + .name = "gpio_power_read",
90057 + .param = PARAM3,
90058 +};
90059 +
90060 +struct size_overflow_hash _001723_hash = {
90061 + .next = NULL,
90062 + .name = "hash_recvmsg",
90063 + .param = PARAM4,
90064 +};
90065 +
90066 +struct size_overflow_hash _001724_hash = {
90067 + .next = NULL,
90068 + .name = "ht40allow_map_read",
90069 + .param = PARAM3,
90070 +};
90071 +
90072 +struct size_overflow_hash _001725_hash = {
90073 + .next = NULL,
90074 + .name = "hwflags_read",
90075 + .param = PARAM3,
90076 +};
90077 +
90078 +struct size_overflow_hash _001726_hash = {
90079 + .next = NULL,
90080 + .name = "hysdn_conf_read",
90081 + .param = PARAM3,
90082 +};
90083 +
90084 +struct size_overflow_hash _001727_hash = {
90085 + .next = NULL,
90086 + .name = "i2400m_rx_stats_read",
90087 + .param = PARAM3,
90088 +};
90089 +
90090 +struct size_overflow_hash _001728_hash = {
90091 + .next = NULL,
90092 + .name = "i2400m_tx_stats_read",
90093 + .param = PARAM3,
90094 +};
90095 +
90096 +struct size_overflow_hash _001729_hash = {
90097 + .next = NULL,
90098 + .name = "idmouse_read",
90099 + .param = PARAM3,
90100 +};
90101 +
90102 +struct size_overflow_hash _001730_hash = {
90103 + .next = NULL,
90104 + .name = "ieee80211_if_read",
90105 + .param = PARAM3,
90106 +};
90107 +
90108 +struct size_overflow_hash _001731_hash = {
90109 + .next = NULL,
90110 + .name = "ieee80211_rx_bss_info",
90111 + .param = PARAM3,
90112 +};
90113 +
90114 +struct size_overflow_hash _001732_hash = {
90115 + .next = NULL,
90116 + .name = "ikconfig_read_current",
90117 + .param = PARAM3,
90118 +};
90119 +
90120 +struct size_overflow_hash _001733_hash = {
90121 + .next = NULL,
90122 + .name = "il3945_sta_dbgfs_stats_table_read",
90123 + .param = PARAM3,
90124 +};
90125 +
90126 +struct size_overflow_hash _001734_hash = {
90127 + .next = NULL,
90128 + .name = "il3945_ucode_general_stats_read",
90129 + .param = PARAM3,
90130 +};
90131 +
90132 +struct size_overflow_hash _001735_hash = {
90133 + .next = NULL,
90134 + .name = "il3945_ucode_rx_stats_read",
90135 + .param = PARAM3,
90136 +};
90137 +
90138 +struct size_overflow_hash _001736_hash = {
90139 + .next = NULL,
90140 + .name = "il3945_ucode_tx_stats_read",
90141 + .param = PARAM3,
90142 +};
90143 +
90144 +struct size_overflow_hash _001737_hash = {
90145 + .next = NULL,
90146 + .name = "il4965_rs_sta_dbgfs_rate_scale_data_read",
90147 + .param = PARAM3,
90148 +};
90149 +
90150 +struct size_overflow_hash _001738_hash = {
90151 + .next = NULL,
90152 + .name = "il4965_rs_sta_dbgfs_scale_table_read",
90153 + .param = PARAM3,
90154 +};
90155 +
90156 +struct size_overflow_hash _001739_hash = {
90157 + .next = NULL,
90158 + .name = "il4965_rs_sta_dbgfs_stats_table_read",
90159 + .param = PARAM3,
90160 +};
90161 +
90162 +struct size_overflow_hash _001740_hash = {
90163 + .next = NULL,
90164 + .name = "il4965_ucode_general_stats_read",
90165 + .param = PARAM3,
90166 +};
90167 +
90168 +struct size_overflow_hash _001741_hash = {
90169 + .next = NULL,
90170 + .name = "il4965_ucode_rx_stats_read",
90171 + .param = PARAM3,
90172 +};
90173 +
90174 +struct size_overflow_hash _001742_hash = {
90175 + .next = NULL,
90176 + .name = "il4965_ucode_tx_stats_read",
90177 + .param = PARAM3,
90178 +};
90179 +
90180 +struct size_overflow_hash _001743_hash = {
90181 + .next = NULL,
90182 + .name = "il_dbgfs_chain_noise_read",
90183 + .param = PARAM3,
90184 +};
90185 +
90186 +struct size_overflow_hash _001744_hash = {
90187 + .next = NULL,
90188 + .name = "il_dbgfs_channels_read",
90189 + .param = PARAM3,
90190 +};
90191 +
90192 +struct size_overflow_hash _001745_hash = {
90193 + .next = NULL,
90194 + .name = "il_dbgfs_disable_ht40_read",
90195 + .param = PARAM3,
90196 +};
90197 +
90198 +struct size_overflow_hash _001746_hash = {
90199 + .next = NULL,
90200 + .name = "il_dbgfs_fh_reg_read",
90201 + .param = PARAM3,
90202 +};
90203 +
90204 +struct size_overflow_hash _001747_hash = {
90205 + .next = NULL,
90206 + .name = "il_dbgfs_force_reset_read",
90207 + .param = PARAM3,
90208 +};
90209 +
90210 +struct size_overflow_hash _001748_hash = {
90211 + .next = NULL,
90212 + .name = "il_dbgfs_interrupt_read",
90213 + .param = PARAM3,
90214 +};
90215 +
90216 +struct size_overflow_hash _001749_hash = {
90217 + .next = NULL,
90218 + .name = "il_dbgfs_missed_beacon_read",
90219 + .param = PARAM3,
90220 +};
90221 +
90222 +struct size_overflow_hash _001750_hash = {
90223 + .next = NULL,
90224 + .name = "il_dbgfs_nvm_read",
90225 + .param = PARAM3,
90226 +};
90227 +
90228 +struct size_overflow_hash _001751_hash = {
90229 + .next = NULL,
90230 + .name = "il_dbgfs_power_save_status_read",
90231 + .param = PARAM3,
90232 +};
90233 +
90234 +struct size_overflow_hash _001752_hash = {
90235 + .next = NULL,
90236 + .name = "il_dbgfs_qos_read",
90237 + .param = PARAM3,
90238 +};
90239 +
90240 +struct size_overflow_hash _001753_hash = {
90241 + .next = NULL,
90242 + .name = "il_dbgfs_rxon_filter_flags_read",
90243 + .param = PARAM3,
90244 +};
90245 +
90246 +struct size_overflow_hash _001754_hash = {
90247 + .next = &_001681_hash,
90248 + .name = "il_dbgfs_rxon_flags_read",
90249 + .param = PARAM3,
90250 +};
90251 +
90252 +struct size_overflow_hash _001755_hash = {
90253 + .next = NULL,
90254 + .name = "il_dbgfs_rx_queue_read",
90255 + .param = PARAM3,
90256 +};
90257 +
90258 +struct size_overflow_hash _001756_hash = {
90259 + .next = NULL,
90260 + .name = "il_dbgfs_rx_stats_read",
90261 + .param = PARAM3,
90262 +};
90263 +
90264 +struct size_overflow_hash _001757_hash = {
90265 + .next = NULL,
90266 + .name = "il_dbgfs_sensitivity_read",
90267 + .param = PARAM3,
90268 +};
90269 +
90270 +struct size_overflow_hash _001758_hash = {
90271 + .next = NULL,
90272 + .name = "il_dbgfs_sram_read",
90273 + .param = PARAM3,
90274 +};
90275 +
90276 +struct size_overflow_hash _001759_hash = {
90277 + .next = NULL,
90278 + .name = "il_dbgfs_stations_read",
90279 + .param = PARAM3,
90280 +};
90281 +
90282 +struct size_overflow_hash _001760_hash = {
90283 + .next = NULL,
90284 + .name = "il_dbgfs_status_read",
90285 + .param = PARAM3,
90286 +};
90287 +
90288 +struct size_overflow_hash _001761_hash = {
90289 + .next = NULL,
90290 + .name = "il_dbgfs_tx_queue_read",
90291 + .param = PARAM3,
90292 +};
90293 +
90294 +struct size_overflow_hash _001762_hash = {
90295 + .next = NULL,
90296 + .name = "il_dbgfs_tx_stats_read",
90297 + .param = PARAM3,
90298 +};
90299 +
90300 +struct size_overflow_hash _001763_hash = {
90301 + .next = NULL,
90302 + .name = "ima_show_htable_value",
90303 + .param = PARAM2,
90304 +};
90305 +
90306 +struct size_overflow_hash _001765_hash = {
90307 + .next = NULL,
90308 + .name = "ipw_write",
90309 + .param = PARAM3,
90310 +};
90311 +
90312 +struct size_overflow_hash _001766_hash = {
90313 + .next = NULL,
90314 + .name = "irda_recvmsg_stream",
90315 + .param = PARAM4,
90316 +};
90317 +
90318 +struct size_overflow_hash _001767_hash = {
90319 + .next = NULL,
90320 + .name = "iscsi_tcp_conn_setup",
90321 + .param = PARAM2,
90322 +};
90323 +
90324 +struct size_overflow_hash _001768_hash = {
90325 + .next = NULL,
90326 + .name = "isr_cmd_cmplt_read",
90327 + .param = PARAM3,
90328 +};
90329 +
90330 +struct size_overflow_hash _001769_hash = {
90331 + .next = NULL,
90332 + .name = "isr_commands_read",
90333 + .param = PARAM3,
90334 +};
90335 +
90336 +struct size_overflow_hash _001770_hash = {
90337 + .next = NULL,
90338 + .name = "isr_decrypt_done_read",
90339 + .param = PARAM3,
90340 +};
90341 +
90342 +struct size_overflow_hash _001771_hash = {
90343 + .next = NULL,
90344 + .name = "isr_dma0_done_read",
90345 + .param = PARAM3,
90346 +};
90347 +
90348 +struct size_overflow_hash _001772_hash = {
90349 + .next = NULL,
90350 + .name = "isr_dma1_done_read",
90351 + .param = PARAM3,
90352 +};
90353 +
90354 +struct size_overflow_hash _001773_hash = {
90355 + .next = NULL,
90356 + .name = "isr_fiqs_read",
90357 + .param = PARAM3,
90358 +};
90359 +
90360 +struct size_overflow_hash _001774_hash = {
90361 + .next = NULL,
90362 + .name = "isr_host_acknowledges_read",
90363 + .param = PARAM3,
90364 +};
90365 +
90366 +struct size_overflow_hash _001775_hash = {
90367 + .next = &_001696_hash,
90368 + .name = "isr_hw_pm_mode_changes_read",
90369 + .param = PARAM3,
90370 +};
90371 +
90372 +struct size_overflow_hash _001776_hash = {
90373 + .next = NULL,
90374 + .name = "isr_irqs_read",
90375 + .param = PARAM3,
90376 +};
90377 +
90378 +struct size_overflow_hash _001777_hash = {
90379 + .next = NULL,
90380 + .name = "isr_low_rssi_read",
90381 + .param = PARAM3,
90382 +};
90383 +
90384 +struct size_overflow_hash _001778_hash = {
90385 + .next = NULL,
90386 + .name = "isr_pci_pm_read",
90387 + .param = PARAM3,
90388 +};
90389 +
90390 +struct size_overflow_hash _001779_hash = {
90391 + .next = NULL,
90392 + .name = "isr_rx_headers_read",
90393 + .param = PARAM3,
90394 +};
90395 +
90396 +struct size_overflow_hash _001780_hash = {
90397 + .next = NULL,
90398 + .name = "isr_rx_mem_overflow_read",
90399 + .param = PARAM3,
90400 +};
90401 +
90402 +struct size_overflow_hash _001781_hash = {
90403 + .next = NULL,
90404 + .name = "isr_rx_procs_read",
90405 + .param = PARAM3,
90406 +};
90407 +
90408 +struct size_overflow_hash _001782_hash = {
90409 + .next = NULL,
90410 + .name = "isr_rx_rdys_read",
90411 + .param = PARAM3,
90412 +};
90413 +
90414 +struct size_overflow_hash _001783_hash = {
90415 + .next = NULL,
90416 + .name = "isr_tx_exch_complete_read",
90417 + .param = PARAM3,
90418 +};
90419 +
90420 +struct size_overflow_hash _001784_hash = {
90421 + .next = NULL,
90422 + .name = "isr_tx_procs_read",
90423 + .param = PARAM3,
90424 +};
90425 +
90426 +struct size_overflow_hash _001785_hash = {
90427 + .next = NULL,
90428 + .name = "isr_wakeups_read",
90429 + .param = PARAM3,
90430 +};
90431 +
90432 +struct size_overflow_hash _001786_hash = {
90433 + .next = NULL,
90434 + .name = "ivtv_read",
90435 + .param = PARAM3,
90436 +};
90437 +
90438 +struct size_overflow_hash _001787_hash = {
90439 + .next = NULL,
90440 + .name = "iwl_dbgfs_bt_traffic_read",
90441 + .param = PARAM3,
90442 +};
90443 +
90444 +struct size_overflow_hash _001788_hash = {
90445 + .next = NULL,
90446 + .name = "iwl_dbgfs_chain_noise_read",
90447 + .param = PARAM3,
90448 +};
90449 +
90450 +struct size_overflow_hash _001789_hash = {
90451 + .next = NULL,
90452 + .name = "iwl_dbgfs_channels_read",
90453 + .param = PARAM3,
90454 +};
90455 +
90456 +struct size_overflow_hash _001790_hash = {
90457 + .next = NULL,
90458 + .name = "iwl_dbgfs_current_sleep_command_read",
90459 + .param = PARAM3,
90460 +};
90461 +
90462 +struct size_overflow_hash _001791_hash = {
90463 + .next = NULL,
90464 + .name = "iwl_dbgfs_disable_ht40_read",
90465 + .param = PARAM3,
90466 +};
90467 +
90468 +struct size_overflow_hash _001792_hash = {
90469 + .next = &_000393_hash,
90470 + .name = "iwl_dbgfs_fh_reg_read",
90471 + .param = PARAM3,
90472 +};
90473 +
90474 +struct size_overflow_hash _001793_hash = {
90475 + .next = NULL,
90476 + .name = "iwl_dbgfs_force_reset_read",
90477 + .param = PARAM3,
90478 +};
90479 +
90480 +struct size_overflow_hash _001794_hash = {
90481 + .next = NULL,
90482 + .name = "iwl_dbgfs_interrupt_read",
90483 + .param = PARAM3,
90484 +};
90485 +
90486 +struct size_overflow_hash _001795_hash = {
90487 + .next = NULL,
90488 + .name = "iwl_dbgfs_log_event_read",
90489 + .param = PARAM3,
90490 +};
90491 +
90492 +struct size_overflow_hash _001796_hash = {
90493 + .next = NULL,
90494 + .name = "iwl_dbgfs_missed_beacon_read",
90495 + .param = PARAM3,
90496 +};
90497 +
90498 +struct size_overflow_hash _001797_hash = {
90499 + .next = NULL,
90500 + .name = "iwl_dbgfs_nvm_read",
90501 + .param = PARAM3,
90502 +};
90503 +
90504 +struct size_overflow_hash _001798_hash = {
90505 + .next = NULL,
90506 + .name = "iwl_dbgfs_plcp_delta_read",
90507 + .param = PARAM3,
90508 +};
90509 +
90510 +struct size_overflow_hash _001799_hash = {
90511 + .next = NULL,
90512 + .name = "iwl_dbgfs_power_save_status_read",
90513 + .param = PARAM3,
90514 +};
90515 +
90516 +struct size_overflow_hash _001800_hash = {
90517 + .next = NULL,
90518 + .name = "iwl_dbgfs_protection_mode_read",
90519 + .param = PARAM3,
90520 +};
90521 +
90522 +struct size_overflow_hash _001801_hash = {
90523 + .next = NULL,
90524 + .name = "iwl_dbgfs_qos_read",
90525 + .param = PARAM3,
90526 +};
90527 +
90528 +struct size_overflow_hash _001802_hash = {
90529 + .next = NULL,
90530 + .name = "iwl_dbgfs_reply_tx_error_read",
90531 + .param = PARAM3,
90532 +};
90533 +
90534 +struct size_overflow_hash _001803_hash = {
90535 + .next = NULL,
90536 + .name = "iwl_dbgfs_rx_handlers_read",
90537 + .param = PARAM3,
90538 +};
90539 +
90540 +struct size_overflow_hash _001804_hash = {
90541 + .next = NULL,
90542 + .name = "iwl_dbgfs_rxon_filter_flags_read",
90543 + .param = PARAM3,
90544 +};
90545 +
90546 +struct size_overflow_hash _001805_hash = {
90547 + .next = NULL,
90548 + .name = "iwl_dbgfs_rxon_flags_read",
90549 + .param = PARAM3,
90550 +};
90551 +
90552 +struct size_overflow_hash _001806_hash = {
90553 + .next = NULL,
90554 + .name = "iwl_dbgfs_rx_queue_read",
90555 + .param = PARAM3,
90556 +};
90557 +
90558 +struct size_overflow_hash _001807_hash = {
90559 + .next = &_000425_hash,
90560 + .name = "iwl_dbgfs_rx_statistics_read",
90561 + .param = PARAM3,
90562 +};
90563 +
90564 +struct size_overflow_hash _001808_hash = {
90565 + .next = NULL,
90566 + .name = "iwl_dbgfs_sensitivity_read",
90567 + .param = PARAM3,
90568 +};
90569 +
90570 +struct size_overflow_hash _001809_hash = {
90571 + .next = NULL,
90572 + .name = "iwl_dbgfs_sleep_level_override_read",
90573 + .param = PARAM3,
90574 +};
90575 +
90576 +struct size_overflow_hash _001810_hash = {
90577 + .next = NULL,
90578 + .name = "iwl_dbgfs_sram_read",
90579 + .param = PARAM3,
90580 +};
90581 +
90582 +struct size_overflow_hash _001811_hash = {
90583 + .next = NULL,
90584 + .name = "iwl_dbgfs_stations_read",
90585 + .param = PARAM3,
90586 +};
90587 +
90588 +struct size_overflow_hash _001812_hash = {
90589 + .next = NULL,
90590 + .name = "iwl_dbgfs_status_read",
90591 + .param = PARAM3,
90592 +};
90593 +
90594 +struct size_overflow_hash _001813_hash = {
90595 + .next = NULL,
90596 + .name = "iwl_dbgfs_temperature_read",
90597 + .param = PARAM3,
90598 +};
90599 +
90600 +struct size_overflow_hash _001814_hash = {
90601 + .next = NULL,
90602 + .name = "iwl_dbgfs_thermal_throttling_read",
90603 + .param = PARAM3,
90604 +};
90605 +
90606 +struct size_overflow_hash _001815_hash = {
90607 + .next = NULL,
90608 + .name = "iwl_dbgfs_traffic_log_read",
90609 + .param = PARAM3,
90610 +};
90611 +
90612 +struct size_overflow_hash _001816_hash = {
90613 + .next = NULL,
90614 + .name = "iwl_dbgfs_tx_queue_read",
90615 + .param = PARAM3,
90616 +};
90617 +
90618 +struct size_overflow_hash _001817_hash = {
90619 + .next = NULL,
90620 + .name = "iwl_dbgfs_tx_statistics_read",
90621 + .param = PARAM3,
90622 +};
90623 +
90624 +struct size_overflow_hash _001818_hash = {
90625 + .next = NULL,
90626 + .name = "iwl_dbgfs_ucode_bt_stats_read",
90627 + .param = PARAM3,
90628 +};
90629 +
90630 +struct size_overflow_hash _001819_hash = {
90631 + .next = NULL,
90632 + .name = "iwl_dbgfs_ucode_general_stats_read",
90633 + .param = PARAM3,
90634 +};
90635 +
90636 +struct size_overflow_hash _001820_hash = {
90637 + .next = NULL,
90638 + .name = "iwl_dbgfs_ucode_rx_stats_read",
90639 + .param = PARAM3,
90640 +};
90641 +
90642 +struct size_overflow_hash _001821_hash = {
90643 + .next = &_000349_hash,
90644 + .name = "iwl_dbgfs_ucode_tracing_read",
90645 + .param = PARAM3,
90646 +};
90647 +
90648 +struct size_overflow_hash _001822_hash = {
90649 + .next = NULL,
90650 + .name = "iwl_dbgfs_ucode_tx_stats_read",
90651 + .param = PARAM3,
90652 +};
90653 +
90654 +struct size_overflow_hash _001823_hash = {
90655 + .next = NULL,
90656 + .name = "iwl_dbgfs_wowlan_sram_read",
90657 + .param = PARAM3,
90658 +};
90659 +
90660 +struct size_overflow_hash _001824_hash = {
90661 + .next = &_001314_hash,
90662 + .name = "iwm_if_alloc",
90663 + .param = PARAM1,
90664 +};
90665 +
90666 +struct size_overflow_hash _001825_hash = {
90667 + .next = NULL,
90668 + .name = "kernel_readv",
90669 + .param = PARAM3,
90670 +};
90671 +
90672 +struct size_overflow_hash _001826_hash = {
90673 + .next = NULL,
90674 + .name = "key_algorithm_read",
90675 + .param = PARAM3,
90676 +};
90677 +
90678 +struct size_overflow_hash _001827_hash = {
90679 + .next = NULL,
90680 + .name = "key_icverrors_read",
90681 + .param = PARAM3,
90682 +};
90683 +
90684 +struct size_overflow_hash _001828_hash = {
90685 + .next = NULL,
90686 + .name = "key_key_read",
90687 + .param = PARAM3,
90688 +};
90689 +
90690 +struct size_overflow_hash _001829_hash = {
90691 + .next = NULL,
90692 + .name = "key_replays_read",
90693 + .param = PARAM3,
90694 +};
90695 +
90696 +struct size_overflow_hash _001830_hash = {
90697 + .next = NULL,
90698 + .name = "key_rx_spec_read",
90699 + .param = PARAM3,
90700 +};
90701 +
90702 +struct size_overflow_hash _001831_hash = {
90703 + .next = NULL,
90704 + .name = "key_tx_spec_read",
90705 + .param = PARAM3,
90706 +};
90707 +
90708 +struct size_overflow_hash _001832_hash = {
90709 + .next = NULL,
90710 + .name = "__kfifo_to_user",
90711 + .param = PARAM3,
90712 +};
90713 +
90714 +struct size_overflow_hash _001833_hash = {
90715 + .next = NULL,
90716 + .name = "__kfifo_to_user_r",
90717 + .param = PARAM3,
90718 +};
90719 +
90720 +struct size_overflow_hash _001834_hash = {
90721 + .next = NULL,
90722 + .name = "kmem_zalloc_greedy",
90723 + .param = PARAM2|PARAM3,
90724 +};
90725 +
90726 +struct size_overflow_hash _001836_hash = {
90727 + .next = NULL,
90728 + .name = "l2cap_chan_send",
90729 + .param = PARAM3,
90730 +};
90731 +
90732 +struct size_overflow_hash _001837_hash = {
90733 + .next = NULL,
90734 + .name = "l2cap_sar_segment_sdu",
90735 + .param = PARAM3,
90736 +};
90737 +
90738 +struct size_overflow_hash _001838_hash = {
90739 + .next = NULL,
90740 + .name = "lbs_debugfs_read",
90741 + .param = PARAM3,
90742 +};
90743 +
90744 +struct size_overflow_hash _001839_hash = {
90745 + .next = NULL,
90746 + .name = "lbs_dev_info",
90747 + .param = PARAM3,
90748 +};
90749 +
90750 +struct size_overflow_hash _001840_hash = {
90751 + .next = NULL,
90752 + .name = "lbs_host_sleep_read",
90753 + .param = PARAM3,
90754 +};
90755 +
90756 +struct size_overflow_hash _001841_hash = {
90757 + .next = NULL,
90758 + .name = "lbs_rdbbp_read",
90759 + .param = PARAM3,
90760 +};
90761 +
90762 +struct size_overflow_hash _001842_hash = {
90763 + .next = NULL,
90764 + .name = "lbs_rdmac_read",
90765 + .param = PARAM3,
90766 +};
90767 +
90768 +struct size_overflow_hash _001843_hash = {
90769 + .next = NULL,
90770 + .name = "lbs_rdrf_read",
90771 + .param = PARAM3,
90772 +};
90773 +
90774 +struct size_overflow_hash _001844_hash = {
90775 + .next = NULL,
90776 + .name = "lbs_sleepparams_read",
90777 + .param = PARAM3,
90778 +};
90779 +
90780 +struct size_overflow_hash _001845_hash = {
90781 + .next = NULL,
90782 + .name = "lbs_threshold_read",
90783 + .param = PARAM5,
90784 +};
90785 +
90786 +struct size_overflow_hash _001846_hash = {
90787 + .next = NULL,
90788 + .name = "libfc_vport_create",
90789 + .param = PARAM2,
90790 +};
90791 +
90792 +struct size_overflow_hash _001847_hash = {
90793 + .next = NULL,
90794 + .name = "lkdtm_debugfs_read",
90795 + .param = PARAM3,
90796 +};
90797 +
90798 +struct size_overflow_hash _001848_hash = {
90799 + .next = NULL,
90800 + .name = "llcp_sock_recvmsg",
90801 + .param = PARAM4,
90802 +};
90803 +
90804 +struct size_overflow_hash _001849_hash = {
90805 + .next = NULL,
90806 + .name = "long_retry_limit_read",
90807 + .param = PARAM3,
90808 +};
90809 +
90810 +struct size_overflow_hash _001850_hash = {
90811 + .next = NULL,
90812 + .name = "lpfc_debugfs_dif_err_read",
90813 + .param = PARAM3,
90814 +};
90815 +
90816 +struct size_overflow_hash _001851_hash = {
90817 + .next = NULL,
90818 + .name = "lpfc_debugfs_read",
90819 + .param = PARAM3,
90820 +};
90821 +
90822 +struct size_overflow_hash _001852_hash = {
90823 + .next = NULL,
90824 + .name = "lpfc_idiag_baracc_read",
90825 + .param = PARAM3,
90826 +};
90827 +
90828 +struct size_overflow_hash _001853_hash = {
90829 + .next = NULL,
90830 + .name = "lpfc_idiag_ctlacc_read",
90831 + .param = PARAM3,
90832 +};
90833 +
90834 +struct size_overflow_hash _001854_hash = {
90835 + .next = NULL,
90836 + .name = "lpfc_idiag_drbacc_read",
90837 + .param = PARAM3,
90838 +};
90839 +
90840 +struct size_overflow_hash _001855_hash = {
90841 + .next = NULL,
90842 + .name = "lpfc_idiag_extacc_read",
90843 + .param = PARAM3,
90844 +};
90845 +
90846 +struct size_overflow_hash _001856_hash = {
90847 + .next = NULL,
90848 + .name = "lpfc_idiag_mbxacc_read",
90849 + .param = PARAM3,
90850 +};
90851 +
90852 +struct size_overflow_hash _001857_hash = {
90853 + .next = NULL,
90854 + .name = "lpfc_idiag_pcicfg_read",
90855 + .param = PARAM3,
90856 +};
90857 +
90858 +struct size_overflow_hash _001858_hash = {
90859 + .next = NULL,
90860 + .name = "lpfc_idiag_queacc_read",
90861 + .param = PARAM3,
90862 +};
90863 +
90864 +struct size_overflow_hash _001859_hash = {
90865 + .next = NULL,
90866 + .name = "lpfc_idiag_queinfo_read",
90867 + .param = PARAM3,
90868 +};
90869 +
90870 +struct size_overflow_hash _001860_hash = {
90871 + .next = NULL,
90872 + .name = "mac80211_format_buffer",
90873 + .param = PARAM2,
90874 +};
90875 +
90876 +struct size_overflow_hash _001861_hash = {
90877 + .next = NULL,
90878 + .name = "macvtap_put_user",
90879 + .param = PARAM4,
90880 +};
90881 +
90882 +struct size_overflow_hash _001862_hash = {
90883 + .next = NULL,
90884 + .name = "macvtap_sendmsg",
90885 + .param = PARAM4,
90886 +};
90887 +
90888 +struct size_overflow_hash _001863_hash = {
90889 + .next = NULL,
90890 + .name = "mic_calc_failure_read",
90891 + .param = PARAM3,
90892 +};
90893 +
90894 +struct size_overflow_hash _001864_hash = {
90895 + .next = NULL,
90896 + .name = "mic_rx_pkts_read",
90897 + .param = PARAM3,
90898 +};
90899 +
90900 +struct size_overflow_hash _001865_hash = {
90901 + .next = NULL,
90902 + .name = "minstrel_stats_read",
90903 + .param = PARAM3,
90904 +};
90905 +
90906 +struct size_overflow_hash _001866_hash = {
90907 + .next = NULL,
90908 + .name = "mmc_ext_csd_read",
90909 + .param = PARAM3,
90910 +};
90911 +
90912 +struct size_overflow_hash _001867_hash = {
90913 + .next = NULL,
90914 + .name = "mon_bin_read",
90915 + .param = PARAM3,
90916 +};
90917 +
90918 +struct size_overflow_hash _001868_hash = {
90919 + .next = NULL,
90920 + .name = "mon_stat_read",
90921 + .param = PARAM3,
90922 +};
90923 +
90924 +struct size_overflow_hash _001870_hash = {
90925 + .next = NULL,
90926 + .name = "mqueue_read_file",
90927 + .param = PARAM3,
90928 +};
90929 +
90930 +struct size_overflow_hash _001871_hash = {
90931 + .next = NULL,
90932 + .name = "mwifiex_debug_read",
90933 + .param = PARAM3,
90934 +};
90935 +
90936 +struct size_overflow_hash _001872_hash = {
90937 + .next = NULL,
90938 + .name = "mwifiex_getlog_read",
90939 + .param = PARAM3,
90940 +};
90941 +
90942 +struct size_overflow_hash _001873_hash = {
90943 + .next = NULL,
90944 + .name = "mwifiex_info_read",
90945 + .param = PARAM3,
90946 +};
90947 +
90948 +struct size_overflow_hash _001874_hash = {
90949 + .next = NULL,
90950 + .name = "mwifiex_rdeeprom_read",
90951 + .param = PARAM3,
90952 +};
90953 +
90954 +struct size_overflow_hash _001875_hash = {
90955 + .next = NULL,
90956 + .name = "mwifiex_regrdwr_read",
90957 + .param = PARAM3,
90958 +};
90959 +
90960 +struct size_overflow_hash _001876_hash = {
90961 + .next = NULL,
90962 + .name = "nfsd_vfs_read",
90963 + .param = PARAM6,
90964 +};
90965 +
90966 +struct size_overflow_hash _001877_hash = {
90967 + .next = NULL,
90968 + .name = "nfsd_vfs_write",
90969 + .param = PARAM6,
90970 +};
90971 +
90972 +struct size_overflow_hash _001878_hash = {
90973 + .next = NULL,
90974 + .name = "nfs_idmap_lookup_id",
90975 + .param = PARAM2,
90976 +};
90977 +
90978 +struct size_overflow_hash _001879_hash = {
90979 + .next = NULL,
90980 + .name = "o2hb_debug_read",
90981 + .param = PARAM3,
90982 +};
90983 +
90984 +struct size_overflow_hash _001880_hash = {
90985 + .next = NULL,
90986 + .name = "o2net_debug_read",
90987 + .param = PARAM3,
90988 +};
90989 +
90990 +struct size_overflow_hash _001881_hash = {
90991 + .next = NULL,
90992 + .name = "ocfs2_control_read",
90993 + .param = PARAM3,
90994 +};
90995 +
90996 +struct size_overflow_hash _001882_hash = {
90997 + .next = NULL,
90998 + .name = "ocfs2_debug_read",
90999 + .param = PARAM3,
91000 +};
91001 +
91002 +struct size_overflow_hash _001883_hash = {
91003 + .next = NULL,
91004 + .name = "ocfs2_readlink",
91005 + .param = PARAM3,
91006 +};
91007 +
91008 +struct size_overflow_hash _001884_hash = {
91009 + .next = NULL,
91010 + .name = "oom_adjust_read",
91011 + .param = PARAM3,
91012 +};
91013 +
91014 +struct size_overflow_hash _001885_hash = {
91015 + .next = NULL,
91016 + .name = "oom_score_adj_read",
91017 + .param = PARAM3,
91018 +};
91019 +
91020 +struct size_overflow_hash _001886_hash = {
91021 + .next = NULL,
91022 + .name = "oprofilefs_str_to_user",
91023 + .param = PARAM3,
91024 +};
91025 +
91026 +struct size_overflow_hash _001887_hash = {
91027 + .next = NULL,
91028 + .name = "oprofilefs_ulong_to_user",
91029 + .param = PARAM3,
91030 +};
91031 +
91032 +struct size_overflow_hash _001888_hash = {
91033 + .next = NULL,
91034 + .name = "_osd_req_list_objects",
91035 + .param = PARAM6,
91036 +};
91037 +
91038 +struct size_overflow_hash _001889_hash = {
91039 + .next = NULL,
91040 + .name = "osd_req_read_kern",
91041 + .param = PARAM5,
91042 +};
91043 +
91044 +struct size_overflow_hash _001890_hash = {
91045 + .next = NULL,
91046 + .name = "osd_req_write_kern",
91047 + .param = PARAM5,
91048 +};
91049 +
91050 +struct size_overflow_hash _001891_hash = {
91051 + .next = NULL,
91052 + .name = "p54_init_common",
91053 + .param = PARAM1,
91054 +};
91055 +
91056 +struct size_overflow_hash _001892_hash = {
91057 + .next = NULL,
91058 + .name = "packet_sendmsg",
91059 + .param = PARAM4,
91060 +};
91061 +
91062 +struct size_overflow_hash _001893_hash = {
91063 + .next = NULL,
91064 + .name = "page_readlink",
91065 + .param = PARAM3,
91066 +};
91067 +
91068 +struct size_overflow_hash _001894_hash = {
91069 + .next = NULL,
91070 + .name = "pcf50633_write_block",
91071 + .param = PARAM3,
91072 +};
91073 +
91074 +struct size_overflow_hash _001895_hash = {
91075 + .next = NULL,
91076 + .name = "platform_list_read_file",
91077 + .param = PARAM3,
91078 +};
91079 +
91080 +struct size_overflow_hash _001896_hash = {
91081 + .next = NULL,
91082 + .name = "pm860x_bulk_write",
91083 + .param = PARAM3,
91084 +};
91085 +
91086 +struct size_overflow_hash _001897_hash = {
91087 + .next = NULL,
91088 + .name = "pm_qos_power_read",
91089 + .param = PARAM3,
91090 +};
91091 +
91092 +struct size_overflow_hash _001898_hash = {
91093 + .next = NULL,
91094 + .name = "pms_read",
91095 + .param = PARAM3,
91096 +};
91097 +
91098 +struct size_overflow_hash _001899_hash = {
91099 + .next = NULL,
91100 + .name = "port_show_regs",
91101 + .param = PARAM3,
91102 +};
91103 +
91104 +struct size_overflow_hash _001900_hash = {
91105 + .next = NULL,
91106 + .name = "proc_coredump_filter_read",
91107 + .param = PARAM3,
91108 +};
91109 +
91110 +struct size_overflow_hash _001901_hash = {
91111 + .next = NULL,
91112 + .name = "proc_fdinfo_read",
91113 + .param = PARAM3,
91114 +};
91115 +
91116 +struct size_overflow_hash _001902_hash = {
91117 + .next = NULL,
91118 + .name = "proc_info_read",
91119 + .param = PARAM3,
91120 +};
91121 +
91122 +struct size_overflow_hash _001903_hash = {
91123 + .next = NULL,
91124 + .name = "proc_loginuid_read",
91125 + .param = PARAM3,
91126 +};
91127 +
91128 +struct size_overflow_hash _001904_hash = {
91129 + .next = NULL,
91130 + .name = "proc_pid_attr_read",
91131 + .param = PARAM3,
91132 +};
91133 +
91134 +struct size_overflow_hash _001905_hash = {
91135 + .next = NULL,
91136 + .name = "proc_pid_readlink",
91137 + .param = PARAM3,
91138 +};
91139 +
91140 +struct size_overflow_hash _001906_hash = {
91141 + .next = NULL,
91142 + .name = "proc_read",
91143 + .param = PARAM3,
91144 +};
91145 +
91146 +struct size_overflow_hash _001907_hash = {
91147 + .next = NULL,
91148 + .name = "proc_self_readlink",
91149 + .param = PARAM3,
91150 +};
91151 +
91152 +struct size_overflow_hash _001908_hash = {
91153 + .next = NULL,
91154 + .name = "proc_sessionid_read",
91155 + .param = PARAM3,
91156 +};
91157 +
91158 +struct size_overflow_hash _001909_hash = {
91159 + .next = NULL,
91160 + .name = "provide_user_output",
91161 + .param = PARAM3,
91162 +};
91163 +
91164 +struct size_overflow_hash _001910_hash = {
91165 + .next = NULL,
91166 + .name = "ps_pspoll_max_apturn_read",
91167 + .param = PARAM3,
91168 +};
91169 +
91170 +struct size_overflow_hash _001911_hash = {
91171 + .next = NULL,
91172 + .name = "ps_pspoll_timeouts_read",
91173 + .param = PARAM3,
91174 +};
91175 +
91176 +struct size_overflow_hash _001912_hash = {
91177 + .next = NULL,
91178 + .name = "ps_pspoll_utilization_read",
91179 + .param = PARAM3,
91180 +};
91181 +
91182 +struct size_overflow_hash _001913_hash = {
91183 + .next = NULL,
91184 + .name = "pstore_file_read",
91185 + .param = PARAM3,
91186 +};
91187 +
91188 +struct size_overflow_hash _001914_hash = {
91189 + .next = NULL,
91190 + .name = "ps_upsd_max_apturn_read",
91191 + .param = PARAM3,
91192 +};
91193 +
91194 +struct size_overflow_hash _001915_hash = {
91195 + .next = NULL,
91196 + .name = "ps_upsd_max_sptime_read",
91197 + .param = PARAM3,
91198 +};
91199 +
91200 +struct size_overflow_hash _001916_hash = {
91201 + .next = NULL,
91202 + .name = "ps_upsd_timeouts_read",
91203 + .param = PARAM3,
91204 +};
91205 +
91206 +struct size_overflow_hash _001917_hash = {
91207 + .next = NULL,
91208 + .name = "ps_upsd_utilization_read",
91209 + .param = PARAM3,
91210 +};
91211 +
91212 +struct size_overflow_hash _001918_hash = {
91213 + .next = NULL,
91214 + .name = "pvr2_v4l2_read",
91215 + .param = PARAM3,
91216 +};
91217 +
91218 +struct size_overflow_hash _001919_hash = {
91219 + .next = NULL,
91220 + .name = "pwr_disable_ps_read",
91221 + .param = PARAM3,
91222 +};
91223 +
91224 +struct size_overflow_hash _001920_hash = {
91225 + .next = NULL,
91226 + .name = "pwr_elp_enter_read",
91227 + .param = PARAM3,
91228 +};
91229 +
91230 +struct size_overflow_hash _001921_hash = {
91231 + .next = NULL,
91232 + .name = "pwr_enable_ps_read",
91233 + .param = PARAM3,
91234 +};
91235 +
91236 +struct size_overflow_hash _001922_hash = {
91237 + .next = NULL,
91238 + .name = "pwr_fix_tsf_ps_read",
91239 + .param = PARAM3,
91240 +};
91241 +
91242 +struct size_overflow_hash _001923_hash = {
91243 + .next = NULL,
91244 + .name = "pwr_missing_bcns_read",
91245 + .param = PARAM3,
91246 +};
91247 +
91248 +struct size_overflow_hash _001924_hash = {
91249 + .next = NULL,
91250 + .name = "pwr_power_save_off_read",
91251 + .param = PARAM3,
91252 +};
91253 +
91254 +struct size_overflow_hash _001925_hash = {
91255 + .next = &_000501_hash,
91256 + .name = "pwr_ps_enter_read",
91257 + .param = PARAM3,
91258 +};
91259 +
91260 +struct size_overflow_hash _001926_hash = {
91261 + .next = NULL,
91262 + .name = "pwr_rcvd_awake_beacons_read",
91263 + .param = PARAM3,
91264 +};
91265 +
91266 +struct size_overflow_hash _001927_hash = {
91267 + .next = NULL,
91268 + .name = "pwr_rcvd_beacons_read",
91269 + .param = PARAM3,
91270 +};
91271 +
91272 +struct size_overflow_hash _001928_hash = {
91273 + .next = NULL,
91274 + .name = "pwr_tx_without_ps_read",
91275 + .param = PARAM3,
91276 +};
91277 +
91278 +struct size_overflow_hash _001929_hash = {
91279 + .next = NULL,
91280 + .name = "pwr_tx_with_ps_read",
91281 + .param = PARAM3,
91282 +};
91283 +
91284 +struct size_overflow_hash _001930_hash = {
91285 + .next = NULL,
91286 + .name = "pwr_wake_on_host_read",
91287 + .param = PARAM3,
91288 +};
91289 +
91290 +struct size_overflow_hash _001931_hash = {
91291 + .next = NULL,
91292 + .name = "pwr_wake_on_timer_exp_read",
91293 + .param = PARAM3,
91294 +};
91295 +
91296 +struct size_overflow_hash _001932_hash = {
91297 + .next = NULL,
91298 + .name = "queues_read",
91299 + .param = PARAM3,
91300 +};
91301 +
91302 +struct size_overflow_hash _001933_hash = {
91303 + .next = NULL,
91304 + .name = "raw_recvmsg",
91305 + .param = PARAM4,
91306 +};
91307 +
91308 +struct size_overflow_hash _001934_hash = {
91309 + .next = NULL,
91310 + .name = "rcname_read",
91311 + .param = PARAM3,
91312 +};
91313 +
91314 +struct size_overflow_hash _001935_hash = {
91315 + .next = NULL,
91316 + .name = "read_4k_modal_eeprom",
91317 + .param = PARAM3,
91318 +};
91319 +
91320 +struct size_overflow_hash _001936_hash = {
91321 + .next = NULL,
91322 + .name = "read_9287_modal_eeprom",
91323 + .param = PARAM3,
91324 +};
91325 +
91326 +struct size_overflow_hash _001937_hash = {
91327 + .next = NULL,
91328 + .name = "reada_find_extent",
91329 + .param = PARAM2,
91330 +};
91331 +
91332 +struct size_overflow_hash _001938_hash = {
91333 + .next = NULL,
91334 + .name = "read_def_modal_eeprom",
91335 + .param = PARAM3,
91336 +};
91337 +
91338 +struct size_overflow_hash _001939_hash = {
91339 + .next = NULL,
91340 + .name = "read_enabled_file_bool",
91341 + .param = PARAM3,
91342 +};
91343 +
91344 +struct size_overflow_hash _001940_hash = {
91345 + .next = NULL,
91346 + .name = "read_file_ani",
91347 + .param = PARAM3,
91348 +};
91349 +
91350 +struct size_overflow_hash _001941_hash = {
91351 + .next = NULL,
91352 + .name = "read_file_antenna",
91353 + .param = PARAM3,
91354 +};
91355 +
91356 +struct size_overflow_hash _001942_hash = {
91357 + .next = NULL,
91358 + .name = "read_file_base_eeprom",
91359 + .param = PARAM3,
91360 +};
91361 +
91362 +struct size_overflow_hash _001943_hash = {
91363 + .next = NULL,
91364 + .name = "read_file_beacon",
91365 + .param = PARAM3,
91366 +};
91367 +
91368 +struct size_overflow_hash _001944_hash = {
91369 + .next = NULL,
91370 + .name = "read_file_blob",
91371 + .param = PARAM3,
91372 +};
91373 +
91374 +struct size_overflow_hash _001945_hash = {
91375 + .next = NULL,
91376 + .name = "read_file_bool",
91377 + .param = PARAM3,
91378 +};
91379 +
91380 +struct size_overflow_hash _001946_hash = {
91381 + .next = NULL,
91382 + .name = "read_file_credit_dist_stats",
91383 + .param = PARAM3,
91384 +};
91385 +
91386 +struct size_overflow_hash _001947_hash = {
91387 + .next = NULL,
91388 + .name = "read_file_debug",
91389 + .param = PARAM3,
91390 +};
91391 +
91392 +struct size_overflow_hash _001948_hash = {
91393 + .next = NULL,
91394 + .name = "read_file_disable_ani",
91395 + .param = PARAM3,
91396 +};
91397 +
91398 +struct size_overflow_hash _001949_hash = {
91399 + .next = NULL,
91400 + .name = "read_file_dma",
91401 + .param = PARAM3,
91402 +};
91403 +
91404 +struct size_overflow_hash _001950_hash = {
91405 + .next = NULL,
91406 + .name = "read_file_dump_nfcal",
91407 + .param = PARAM3,
91408 +};
91409 +
91410 +struct size_overflow_hash _001951_hash = {
91411 + .next = NULL,
91412 + .name = "read_file_frameerrors",
91413 + .param = PARAM3,
91414 +};
91415 +
91416 +struct size_overflow_hash _001952_hash = {
91417 + .next = NULL,
91418 + .name = "read_file_interrupt",
91419 + .param = PARAM3,
91420 +};
91421 +
91422 +struct size_overflow_hash _001953_hash = {
91423 + .next = NULL,
91424 + .name = "read_file_misc",
91425 + .param = PARAM3,
91426 +};
91427 +
91428 +struct size_overflow_hash _001954_hash = {
91429 + .next = NULL,
91430 + .name = "read_file_modal_eeprom",
91431 + .param = PARAM3,
91432 +};
91433 +
91434 +struct size_overflow_hash _001955_hash = {
91435 + .next = NULL,
91436 + .name = "read_file_queue",
91437 + .param = PARAM3,
91438 +};
91439 +
91440 +struct size_overflow_hash _001956_hash = {
91441 + .next = NULL,
91442 + .name = "read_file_rcstat",
91443 + .param = PARAM3,
91444 +};
91445 +
91446 +struct size_overflow_hash _001957_hash = {
91447 + .next = NULL,
91448 + .name = "read_file_recv",
91449 + .param = PARAM3,
91450 +};
91451 +
91452 +struct size_overflow_hash _001958_hash = {
91453 + .next = NULL,
91454 + .name = "read_file_regidx",
91455 + .param = PARAM3,
91456 +};
91457 +
91458 +struct size_overflow_hash _001959_hash = {
91459 + .next = &_001952_hash,
91460 + .name = "read_file_regval",
91461 + .param = PARAM3,
91462 +};
91463 +
91464 +struct size_overflow_hash _001960_hash = {
91465 + .next = NULL,
91466 + .name = "read_file_reset",
91467 + .param = PARAM3,
91468 +};
91469 +
91470 +struct size_overflow_hash _001961_hash = {
91471 + .next = NULL,
91472 + .name = "read_file_rx_chainmask",
91473 + .param = PARAM3,
91474 +};
91475 +
91476 +struct size_overflow_hash _001962_hash = {
91477 + .next = NULL,
91478 + .name = "read_file_slot",
91479 + .param = PARAM3,
91480 +};
91481 +
91482 +struct size_overflow_hash _001963_hash = {
91483 + .next = NULL,
91484 + .name = "read_file_stations",
91485 + .param = PARAM3,
91486 +};
91487 +
91488 +struct size_overflow_hash _001964_hash = {
91489 + .next = NULL,
91490 + .name = "read_file_tgt_int_stats",
91491 + .param = PARAM3,
91492 +};
91493 +
91494 +struct size_overflow_hash _001965_hash = {
91495 + .next = NULL,
91496 + .name = "read_file_tgt_rx_stats",
91497 + .param = PARAM3,
91498 +};
91499 +
91500 +struct size_overflow_hash _001966_hash = {
91501 + .next = NULL,
91502 + .name = "read_file_tgt_stats",
91503 + .param = PARAM3,
91504 +};
91505 +
91506 +struct size_overflow_hash _001967_hash = {
91507 + .next = NULL,
91508 + .name = "read_file_tgt_tx_stats",
91509 + .param = PARAM3,
91510 +};
91511 +
91512 +struct size_overflow_hash _001968_hash = {
91513 + .next = NULL,
91514 + .name = "read_file_tx_chainmask",
91515 + .param = PARAM3,
91516 +};
91517 +
91518 +struct size_overflow_hash _001969_hash = {
91519 + .next = NULL,
91520 + .name = "read_file_war_stats",
91521 + .param = PARAM3,
91522 +};
91523 +
91524 +struct size_overflow_hash _001970_hash = {
91525 + .next = NULL,
91526 + .name = "read_file_xmit",
91527 + .param = PARAM3,
91528 +};
91529 +
91530 +struct size_overflow_hash _001971_hash = {
91531 + .next = NULL,
91532 + .name = "read_from_oldmem",
91533 + .param = PARAM2,
91534 +};
91535 +
91536 +struct size_overflow_hash _001972_hash = {
91537 + .next = NULL,
91538 + .name = "read_oldmem",
91539 + .param = PARAM3,
91540 +};
91541 +
91542 +struct size_overflow_hash _001973_hash = {
91543 + .next = NULL,
91544 + .name = "regmap_name_read_file",
91545 + .param = PARAM3,
91546 +};
91547 +
91548 +struct size_overflow_hash _001974_hash = {
91549 + .next = NULL,
91550 + .name = "repair_io_failure",
91551 + .param = PARAM4,
91552 +};
91553 +
91554 +struct size_overflow_hash _001975_hash = {
91555 + .next = NULL,
91556 + .name = "request_key_and_link",
91557 + .param = PARAM4,
91558 +};
91559 +
91560 +struct size_overflow_hash _001976_hash = {
91561 + .next = NULL,
91562 + .name = "res_counter_read",
91563 + .param = PARAM4,
91564 +};
91565 +
91566 +struct size_overflow_hash _001977_hash = {
91567 + .next = NULL,
91568 + .name = "retry_count_read",
91569 + .param = PARAM3,
91570 +};
91571 +
91572 +struct size_overflow_hash _001978_hash = {
91573 + .next = NULL,
91574 + .name = "rs_sta_dbgfs_rate_scale_data_read",
91575 + .param = PARAM3,
91576 +};
91577 +
91578 +struct size_overflow_hash _001979_hash = {
91579 + .next = NULL,
91580 + .name = "rs_sta_dbgfs_scale_table_read",
91581 + .param = PARAM3,
91582 +};
91583 +
91584 +struct size_overflow_hash _001980_hash = {
91585 + .next = NULL,
91586 + .name = "rs_sta_dbgfs_stats_table_read",
91587 + .param = PARAM3,
91588 +};
91589 +
91590 +struct size_overflow_hash _001981_hash = {
91591 + .next = NULL,
91592 + .name = "rts_threshold_read",
91593 + .param = PARAM3,
91594 +};
91595 +
91596 +struct size_overflow_hash _001982_hash = {
91597 + .next = NULL,
91598 + .name = "rx_dropped_read",
91599 + .param = PARAM3,
91600 +};
91601 +
91602 +struct size_overflow_hash _001983_hash = {
91603 + .next = NULL,
91604 + .name = "rx_fcs_err_read",
91605 + .param = PARAM3,
91606 +};
91607 +
91608 +struct size_overflow_hash _001984_hash = {
91609 + .next = NULL,
91610 + .name = "rx_hdr_overflow_read",
91611 + .param = PARAM3,
91612 +};
91613 +
91614 +struct size_overflow_hash _001985_hash = {
91615 + .next = NULL,
91616 + .name = "rx_hw_stuck_read",
91617 + .param = PARAM3,
91618 +};
91619 +
91620 +struct size_overflow_hash _001986_hash = {
91621 + .next = NULL,
91622 + .name = "rx_out_of_mem_read",
91623 + .param = PARAM3,
91624 +};
91625 +
91626 +struct size_overflow_hash _001987_hash = {
91627 + .next = NULL,
91628 + .name = "rx_path_reset_read",
91629 + .param = PARAM3,
91630 +};
91631 +
91632 +struct size_overflow_hash _001988_hash = {
91633 + .next = NULL,
91634 + .name = "rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read",
91635 + .param = PARAM3,
91636 +};
91637 +
91638 +struct size_overflow_hash _001989_hash = {
91639 + .next = NULL,
91640 + .name = "rxpipe_descr_host_int_trig_rx_data_read",
91641 + .param = PARAM3,
91642 +};
91643 +
91644 +struct size_overflow_hash _001990_hash = {
91645 + .next = NULL,
91646 + .name = "rxpipe_missed_beacon_host_int_trig_rx_data_read",
91647 + .param = PARAM3,
91648 +};
91649 +
91650 +struct size_overflow_hash _001991_hash = {
91651 + .next = NULL,
91652 + .name = "rxpipe_rx_prep_beacon_drop_read",
91653 + .param = PARAM3,
91654 +};
91655 +
91656 +struct size_overflow_hash _001992_hash = {
91657 + .next = NULL,
91658 + .name = "rxpipe_tx_xfr_host_int_trig_rx_data_read",
91659 + .param = PARAM3,
91660 +};
91661 +
91662 +struct size_overflow_hash _001993_hash = {
91663 + .next = NULL,
91664 + .name = "rx_reset_counter_read",
91665 + .param = PARAM3,
91666 +};
91667 +
91668 +struct size_overflow_hash _001994_hash = {
91669 + .next = NULL,
91670 + .name = "rx_xfr_hint_trig_read",
91671 + .param = PARAM3,
91672 +};
91673 +
91674 +struct size_overflow_hash _001995_hash = {
91675 + .next = NULL,
91676 + .name = "s5m_bulk_write",
91677 + .param = PARAM3,
91678 +};
91679 +
91680 +struct size_overflow_hash _001996_hash = {
91681 + .next = NULL,
91682 + .name = "scrub_setup_recheck_block",
91683 + .param = PARAM3|PARAM4,
91684 +};
91685 +
91686 +struct size_overflow_hash _001998_hash = {
91687 + .next = NULL,
91688 + .name = "scsi_adjust_queue_depth",
91689 + .param = PARAM3,
91690 +};
91691 +
91692 +struct size_overflow_hash _001999_hash = {
91693 + .next = NULL,
91694 + .name = "selinux_inode_notifysecctx",
91695 + .param = PARAM3,
91696 +};
91697 +
91698 +struct size_overflow_hash _002000_hash = {
91699 + .next = NULL,
91700 + .name = "sel_read_avc_cache_threshold",
91701 + .param = PARAM3,
91702 +};
91703 +
91704 +struct size_overflow_hash _002001_hash = {
91705 + .next = NULL,
91706 + .name = "sel_read_avc_hash_stats",
91707 + .param = PARAM3,
91708 +};
91709 +
91710 +struct size_overflow_hash _002002_hash = {
91711 + .next = NULL,
91712 + .name = "sel_read_bool",
91713 + .param = PARAM3,
91714 +};
91715 +
91716 +struct size_overflow_hash _002003_hash = {
91717 + .next = NULL,
91718 + .name = "sel_read_checkreqprot",
91719 + .param = PARAM3,
91720 +};
91721 +
91722 +struct size_overflow_hash _002004_hash = {
91723 + .next = NULL,
91724 + .name = "sel_read_class",
91725 + .param = PARAM3,
91726 +};
91727 +
91728 +struct size_overflow_hash _002005_hash = {
91729 + .next = NULL,
91730 + .name = "sel_read_enforce",
91731 + .param = PARAM3,
91732 +};
91733 +
91734 +struct size_overflow_hash _002006_hash = {
91735 + .next = NULL,
91736 + .name = "sel_read_handle_status",
91737 + .param = PARAM3,
91738 +};
91739 +
91740 +struct size_overflow_hash _002007_hash = {
91741 + .next = NULL,
91742 + .name = "sel_read_handle_unknown",
91743 + .param = PARAM3,
91744 +};
91745 +
91746 +struct size_overflow_hash _002008_hash = {
91747 + .next = NULL,
91748 + .name = "sel_read_initcon",
91749 + .param = PARAM3,
91750 +};
91751 +
91752 +struct size_overflow_hash _002009_hash = {
91753 + .next = NULL,
91754 + .name = "sel_read_mls",
91755 + .param = PARAM3,
91756 +};
91757 +
91758 +struct size_overflow_hash _002010_hash = {
91759 + .next = NULL,
91760 + .name = "sel_read_perm",
91761 + .param = PARAM3,
91762 +};
91763 +
91764 +struct size_overflow_hash _002011_hash = {
91765 + .next = NULL,
91766 + .name = "sel_read_policy",
91767 + .param = PARAM3,
91768 +};
91769 +
91770 +struct size_overflow_hash _002012_hash = {
91771 + .next = NULL,
91772 + .name = "sel_read_policycap",
91773 + .param = PARAM3,
91774 +};
91775 +
91776 +struct size_overflow_hash _002013_hash = {
91777 + .next = NULL,
91778 + .name = "sel_read_policyvers",
91779 + .param = PARAM3,
91780 +};
91781 +
91782 +struct size_overflow_hash _002014_hash = {
91783 + .next = NULL,
91784 + .name = "send_msg",
91785 + .param = PARAM4,
91786 +};
91787 +
91788 +struct size_overflow_hash _002015_hash = {
91789 + .next = NULL,
91790 + .name = "send_packet",
91791 + .param = PARAM4,
91792 +};
91793 +
91794 +struct size_overflow_hash _002016_hash = {
91795 + .next = NULL,
91796 + .name = "short_retry_limit_read",
91797 + .param = PARAM3,
91798 +};
91799 +
91800 +struct size_overflow_hash _002017_hash = {
91801 + .next = NULL,
91802 + .name = "simple_attr_read",
91803 + .param = PARAM3,
91804 +};
91805 +
91806 +struct size_overflow_hash _002018_hash = {
91807 + .next = NULL,
91808 + .name = "simple_transaction_read",
91809 + .param = PARAM3,
91810 +};
91811 +
91812 +struct size_overflow_hash _002019_hash = {
91813 + .next = NULL,
91814 + .name = "skb_copy_datagram_const_iovec",
91815 + .param = PARAM2|PARAM5|PARAM4,
91816 +};
91817 +
91818 +struct size_overflow_hash _002022_hash = {
91819 + .next = NULL,
91820 + .name = "skb_copy_datagram_iovec",
91821 + .param = PARAM2|PARAM4,
91822 +};
91823 +
91824 +struct size_overflow_hash _002024_hash = {
91825 + .next = NULL,
91826 + .name = "smk_read_ambient",
91827 + .param = PARAM3,
91828 +};
91829 +
91830 +struct size_overflow_hash _002025_hash = {
91831 + .next = NULL,
91832 + .name = "smk_read_direct",
91833 + .param = PARAM3,
91834 +};
91835 +
91836 +struct size_overflow_hash _002026_hash = {
91837 + .next = NULL,
91838 + .name = "smk_read_doi",
91839 + .param = PARAM3,
91840 +};
91841 +
91842 +struct size_overflow_hash _002027_hash = {
91843 + .next = NULL,
91844 + .name = "smk_read_logging",
91845 + .param = PARAM3,
91846 +};
91847 +
91848 +struct size_overflow_hash _002028_hash = {
91849 + .next = NULL,
91850 + .name = "smk_read_onlycap",
91851 + .param = PARAM3,
91852 +};
91853 +
91854 +struct size_overflow_hash _002029_hash = {
91855 + .next = NULL,
91856 + .name = "snapshot_read",
91857 + .param = PARAM3,
91858 +};
91859 +
91860 +struct size_overflow_hash _002030_hash = {
91861 + .next = NULL,
91862 + .name = "snd_cs4281_BA0_read",
91863 + .param = PARAM5,
91864 +};
91865 +
91866 +struct size_overflow_hash _002031_hash = {
91867 + .next = NULL,
91868 + .name = "snd_cs4281_BA1_read",
91869 + .param = PARAM5,
91870 +};
91871 +
91872 +struct size_overflow_hash _002032_hash = {
91873 + .next = NULL,
91874 + .name = "snd_cs46xx_io_read",
91875 + .param = PARAM5,
91876 +};
91877 +
91878 +struct size_overflow_hash _002033_hash = {
91879 + .next = NULL,
91880 + .name = "snd_gus_dram_read",
91881 + .param = PARAM4,
91882 +};
91883 +
91884 +struct size_overflow_hash _002034_hash = {
91885 + .next = NULL,
91886 + .name = "snd_pcm_oss_read",
91887 + .param = PARAM3,
91888 +};
91889 +
91890 +struct size_overflow_hash _002035_hash = {
91891 + .next = NULL,
91892 + .name = "snd_rme32_capture_copy",
91893 + .param = PARAM5,
91894 +};
91895 +
91896 +struct size_overflow_hash _002036_hash = {
91897 + .next = NULL,
91898 + .name = "snd_rme96_capture_copy",
91899 + .param = PARAM5,
91900 +};
91901 +
91902 +struct size_overflow_hash _002037_hash = {
91903 + .next = NULL,
91904 + .name = "snd_soc_hw_bulk_write_raw",
91905 + .param = PARAM4,
91906 +};
91907 +
91908 +struct size_overflow_hash _002038_hash = {
91909 + .next = &_001908_hash,
91910 + .name = "spi_show_regs",
91911 + .param = PARAM3,
91912 +};
91913 +
91914 +struct size_overflow_hash _002039_hash = {
91915 + .next = NULL,
91916 + .name = "sta_agg_status_read",
91917 + .param = PARAM3,
91918 +};
91919 +
91920 +struct size_overflow_hash _002040_hash = {
91921 + .next = NULL,
91922 + .name = "sta_connected_time_read",
91923 + .param = PARAM3,
91924 +};
91925 +
91926 +struct size_overflow_hash _002041_hash = {
91927 + .next = NULL,
91928 + .name = "sta_flags_read",
91929 + .param = PARAM3,
91930 +};
91931 +
91932 +struct size_overflow_hash _002042_hash = {
91933 + .next = NULL,
91934 + .name = "sta_ht_capa_read",
91935 + .param = PARAM3,
91936 +};
91937 +
91938 +struct size_overflow_hash _002043_hash = {
91939 + .next = NULL,
91940 + .name = "sta_last_seq_ctrl_read",
91941 + .param = PARAM3,
91942 +};
91943 +
91944 +struct size_overflow_hash _002044_hash = {
91945 + .next = NULL,
91946 + .name = "sta_num_ps_buf_frames_read",
91947 + .param = PARAM3,
91948 +};
91949 +
91950 +struct size_overflow_hash _002045_hash = {
91951 + .next = NULL,
91952 + .name = "st_read",
91953 + .param = PARAM3,
91954 +};
91955 +
91956 +struct size_overflow_hash _002046_hash = {
91957 + .next = NULL,
91958 + .name = "supply_map_read_file",
91959 + .param = PARAM3,
91960 +};
91961 +
91962 +struct size_overflow_hash _002047_hash = {
91963 + .next = NULL,
91964 + .name = "sysfs_read_file",
91965 + .param = PARAM3,
91966 +};
91967 +
91968 +struct size_overflow_hash _002048_hash = {
91969 + .next = NULL,
91970 + .name = "sys_lgetxattr",
91971 + .param = PARAM4,
91972 +};
91973 +
91974 +struct size_overflow_hash _002049_hash = {
91975 + .next = NULL,
91976 + .name = "sys_preadv",
91977 + .param = PARAM3,
91978 +};
91979 +
91980 +struct size_overflow_hash _002050_hash = {
91981 + .next = NULL,
91982 + .name = "sys_pwritev",
91983 + .param = PARAM3,
91984 +};
91985 +
91986 +struct size_overflow_hash _002051_hash = {
91987 + .next = NULL,
91988 + .name = "sys_readv",
91989 + .param = PARAM3,
91990 +};
91991 +
91992 +struct size_overflow_hash _002052_hash = {
91993 + .next = NULL,
91994 + .name = "sys_rt_sigpending",
91995 + .param = PARAM2,
91996 +};
91997 +
91998 +struct size_overflow_hash _002053_hash = {
91999 + .next = NULL,
92000 + .name = "sys_writev",
92001 + .param = PARAM3,
92002 +};
92003 +
92004 +struct size_overflow_hash _002054_hash = {
92005 + .next = NULL,
92006 + .name = "test_iso_queue",
92007 + .param = PARAM5,
92008 +};
92009 +
92010 +struct size_overflow_hash _002055_hash = {
92011 + .next = NULL,
92012 + .name = "ts_read",
92013 + .param = PARAM3,
92014 +};
92015 +
92016 +struct size_overflow_hash _002056_hash = {
92017 + .next = NULL,
92018 + .name = "TSS_authhmac",
92019 + .param = PARAM3,
92020 +};
92021 +
92022 +struct size_overflow_hash _002057_hash = {
92023 + .next = NULL,
92024 + .name = "TSS_checkhmac1",
92025 + .param = PARAM5,
92026 +};
92027 +
92028 +struct size_overflow_hash _002058_hash = {
92029 + .next = NULL,
92030 + .name = "TSS_checkhmac2",
92031 + .param = PARAM5|PARAM7,
92032 +};
92033 +
92034 +struct size_overflow_hash _002060_hash = {
92035 + .next = NULL,
92036 + .name = "tt3650_ci_msg_locked",
92037 + .param = PARAM4,
92038 +};
92039 +
92040 +struct size_overflow_hash _002061_hash = {
92041 + .next = NULL,
92042 + .name = "tun_sendmsg",
92043 + .param = PARAM4,
92044 +};
92045 +
92046 +struct size_overflow_hash _002062_hash = {
92047 + .next = NULL,
92048 + .name = "tx_internal_desc_overflow_read",
92049 + .param = PARAM3,
92050 +};
92051 +
92052 +struct size_overflow_hash _002063_hash = {
92053 + .next = NULL,
92054 + .name = "tx_queue_len_read",
92055 + .param = PARAM3,
92056 +};
92057 +
92058 +struct size_overflow_hash _002064_hash = {
92059 + .next = NULL,
92060 + .name = "tx_queue_status_read",
92061 + .param = PARAM3,
92062 +};
92063 +
92064 +struct size_overflow_hash _002065_hash = {
92065 + .next = NULL,
92066 + .name = "ubi_io_write_data",
92067 + .param = PARAM4|PARAM5,
92068 +};
92069 +
92070 +struct size_overflow_hash _002067_hash = {
92071 + .next = NULL,
92072 + .name = "uhci_debug_read",
92073 + .param = PARAM3,
92074 +};
92075 +
92076 +struct size_overflow_hash _002068_hash = {
92077 + .next = NULL,
92078 + .name = "unix_stream_recvmsg",
92079 + .param = PARAM4,
92080 +};
92081 +
92082 +struct size_overflow_hash _002069_hash = {
92083 + .next = NULL,
92084 + .name = "uvc_debugfs_stats_read",
92085 + .param = PARAM3,
92086 +};
92087 +
92088 +struct size_overflow_hash _002070_hash = {
92089 + .next = NULL,
92090 + .name = "vhost_add_used_and_signal_n",
92091 + .param = PARAM4,
92092 +};
92093 +
92094 +struct size_overflow_hash _002071_hash = {
92095 + .next = NULL,
92096 + .name = "vifs_state_read",
92097 + .param = PARAM3,
92098 +};
92099 +
92100 +struct size_overflow_hash _002072_hash = {
92101 + .next = NULL,
92102 + .name = "vmbus_open",
92103 + .param = PARAM2|PARAM3,
92104 +};
92105 +
92106 +struct size_overflow_hash _002074_hash = {
92107 + .next = NULL,
92108 + .name = "waiters_read",
92109 + .param = PARAM3,
92110 +};
92111 +
92112 +struct size_overflow_hash _002075_hash = {
92113 + .next = NULL,
92114 + .name = "wep_addr_key_count_read",
92115 + .param = PARAM3,
92116 +};
92117 +
92118 +struct size_overflow_hash _002076_hash = {
92119 + .next = NULL,
92120 + .name = "wep_decrypt_fail_read",
92121 + .param = PARAM3,
92122 +};
92123 +
92124 +struct size_overflow_hash _002077_hash = {
92125 + .next = NULL,
92126 + .name = "wep_default_key_count_read",
92127 + .param = PARAM3,
92128 +};
92129 +
92130 +struct size_overflow_hash _002078_hash = {
92131 + .next = NULL,
92132 + .name = "wep_interrupt_read",
92133 + .param = PARAM3,
92134 +};
92135 +
92136 +struct size_overflow_hash _002079_hash = {
92137 + .next = &_000915_hash,
92138 + .name = "wep_key_not_found_read",
92139 + .param = PARAM3,
92140 +};
92141 +
92142 +struct size_overflow_hash _002080_hash = {
92143 + .next = NULL,
92144 + .name = "wep_packets_read",
92145 + .param = PARAM3,
92146 +};
92147 +
92148 +struct size_overflow_hash _002081_hash = {
92149 + .next = NULL,
92150 + .name = "wl1271_format_buffer",
92151 + .param = PARAM2,
92152 +};
92153 +
92154 +struct size_overflow_hash _002082_hash = {
92155 + .next = NULL,
92156 + .name = "wm8994_bulk_write",
92157 + .param = PARAM3,
92158 +};
92159 +
92160 +struct size_overflow_hash _002083_hash = {
92161 + .next = NULL,
92162 + .name = "wusb_prf_256",
92163 + .param = PARAM7,
92164 +};
92165 +
92166 +struct size_overflow_hash _002084_hash = {
92167 + .next = NULL,
92168 + .name = "wusb_prf_64",
92169 + .param = PARAM7,
92170 +};
92171 +
92172 +struct size_overflow_hash _002085_hash = {
92173 + .next = NULL,
92174 + .name = "xfs_buf_read_uncached",
92175 + .param = PARAM4,
92176 +};
92177 +
92178 +struct size_overflow_hash _002086_hash = {
92179 + .next = NULL,
92180 + .name = "xfs_iext_add",
92181 + .param = PARAM3,
92182 +};
92183 +
92184 +struct size_overflow_hash _002087_hash = {
92185 + .next = NULL,
92186 + .name = "xfs_iext_remove_direct",
92187 + .param = PARAM3,
92188 +};
92189 +
92190 +struct size_overflow_hash _002088_hash = {
92191 + .next = NULL,
92192 + .name = "xfs_trans_get_efd",
92193 + .param = PARAM3,
92194 +};
92195 +
92196 +struct size_overflow_hash _002089_hash = {
92197 + .next = NULL,
92198 + .name = "xfs_trans_get_efi",
92199 + .param = PARAM2,
92200 +};
92201 +
92202 +struct size_overflow_hash _002090_hash = {
92203 + .next = NULL,
92204 + .name = "xlog_get_bp",
92205 + .param = PARAM2,
92206 +};
92207 +
92208 +struct size_overflow_hash _002091_hash = {
92209 + .next = NULL,
92210 + .name = "xz_dec_init",
92211 + .param = PARAM2,
92212 +};
92213 +
92214 +struct size_overflow_hash _002092_hash = {
92215 + .next = NULL,
92216 + .name = "aac_change_queue_depth",
92217 + .param = PARAM2,
92218 +};
92219 +
92220 +struct size_overflow_hash _002093_hash = {
92221 + .next = NULL,
92222 + .name = "agp_allocate_memory_wrap",
92223 + .param = PARAM1,
92224 +};
92225 +
92226 +struct size_overflow_hash _002094_hash = {
92227 + .next = NULL,
92228 + .name = "arcmsr_adjust_disk_queue_depth",
92229 + .param = PARAM2,
92230 +};
92231 +
92232 +struct size_overflow_hash _002095_hash = {
92233 + .next = NULL,
92234 + .name = "atalk_recvmsg",
92235 + .param = PARAM4,
92236 +};
92237 +
92238 +struct size_overflow_hash _002097_hash = {
92239 + .next = NULL,
92240 + .name = "atomic_read_file",
92241 + .param = PARAM3,
92242 +};
92243 +
92244 +struct size_overflow_hash _002098_hash = {
92245 + .next = NULL,
92246 + .name = "ax25_recvmsg",
92247 + .param = PARAM4,
92248 +};
92249 +
92250 +struct size_overflow_hash _002099_hash = {
92251 + .next = NULL,
92252 + .name = "beacon_interval_read",
92253 + .param = PARAM3,
92254 +};
92255 +
92256 +struct size_overflow_hash _002100_hash = {
92257 + .next = NULL,
92258 + .name = "btrfs_init_new_buffer",
92259 + .param = PARAM4,
92260 +};
92261 +
92262 +struct size_overflow_hash _002101_hash = {
92263 + .next = NULL,
92264 + .name = "btrfs_mksubvol",
92265 + .param = PARAM3,
92266 +};
92267 +
92268 +struct size_overflow_hash _002102_hash = {
92269 + .next = NULL,
92270 + .name = "bt_sock_recvmsg",
92271 + .param = PARAM4,
92272 +};
92273 +
92274 +struct size_overflow_hash _002103_hash = {
92275 + .next = NULL,
92276 + .name = "bt_sock_stream_recvmsg",
92277 + .param = PARAM4,
92278 +};
92279 +
92280 +struct size_overflow_hash _002104_hash = {
92281 + .next = NULL,
92282 + .name = "caif_seqpkt_recvmsg",
92283 + .param = PARAM4,
92284 +};
92285 +
92286 +struct size_overflow_hash _002105_hash = {
92287 + .next = NULL,
92288 + .name = "cpu_type_read",
92289 + .param = PARAM3,
92290 +};
92291 +
92292 +struct size_overflow_hash _002106_hash = {
92293 + .next = NULL,
92294 + .name = "cx18_read",
92295 + .param = PARAM3,
92296 +};
92297 +
92298 +struct size_overflow_hash _002107_hash = {
92299 + .next = NULL,
92300 + .name = "dccp_recvmsg",
92301 + .param = PARAM4,
92302 +};
92303 +
92304 +struct size_overflow_hash _002108_hash = {
92305 + .next = NULL,
92306 + .name = "depth_read",
92307 + .param = PARAM3,
92308 +};
92309 +
92310 +struct size_overflow_hash _002109_hash = {
92311 + .next = NULL,
92312 + .name = "dfs_global_file_read",
92313 + .param = PARAM3,
92314 +};
92315 +
92316 +struct size_overflow_hash _002110_hash = {
92317 + .next = NULL,
92318 + .name = "dgram_recvmsg",
92319 + .param = PARAM4,
92320 +};
92321 +
92322 +struct size_overflow_hash _002111_hash = {
92323 + .next = NULL,
92324 + .name = "dma_skb_copy_datagram_iovec",
92325 + .param = PARAM3|PARAM5,
92326 +};
92327 +
92328 +struct size_overflow_hash _002113_hash = {
92329 + .next = NULL,
92330 + .name = "dtim_interval_read",
92331 + .param = PARAM3,
92332 +};
92333 +
92334 +struct size_overflow_hash _002114_hash = {
92335 + .next = NULL,
92336 + .name = "dynamic_ps_timeout_read",
92337 + .param = PARAM3,
92338 +};
92339 +
92340 +struct size_overflow_hash _002115_hash = {
92341 + .next = NULL,
92342 + .name = "enable_read",
92343 + .param = PARAM3,
92344 +};
92345 +
92346 +struct size_overflow_hash _002116_hash = {
92347 + .next = &_001885_hash,
92348 + .name = "exofs_read_kern",
92349 + .param = PARAM6,
92350 +};
92351 +
92352 +struct size_overflow_hash _002117_hash = {
92353 + .next = NULL,
92354 + .name = "fc_change_queue_depth",
92355 + .param = PARAM2,
92356 +};
92357 +
92358 +struct size_overflow_hash _002118_hash = {
92359 + .next = NULL,
92360 + .name = "forced_ps_read",
92361 + .param = PARAM3,
92362 +};
92363 +
92364 +struct size_overflow_hash _002119_hash = {
92365 + .next = NULL,
92366 + .name = "frequency_read",
92367 + .param = PARAM3,
92368 +};
92369 +
92370 +struct size_overflow_hash _002120_hash = {
92371 + .next = NULL,
92372 + .name = "get_alua_req",
92373 + .param = PARAM3,
92374 +};
92375 +
92376 +struct size_overflow_hash _002121_hash = {
92377 + .next = NULL,
92378 + .name = "get_rdac_req",
92379 + .param = PARAM3,
92380 +};
92381 +
92382 +struct size_overflow_hash _002122_hash = {
92383 + .next = NULL,
92384 + .name = "hci_sock_recvmsg",
92385 + .param = PARAM4,
92386 +};
92387 +
92388 +struct size_overflow_hash _002123_hash = {
92389 + .next = NULL,
92390 + .name = "hpsa_change_queue_depth",
92391 + .param = PARAM2,
92392 +};
92393 +
92394 +struct size_overflow_hash _002124_hash = {
92395 + .next = NULL,
92396 + .name = "hptiop_adjust_disk_queue_depth",
92397 + .param = PARAM2,
92398 +};
92399 +
92400 +struct size_overflow_hash _002125_hash = {
92401 + .next = NULL,
92402 + .name = "ide_queue_pc_tail",
92403 + .param = PARAM5,
92404 +};
92405 +
92406 +struct size_overflow_hash _002126_hash = {
92407 + .next = NULL,
92408 + .name = "ide_raw_taskfile",
92409 + .param = PARAM4,
92410 +};
92411 +
92412 +struct size_overflow_hash _002127_hash = {
92413 + .next = NULL,
92414 + .name = "idetape_queue_rw_tail",
92415 + .param = PARAM3,
92416 +};
92417 +
92418 +struct size_overflow_hash _002128_hash = {
92419 + .next = NULL,
92420 + .name = "ieee80211_if_read_aid",
92421 + .param = PARAM3,
92422 +};
92423 +
92424 +struct size_overflow_hash _002129_hash = {
92425 + .next = NULL,
92426 + .name = "ieee80211_if_read_auto_open_plinks",
92427 + .param = PARAM3,
92428 +};
92429 +
92430 +struct size_overflow_hash _002130_hash = {
92431 + .next = NULL,
92432 + .name = "ieee80211_if_read_ave_beacon",
92433 + .param = PARAM3,
92434 +};
92435 +
92436 +struct size_overflow_hash _002131_hash = {
92437 + .next = NULL,
92438 + .name = "ieee80211_if_read_bssid",
92439 + .param = PARAM3,
92440 +};
92441 +
92442 +struct size_overflow_hash _002132_hash = {
92443 + .next = NULL,
92444 + .name = "ieee80211_if_read_channel_type",
92445 + .param = PARAM3,
92446 +};
92447 +
92448 +struct size_overflow_hash _002133_hash = {
92449 + .next = NULL,
92450 + .name = "ieee80211_if_read_dot11MeshConfirmTimeout",
92451 + .param = PARAM3,
92452 +};
92453 +
92454 +struct size_overflow_hash _002134_hash = {
92455 + .next = NULL,
92456 + .name = "ieee80211_if_read_dot11MeshGateAnnouncementProtocol",
92457 + .param = PARAM3,
92458 +};
92459 +
92460 +struct size_overflow_hash _002135_hash = {
92461 + .next = NULL,
92462 + .name = "ieee80211_if_read_dot11MeshHoldingTimeout",
92463 + .param = PARAM3,
92464 +};
92465 +
92466 +struct size_overflow_hash _002136_hash = {
92467 + .next = NULL,
92468 + .name = "ieee80211_if_read_dot11MeshHWMPactivePathTimeout",
92469 + .param = PARAM3,
92470 +};
92471 +
92472 +struct size_overflow_hash _002137_hash = {
92473 + .next = NULL,
92474 + .name = "ieee80211_if_read_dot11MeshHWMPmaxPREQretries",
92475 + .param = PARAM3,
92476 +};
92477 +
92478 +struct size_overflow_hash _002138_hash = {
92479 + .next = NULL,
92480 + .name = "ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime",
92481 + .param = PARAM3,
92482 +};
92483 +
92484 +struct size_overflow_hash _002139_hash = {
92485 + .next = NULL,
92486 + .name = "ieee80211_if_read_dot11MeshHWMPperrMinInterval",
92487 + .param = PARAM3,
92488 +};
92489 +
92490 +struct size_overflow_hash _002140_hash = {
92491 + .next = NULL,
92492 + .name = "ieee80211_if_read_dot11MeshHWMPpreqMinInterval",
92493 + .param = PARAM3,
92494 +};
92495 +
92496 +struct size_overflow_hash _002141_hash = {
92497 + .next = NULL,
92498 + .name = "ieee80211_if_read_dot11MeshHWMPRannInterval",
92499 + .param = PARAM3,
92500 +};
92501 +
92502 +struct size_overflow_hash _002142_hash = {
92503 + .next = NULL,
92504 + .name = "ieee80211_if_read_dot11MeshHWMPRootMode",
92505 + .param = PARAM3,
92506 +};
92507 +
92508 +struct size_overflow_hash _002143_hash = {
92509 + .next = NULL,
92510 + .name = "ieee80211_if_read_dot11MeshMaxPeerLinks",
92511 + .param = PARAM3,
92512 +};
92513 +
92514 +struct size_overflow_hash _002144_hash = {
92515 + .next = NULL,
92516 + .name = "ieee80211_if_read_dot11MeshMaxRetries",
92517 + .param = PARAM3,
92518 +};
92519 +
92520 +struct size_overflow_hash _002145_hash = {
92521 + .next = NULL,
92522 + .name = "ieee80211_if_read_dot11MeshRetryTimeout",
92523 + .param = PARAM3,
92524 +};
92525 +
92526 +struct size_overflow_hash _002146_hash = {
92527 + .next = NULL,
92528 + .name = "ieee80211_if_read_dot11MeshTTL",
92529 + .param = PARAM3,
92530 +};
92531 +
92532 +struct size_overflow_hash _002147_hash = {
92533 + .next = NULL,
92534 + .name = "ieee80211_if_read_dropped_frames_congestion",
92535 + .param = PARAM3,
92536 +};
92537 +
92538 +struct size_overflow_hash _002148_hash = {
92539 + .next = NULL,
92540 + .name = "ieee80211_if_read_dropped_frames_no_route",
92541 + .param = PARAM3,
92542 +};
92543 +
92544 +struct size_overflow_hash _002149_hash = {
92545 + .next = NULL,
92546 + .name = "ieee80211_if_read_dropped_frames_ttl",
92547 + .param = PARAM3,
92548 +};
92549 +
92550 +struct size_overflow_hash _002150_hash = {
92551 + .next = NULL,
92552 + .name = "ieee80211_if_read_drop_unencrypted",
92553 + .param = PARAM3,
92554 +};
92555 +
92556 +struct size_overflow_hash _002151_hash = {
92557 + .next = NULL,
92558 + .name = "ieee80211_if_read_dtim_count",
92559 + .param = PARAM3,
92560 +};
92561 +
92562 +struct size_overflow_hash _002152_hash = {
92563 + .next = NULL,
92564 + .name = "ieee80211_if_read_element_ttl",
92565 + .param = PARAM3,
92566 +};
92567 +
92568 +struct size_overflow_hash _002153_hash = {
92569 + .next = NULL,
92570 + .name = "ieee80211_if_read_estab_plinks",
92571 + .param = PARAM3,
92572 +};
92573 +
92574 +struct size_overflow_hash _002154_hash = {
92575 + .next = NULL,
92576 + .name = "ieee80211_if_read_flags",
92577 + .param = PARAM3,
92578 +};
92579 +
92580 +struct size_overflow_hash _002155_hash = {
92581 + .next = NULL,
92582 + .name = "ieee80211_if_read_fwded_frames",
92583 + .param = PARAM3,
92584 +};
92585 +
92586 +struct size_overflow_hash _002156_hash = {
92587 + .next = &_000151_hash,
92588 + .name = "ieee80211_if_read_fwded_mcast",
92589 + .param = PARAM3,
92590 +};
92591 +
92592 +struct size_overflow_hash _002157_hash = {
92593 + .next = NULL,
92594 + .name = "ieee80211_if_read_fwded_unicast",
92595 + .param = PARAM3,
92596 +};
92597 +
92598 +struct size_overflow_hash _002158_hash = {
92599 + .next = NULL,
92600 + .name = "ieee80211_if_read_last_beacon",
92601 + .param = PARAM3,
92602 +};
92603 +
92604 +struct size_overflow_hash _002159_hash = {
92605 + .next = NULL,
92606 + .name = "ieee80211_if_read_min_discovery_timeout",
92607 + .param = PARAM3,
92608 +};
92609 +
92610 +struct size_overflow_hash _002160_hash = {
92611 + .next = NULL,
92612 + .name = "ieee80211_if_read_num_buffered_multicast",
92613 + .param = PARAM3,
92614 +};
92615 +
92616 +struct size_overflow_hash _002161_hash = {
92617 + .next = NULL,
92618 + .name = "ieee80211_if_read_num_sta_authorized",
92619 + .param = PARAM3,
92620 +};
92621 +
92622 +struct size_overflow_hash _002162_hash = {
92623 + .next = NULL,
92624 + .name = "ieee80211_if_read_num_sta_ps",
92625 + .param = PARAM3,
92626 +};
92627 +
92628 +struct size_overflow_hash _002163_hash = {
92629 + .next = NULL,
92630 + .name = "ieee80211_if_read_path_refresh_time",
92631 + .param = PARAM3,
92632 +};
92633 +
92634 +struct size_overflow_hash _002164_hash = {
92635 + .next = NULL,
92636 + .name = "ieee80211_if_read_peer",
92637 + .param = PARAM3,
92638 +};
92639 +
92640 +struct size_overflow_hash _002165_hash = {
92641 + .next = NULL,
92642 + .name = "ieee80211_if_read_rc_rateidx_mask_2ghz",
92643 + .param = PARAM3,
92644 +};
92645 +
92646 +struct size_overflow_hash _002166_hash = {
92647 + .next = NULL,
92648 + .name = "ieee80211_if_read_rc_rateidx_mask_5ghz",
92649 + .param = PARAM3,
92650 +};
92651 +
92652 +struct size_overflow_hash _002167_hash = {
92653 + .next = NULL,
92654 + .name = "ieee80211_if_read_rc_rateidx_mcs_mask_2ghz",
92655 + .param = PARAM3,
92656 +};
92657 +
92658 +struct size_overflow_hash _002168_hash = {
92659 + .next = NULL,
92660 + .name = "ieee80211_if_read_rc_rateidx_mcs_mask_5ghz",
92661 + .param = PARAM3,
92662 +};
92663 +
92664 +struct size_overflow_hash _002169_hash = {
92665 + .next = NULL,
92666 + .name = "ieee80211_if_read_rssi_threshold",
92667 + .param = PARAM3,
92668 +};
92669 +
92670 +struct size_overflow_hash _002170_hash = {
92671 + .next = NULL,
92672 + .name = "ieee80211_if_read_smps",
92673 + .param = PARAM3,
92674 +};
92675 +
92676 +struct size_overflow_hash _002171_hash = {
92677 + .next = NULL,
92678 + .name = "ieee80211_if_read_state",
92679 + .param = PARAM3,
92680 +};
92681 +
92682 +struct size_overflow_hash _002172_hash = {
92683 + .next = NULL,
92684 + .name = "ieee80211_if_read_tkip_mic_test",
92685 + .param = PARAM3,
92686 +};
92687 +
92688 +struct size_overflow_hash _002173_hash = {
92689 + .next = NULL,
92690 + .name = "ieee80211_if_read_tsf",
92691 + .param = PARAM3,
92692 +};
92693 +
92694 +struct size_overflow_hash _002174_hash = {
92695 + .next = NULL,
92696 + .name = "ieee80211_if_read_uapsd_max_sp_len",
92697 + .param = PARAM3,
92698 +};
92699 +
92700 +struct size_overflow_hash _002175_hash = {
92701 + .next = NULL,
92702 + .name = "ieee80211_if_read_uapsd_queues",
92703 + .param = PARAM3,
92704 +};
92705 +
92706 +struct size_overflow_hash _002176_hash = {
92707 + .next = NULL,
92708 + .name = "ieee80211_rx_mgmt_beacon",
92709 + .param = PARAM3,
92710 +};
92711 +
92712 +struct size_overflow_hash _002177_hash = {
92713 + .next = NULL,
92714 + .name = "ieee80211_rx_mgmt_probe_resp",
92715 + .param = PARAM3,
92716 +};
92717 +
92718 +struct size_overflow_hash _002178_hash = {
92719 + .next = NULL,
92720 + .name = "ima_show_htable_violations",
92721 + .param = PARAM3,
92722 +};
92723 +
92724 +struct size_overflow_hash _002179_hash = {
92725 + .next = NULL,
92726 + .name = "ima_show_measurements_count",
92727 + .param = PARAM3,
92728 +};
92729 +
92730 +struct size_overflow_hash _002180_hash = {
92731 + .next = NULL,
92732 + .name = "insert_one_name",
92733 + .param = PARAM7,
92734 +};
92735 +
92736 +struct size_overflow_hash _002181_hash = {
92737 + .next = NULL,
92738 + .name = "ipr_change_queue_depth",
92739 + .param = PARAM2,
92740 +};
92741 +
92742 +struct size_overflow_hash _002182_hash = {
92743 + .next = NULL,
92744 + .name = "ip_recv_error",
92745 + .param = PARAM3,
92746 +};
92747 +
92748 +struct size_overflow_hash _002183_hash = {
92749 + .next = NULL,
92750 + .name = "ipv6_recv_error",
92751 + .param = PARAM3,
92752 +};
92753 +
92754 +struct size_overflow_hash _002184_hash = {
92755 + .next = NULL,
92756 + .name = "ipv6_recv_rxpmtu",
92757 + .param = PARAM3,
92758 +};
92759 +
92760 +struct size_overflow_hash _002185_hash = {
92761 + .next = NULL,
92762 + .name = "ipx_recvmsg",
92763 + .param = PARAM4,
92764 +};
92765 +
92766 +struct size_overflow_hash _002186_hash = {
92767 + .next = NULL,
92768 + .name = "irda_recvmsg_dgram",
92769 + .param = PARAM4,
92770 +};
92771 +
92772 +struct size_overflow_hash _002187_hash = {
92773 + .next = NULL,
92774 + .name = "iscsi_change_queue_depth",
92775 + .param = PARAM2,
92776 +};
92777 +
92778 +struct size_overflow_hash _002188_hash = {
92779 + .next = &_000303_hash,
92780 + .name = "ivtv_read_pos",
92781 + .param = PARAM3,
92782 +};
92783 +
92784 +struct size_overflow_hash _002189_hash = {
92785 + .next = NULL,
92786 + .name = "key_conf_hw_key_idx_read",
92787 + .param = PARAM3,
92788 +};
92789 +
92790 +struct size_overflow_hash _002190_hash = {
92791 + .next = NULL,
92792 + .name = "key_conf_keyidx_read",
92793 + .param = PARAM3,
92794 +};
92795 +
92796 +struct size_overflow_hash _002191_hash = {
92797 + .next = NULL,
92798 + .name = "key_conf_keylen_read",
92799 + .param = PARAM3,
92800 +};
92801 +
92802 +struct size_overflow_hash _002192_hash = {
92803 + .next = NULL,
92804 + .name = "key_flags_read",
92805 + .param = PARAM3,
92806 +};
92807 +
92808 +struct size_overflow_hash _002193_hash = {
92809 + .next = NULL,
92810 + .name = "key_ifindex_read",
92811 + .param = PARAM3,
92812 +};
92813 +
92814 +struct size_overflow_hash _002194_hash = {
92815 + .next = NULL,
92816 + .name = "key_tx_rx_count_read",
92817 + .param = PARAM3,
92818 +};
92819 +
92820 +struct size_overflow_hash _002195_hash = {
92821 + .next = NULL,
92822 + .name = "l2cap_sock_sendmsg",
92823 + .param = PARAM4,
92824 +};
92825 +
92826 +struct size_overflow_hash _002196_hash = {
92827 + .next = NULL,
92828 + .name = "l2tp_ip_recvmsg",
92829 + .param = PARAM4,
92830 +};
92831 +
92832 +struct size_overflow_hash _002197_hash = {
92833 + .next = NULL,
92834 + .name = "llc_ui_recvmsg",
92835 + .param = PARAM4,
92836 +};
92837 +
92838 +struct size_overflow_hash _002198_hash = {
92839 + .next = NULL,
92840 + .name = "lpfc_change_queue_depth",
92841 + .param = PARAM2,
92842 +};
92843 +
92844 +struct size_overflow_hash _002199_hash = {
92845 + .next = &_001832_hash,
92846 + .name = "macvtap_do_read",
92847 + .param = PARAM4,
92848 +};
92849 +
92850 +struct size_overflow_hash _002200_hash = {
92851 + .next = NULL,
92852 + .name = "megaraid_change_queue_depth",
92853 + .param = PARAM2,
92854 +};
92855 +
92856 +struct size_overflow_hash _002201_hash = {
92857 + .next = NULL,
92858 + .name = "megasas_change_queue_depth",
92859 + .param = PARAM2,
92860 +};
92861 +
92862 +struct size_overflow_hash _002202_hash = {
92863 + .next = NULL,
92864 + .name = "mptscsih_change_queue_depth",
92865 + .param = PARAM2,
92866 +};
92867 +
92868 +struct size_overflow_hash _002203_hash = {
92869 + .next = NULL,
92870 + .name = "NCR_700_change_queue_depth",
92871 + .param = PARAM2,
92872 +};
92873 +
92874 +struct size_overflow_hash _002204_hash = {
92875 + .next = NULL,
92876 + .name = "netlink_recvmsg",
92877 + .param = PARAM4,
92878 +};
92879 +
92880 +struct size_overflow_hash _002205_hash = {
92881 + .next = NULL,
92882 + .name = "nfsctl_transaction_read",
92883 + .param = PARAM3,
92884 +};
92885 +
92886 +struct size_overflow_hash _002206_hash = {
92887 + .next = NULL,
92888 + .name = "nfs_map_group_to_gid",
92889 + .param = PARAM3,
92890 +};
92891 +
92892 +struct size_overflow_hash _002207_hash = {
92893 + .next = NULL,
92894 + .name = "nfs_map_name_to_uid",
92895 + .param = PARAM3,
92896 +};
92897 +
92898 +struct size_overflow_hash _002208_hash = {
92899 + .next = NULL,
92900 + .name = "nr_recvmsg",
92901 + .param = PARAM4,
92902 +};
92903 +
92904 +struct size_overflow_hash _002209_hash = {
92905 + .next = NULL,
92906 + .name = "osd_req_list_collection_objects",
92907 + .param = PARAM5,
92908 +};
92909 +
92910 +struct size_overflow_hash _002210_hash = {
92911 + .next = NULL,
92912 + .name = "osd_req_list_partition_objects",
92913 + .param = PARAM5,
92914 +};
92915 +
92916 +struct size_overflow_hash _002212_hash = {
92917 + .next = NULL,
92918 + .name = "packet_recv_error",
92919 + .param = PARAM3,
92920 +};
92921 +
92922 +struct size_overflow_hash _002213_hash = {
92923 + .next = NULL,
92924 + .name = "packet_recvmsg",
92925 + .param = PARAM4,
92926 +};
92927 +
92928 +struct size_overflow_hash _002214_hash = {
92929 + .next = NULL,
92930 + .name = "pep_recvmsg",
92931 + .param = PARAM4,
92932 +};
92933 +
92934 +struct size_overflow_hash _002215_hash = {
92935 + .next = NULL,
92936 + .name = "pfkey_recvmsg",
92937 + .param = PARAM4,
92938 +};
92939 +
92940 +struct size_overflow_hash _002216_hash = {
92941 + .next = NULL,
92942 + .name = "ping_recvmsg",
92943 + .param = PARAM4,
92944 +};
92945 +
92946 +struct size_overflow_hash _002217_hash = {
92947 + .next = NULL,
92948 + .name = "pmcraid_change_queue_depth",
92949 + .param = PARAM2,
92950 +};
92951 +
92952 +struct size_overflow_hash _002218_hash = {
92953 + .next = NULL,
92954 + .name = "pn_recvmsg",
92955 + .param = PARAM4,
92956 +};
92957 +
92958 +struct size_overflow_hash _002219_hash = {
92959 + .next = NULL,
92960 + .name = "pointer_size_read",
92961 + .param = PARAM3,
92962 +};
92963 +
92964 +struct size_overflow_hash _002220_hash = {
92965 + .next = NULL,
92966 + .name = "power_read",
92967 + .param = PARAM3,
92968 +};
92969 +
92970 +struct size_overflow_hash _002221_hash = {
92971 + .next = NULL,
92972 + .name = "pppoe_recvmsg",
92973 + .param = PARAM4,
92974 +};
92975 +
92976 +struct size_overflow_hash _002222_hash = {
92977 + .next = NULL,
92978 + .name = "pppol2tp_recvmsg",
92979 + .param = PARAM4,
92980 +};
92981 +
92982 +struct size_overflow_hash _002223_hash = {
92983 + .next = NULL,
92984 + .name = "qla2x00_adjust_sdev_qdepth_up",
92985 + .param = PARAM2,
92986 +};
92987 +
92988 +struct size_overflow_hash _002224_hash = {
92989 + .next = NULL,
92990 + .name = "qla2x00_change_queue_depth",
92991 + .param = PARAM2,
92992 +};
92993 +
92994 +struct size_overflow_hash _002225_hash = {
92995 + .next = NULL,
92996 + .name = "raw_recvmsg",
92997 + .param = PARAM4,
92998 +};
92999 +
93000 +struct size_overflow_hash _002226_hash = {
93001 + .next = NULL,
93002 + .name = "rawsock_recvmsg",
93003 + .param = PARAM4,
93004 +};
93005 +
93006 +struct size_overflow_hash _002227_hash = {
93007 + .next = NULL,
93008 + .name = "rawv6_recvmsg",
93009 + .param = PARAM4,
93010 +};
93011 +
93012 +struct size_overflow_hash _002228_hash = {
93013 + .next = NULL,
93014 + .name = "reada_add_block",
93015 + .param = PARAM2,
93016 +};
93017 +
93018 +struct size_overflow_hash _002229_hash = {
93019 + .next = NULL,
93020 + .name = "readahead_tree_block",
93021 + .param = PARAM3,
93022 +};
93023 +
93024 +struct size_overflow_hash _002230_hash = {
93025 + .next = NULL,
93026 + .name = "reada_tree_block_flagged",
93027 + .param = PARAM3,
93028 +};
93029 +
93030 +struct size_overflow_hash _002231_hash = {
93031 + .next = NULL,
93032 + .name = "read_tree_block",
93033 + .param = PARAM3,
93034 +};
93035 +
93036 +struct size_overflow_hash _002232_hash = {
93037 + .next = NULL,
93038 + .name = "recover_peb",
93039 + .param = PARAM6|PARAM7,
93040 +};
93041 +
93042 +struct size_overflow_hash _002234_hash = {
93043 + .next = NULL,
93044 + .name = "recv_msg",
93045 + .param = PARAM4,
93046 +};
93047 +
93048 +struct size_overflow_hash _002235_hash = {
93049 + .next = NULL,
93050 + .name = "recv_stream",
93051 + .param = PARAM4,
93052 +};
93053 +
93054 +struct size_overflow_hash _002236_hash = {
93055 + .next = NULL,
93056 + .name = "_req_append_segment",
93057 + .param = PARAM2,
93058 +};
93059 +
93060 +struct size_overflow_hash _002237_hash = {
93061 + .next = NULL,
93062 + .name = "request_key_async",
93063 + .param = PARAM4,
93064 +};
93065 +
93066 +struct size_overflow_hash _002238_hash = {
93067 + .next = NULL,
93068 + .name = "request_key_async_with_auxdata",
93069 + .param = PARAM4,
93070 +};
93071 +
93072 +struct size_overflow_hash _002239_hash = {
93073 + .next = NULL,
93074 + .name = "request_key_with_auxdata",
93075 + .param = PARAM4,
93076 +};
93077 +
93078 +struct size_overflow_hash _002240_hash = {
93079 + .next = NULL,
93080 + .name = "rose_recvmsg",
93081 + .param = PARAM4,
93082 +};
93083 +
93084 +struct size_overflow_hash _002241_hash = {
93085 + .next = NULL,
93086 + .name = "rxrpc_recvmsg",
93087 + .param = PARAM4,
93088 +};
93089 +
93090 +struct size_overflow_hash _002242_hash = {
93091 + .next = NULL,
93092 + .name = "rx_streaming_always_read",
93093 + .param = PARAM3,
93094 +};
93095 +
93096 +struct size_overflow_hash _002243_hash = {
93097 + .next = NULL,
93098 + .name = "rx_streaming_interval_read",
93099 + .param = PARAM3,
93100 +};
93101 +
93102 +struct size_overflow_hash _002244_hash = {
93103 + .next = NULL,
93104 + .name = "sas_change_queue_depth",
93105 + .param = PARAM2,
93106 +};
93107 +
93108 +struct size_overflow_hash _002245_hash = {
93109 + .next = NULL,
93110 + .name = "scsi_activate_tcq",
93111 + .param = PARAM2,
93112 +};
93113 +
93114 +struct size_overflow_hash _002246_hash = {
93115 + .next = NULL,
93116 + .name = "scsi_deactivate_tcq",
93117 + .param = PARAM2,
93118 +};
93119 +
93120 +struct size_overflow_hash _002247_hash = {
93121 + .next = NULL,
93122 + .name = "scsi_execute",
93123 + .param = PARAM5,
93124 +};
93125 +
93126 +struct size_overflow_hash _002248_hash = {
93127 + .next = NULL,
93128 + .name = "_scsih_adjust_queue_depth",
93129 + .param = PARAM2,
93130 +};
93131 +
93132 +struct size_overflow_hash _002249_hash = {
93133 + .next = NULL,
93134 + .name = "scsi_init_shared_tag_map",
93135 + .param = PARAM2,
93136 +};
93137 +
93138 +struct size_overflow_hash _002250_hash = {
93139 + .next = NULL,
93140 + .name = "scsi_track_queue_full",
93141 + .param = PARAM2,
93142 +};
93143 +
93144 +struct size_overflow_hash _002251_hash = {
93145 + .next = NULL,
93146 + .name = "sctp_recvmsg",
93147 + .param = PARAM4,
93148 +};
93149 +
93150 +struct size_overflow_hash _002252_hash = {
93151 + .next = NULL,
93152 + .name = "send_stream",
93153 + .param = PARAM4,
93154 +};
93155 +
93156 +struct size_overflow_hash _002253_hash = {
93157 + .next = NULL,
93158 + .name = "skb_copy_and_csum_datagram_iovec",
93159 + .param = PARAM2,
93160 +};
93161 +
93162 +struct size_overflow_hash _002255_hash = {
93163 + .next = NULL,
93164 + .name = "snd_gf1_mem_proc_dump",
93165 + .param = PARAM5,
93166 +};
93167 +
93168 +struct size_overflow_hash _002256_hash = {
93169 + .next = NULL,
93170 + .name = "split_scan_timeout_read",
93171 + .param = PARAM3,
93172 +};
93173 +
93174 +struct size_overflow_hash _002257_hash = {
93175 + .next = NULL,
93176 + .name = "sta_dev_read",
93177 + .param = PARAM3,
93178 +};
93179 +
93180 +struct size_overflow_hash _002258_hash = {
93181 + .next = NULL,
93182 + .name = "sta_inactive_ms_read",
93183 + .param = PARAM3,
93184 +};
93185 +
93186 +struct size_overflow_hash _002259_hash = {
93187 + .next = NULL,
93188 + .name = "sta_last_signal_read",
93189 + .param = PARAM3,
93190 +};
93191 +
93192 +struct size_overflow_hash _002260_hash = {
93193 + .next = NULL,
93194 + .name = "stats_dot11ACKFailureCount_read",
93195 + .param = PARAM3,
93196 +};
93197 +
93198 +struct size_overflow_hash _002261_hash = {
93199 + .next = NULL,
93200 + .name = "stats_dot11FCSErrorCount_read",
93201 + .param = PARAM3,
93202 +};
93203 +
93204 +struct size_overflow_hash _002262_hash = {
93205 + .next = NULL,
93206 + .name = "stats_dot11RTSFailureCount_read",
93207 + .param = PARAM3,
93208 +};
93209 +
93210 +struct size_overflow_hash _002263_hash = {
93211 + .next = NULL,
93212 + .name = "stats_dot11RTSSuccessCount_read",
93213 + .param = PARAM3,
93214 +};
93215 +
93216 +struct size_overflow_hash _002264_hash = {
93217 + .next = NULL,
93218 + .name = "storvsc_connect_to_vsp",
93219 + .param = PARAM2,
93220 +};
93221 +
93222 +struct size_overflow_hash _002265_hash = {
93223 + .next = NULL,
93224 + .name = "suspend_dtim_interval_read",
93225 + .param = PARAM3,
93226 +};
93227 +
93228 +struct size_overflow_hash _002266_hash = {
93229 + .next = NULL,
93230 + .name = "sys_msgrcv",
93231 + .param = PARAM3,
93232 +};
93233 +
93234 +struct size_overflow_hash _002267_hash = {
93235 + .next = NULL,
93236 + .name = "tcm_loop_change_queue_depth",
93237 + .param = PARAM2,
93238 +};
93239 +
93240 +struct size_overflow_hash _002268_hash = {
93241 + .next = NULL,
93242 + .name = "tcp_copy_to_iovec",
93243 + .param = PARAM3,
93244 +};
93245 +
93246 +struct size_overflow_hash _002269_hash = {
93247 + .next = NULL,
93248 + .name = "tcp_recvmsg",
93249 + .param = PARAM4,
93250 +};
93251 +
93252 +struct size_overflow_hash _002270_hash = {
93253 + .next = NULL,
93254 + .name = "timeout_read",
93255 + .param = PARAM3,
93256 +};
93257 +
93258 +struct size_overflow_hash _002271_hash = {
93259 + .next = NULL,
93260 + .name = "total_ps_buffered_read",
93261 + .param = PARAM3,
93262 +};
93263 +
93264 +struct size_overflow_hash _002272_hash = {
93265 + .next = NULL,
93266 + .name = "tun_put_user",
93267 + .param = PARAM4,
93268 +};
93269 +
93270 +struct size_overflow_hash _002273_hash = {
93271 + .next = NULL,
93272 + .name = "twa_change_queue_depth",
93273 + .param = PARAM2,
93274 +};
93275 +
93276 +struct size_overflow_hash _002274_hash = {
93277 + .next = NULL,
93278 + .name = "tw_change_queue_depth",
93279 + .param = PARAM2,
93280 +};
93281 +
93282 +struct size_overflow_hash _002275_hash = {
93283 + .next = NULL,
93284 + .name = "twl_change_queue_depth",
93285 + .param = PARAM2,
93286 +};
93287 +
93288 +struct size_overflow_hash _002276_hash = {
93289 + .next = NULL,
93290 + .name = "ubi_eba_write_leb",
93291 + .param = PARAM5|PARAM6,
93292 +};
93293 +
93294 +struct size_overflow_hash _002278_hash = {
93295 + .next = NULL,
93296 + .name = "ubi_eba_write_leb_st",
93297 + .param = PARAM5,
93298 +};
93299 +
93300 +struct size_overflow_hash _002279_hash = {
93301 + .next = NULL,
93302 + .name = "udp_recvmsg",
93303 + .param = PARAM4,
93304 +};
93305 +
93306 +struct size_overflow_hash _002280_hash = {
93307 + .next = &_002171_hash,
93308 + .name = "udpv6_recvmsg",
93309 + .param = PARAM4,
93310 +};
93311 +
93312 +struct size_overflow_hash _002281_hash = {
93313 + .next = &_000511_hash,
93314 + .name = "ulong_read_file",
93315 + .param = PARAM3,
93316 +};
93317 +
93318 +struct size_overflow_hash _002282_hash = {
93319 + .next = NULL,
93320 + .name = "unix_dgram_recvmsg",
93321 + .param = PARAM4,
93322 +};
93323 +
93324 +struct size_overflow_hash _002283_hash = {
93325 + .next = NULL,
93326 + .name = "user_power_read",
93327 + .param = PARAM3,
93328 +};
93329 +
93330 +struct size_overflow_hash _002284_hash = {
93331 + .next = NULL,
93332 + .name = "vcc_recvmsg",
93333 + .param = PARAM4,
93334 +};
93335 +
93336 +struct size_overflow_hash _002285_hash = {
93337 + .next = NULL,
93338 + .name = "wep_iv_read",
93339 + .param = PARAM3,
93340 +};
93341 +
93342 +struct size_overflow_hash _002286_hash = {
93343 + .next = NULL,
93344 + .name = "x25_recvmsg",
93345 + .param = PARAM4,
93346 +};
93347 +
93348 +struct size_overflow_hash _002287_hash = {
93349 + .next = NULL,
93350 + .name = "xfs_iext_insert",
93351 + .param = PARAM3,
93352 +};
93353 +
93354 +struct size_overflow_hash _002288_hash = {
93355 + .next = NULL,
93356 + .name = "xfs_iext_remove",
93357 + .param = PARAM3,
93358 +};
93359 +
93360 +struct size_overflow_hash _002289_hash = {
93361 + .next = NULL,
93362 + .name = "xlog_find_verify_log_record",
93363 + .param = PARAM2,
93364 +};
93365 +
93366 +struct size_overflow_hash _002290_hash = {
93367 + .next = NULL,
93368 + .name = "btrfs_alloc_free_block",
93369 + .param = PARAM3,
93370 +};
93371 +
93372 +struct size_overflow_hash _002291_hash = {
93373 + .next = NULL,
93374 + .name = "cx18_read_pos",
93375 + .param = PARAM3,
93376 +};
93377 +
93378 +struct size_overflow_hash _002292_hash = {
93379 + .next = NULL,
93380 + .name = "l2cap_sock_recvmsg",
93381 + .param = PARAM4,
93382 +};
93383 +
93384 +struct size_overflow_hash _002293_hash = {
93385 + .next = NULL,
93386 + .name = "osd_req_list_dev_partitions",
93387 + .param = PARAM4,
93388 +};
93389 +
93390 +struct size_overflow_hash _002294_hash = {
93391 + .next = NULL,
93392 + .name = "osd_req_list_partition_collections",
93393 + .param = PARAM5,
93394 +};
93395 +
93396 +struct size_overflow_hash _002295_hash = {
93397 + .next = NULL,
93398 + .name = "osst_do_scsi",
93399 + .param = PARAM4,
93400 +};
93401 +
93402 +struct size_overflow_hash _002296_hash = {
93403 + .next = NULL,
93404 + .name = "qla2x00_handle_queue_full",
93405 + .param = PARAM2,
93406 +};
93407 +
93408 +struct size_overflow_hash _002297_hash = {
93409 + .next = NULL,
93410 + .name = "rfcomm_sock_recvmsg",
93411 + .param = PARAM4,
93412 +};
93413 +
93414 +struct size_overflow_hash _002298_hash = {
93415 + .next = NULL,
93416 + .name = "scsi_execute_req",
93417 + .param = PARAM5,
93418 +};
93419 +
93420 +struct size_overflow_hash _002299_hash = {
93421 + .next = NULL,
93422 + .name = "_scsih_change_queue_depth",
93423 + .param = PARAM2,
93424 +};
93425 +
93426 +struct size_overflow_hash _002300_hash = {
93427 + .next = NULL,
93428 + .name = "spi_execute",
93429 + .param = PARAM5,
93430 +};
93431 +
93432 +struct size_overflow_hash _002301_hash = {
93433 + .next = NULL,
93434 + .name = "submit_inquiry",
93435 + .param = PARAM3,
93436 +};
93437 +
93438 +struct size_overflow_hash _002302_hash = {
93439 + .next = NULL,
93440 + .name = "tcp_dma_try_early_copy",
93441 + .param = PARAM3,
93442 +};
93443 +
93444 +struct size_overflow_hash _002303_hash = {
93445 + .next = NULL,
93446 + .name = "tun_do_read",
93447 + .param = PARAM4,
93448 +};
93449 +
93450 +struct size_overflow_hash _002304_hash = {
93451 + .next = NULL,
93452 + .name = "ubi_eba_atomic_leb_change",
93453 + .param = PARAM5,
93454 +};
93455 +
93456 +struct size_overflow_hash _002305_hash = {
93457 + .next = NULL,
93458 + .name = "ubi_leb_write",
93459 + .param = PARAM4|PARAM5,
93460 +};
93461 +
93462 +struct size_overflow_hash _002307_hash = {
93463 + .next = NULL,
93464 + .name = "unix_seqpacket_recvmsg",
93465 + .param = PARAM4,
93466 +};
93467 +
93468 +struct size_overflow_hash _002308_hash = {
93469 + .next = NULL,
93470 + .name = "write_leb",
93471 + .param = PARAM5,
93472 +};
93473 +
93474 +struct size_overflow_hash _002309_hash = {
93475 + .next = NULL,
93476 + .name = "ch_do_scsi",
93477 + .param = PARAM4,
93478 +};
93479 +
93480 +struct size_overflow_hash _002310_hash = {
93481 + .next = NULL,
93482 + .name = "dbg_leb_write",
93483 + .param = PARAM4|PARAM5,
93484 +};
93485 +
93486 +struct size_overflow_hash _002312_hash = {
93487 + .next = NULL,
93488 + .name = "scsi_mode_sense",
93489 + .param = PARAM5,
93490 +};
93491 +
93492 +struct size_overflow_hash _002313_hash = {
93493 + .next = NULL,
93494 + .name = "scsi_vpd_inquiry",
93495 + .param = PARAM4,
93496 +};
93497 +
93498 +struct size_overflow_hash _002314_hash = {
93499 + .next = &_000673_hash,
93500 + .name = "ses_recv_diag",
93501 + .param = PARAM4,
93502 +};
93503 +
93504 +struct size_overflow_hash _002315_hash = {
93505 + .next = NULL,
93506 + .name = "ses_send_diag",
93507 + .param = PARAM4,
93508 +};
93509 +
93510 +struct size_overflow_hash _002316_hash = {
93511 + .next = NULL,
93512 + .name = "spi_dv_device_echo_buffer",
93513 + .param = PARAM2|PARAM3,
93514 +};
93515 +
93516 +struct size_overflow_hash _002318_hash = {
93517 + .next = NULL,
93518 + .name = "ubifs_leb_write",
93519 + .param = PARAM4|PARAM5,
93520 +};
93521 +
93522 +struct size_overflow_hash _002320_hash = {
93523 + .next = NULL,
93524 + .name = "ubi_leb_change",
93525 + .param = PARAM4,
93526 +};
93527 +
93528 +struct size_overflow_hash _002321_hash = {
93529 + .next = NULL,
93530 + .name = "ubi_write",
93531 + .param = PARAM4|PARAM5,
93532 +};
93533 +
93534 +struct size_overflow_hash _002322_hash = {
93535 + .next = NULL,
93536 + .name = "dbg_leb_change",
93537 + .param = PARAM4,
93538 +};
93539 +
93540 +struct size_overflow_hash _002323_hash = {
93541 + .next = NULL,
93542 + .name = "gluebi_write",
93543 + .param = PARAM3,
93544 +};
93545 +
93546 +struct size_overflow_hash _002324_hash = {
93547 + .next = NULL,
93548 + .name = "scsi_get_vpd_page",
93549 + .param = PARAM4,
93550 +};
93551 +
93552 +struct size_overflow_hash _002325_hash = {
93553 + .next = NULL,
93554 + .name = "sd_do_mode_sense",
93555 + .param = PARAM5,
93556 +};
93557 +
93558 +struct size_overflow_hash _002326_hash = {
93559 + .next = NULL,
93560 + .name = "ubifs_leb_change",
93561 + .param = PARAM4,
93562 +};
93563 +
93564 +struct size_overflow_hash _002327_hash = {
93565 + .next = NULL,
93566 + .name = "ubifs_write_node",
93567 + .param = PARAM5,
93568 +};
93569 +
93570 +struct size_overflow_hash _002328_hash = {
93571 + .next = NULL,
93572 + .name = "fixup_leb",
93573 + .param = PARAM3,
93574 +};
93575 +
93576 +struct size_overflow_hash _002329_hash = {
93577 + .next = NULL,
93578 + .name = "recover_head",
93579 + .param = PARAM3,
93580 +};
93581 +
93582 +struct size_overflow_hash _002330_hash = {
93583 + .next = NULL,
93584 + .name = "alloc_cpu_rmap",
93585 + .param = PARAM1,
93586 +};
93587 +
93588 +struct size_overflow_hash _002331_hash = {
93589 + .next = NULL,
93590 + .name = "alloc_ebda_hpc",
93591 + .param = PARAM1|PARAM2,
93592 +};
93593 +
93594 +struct size_overflow_hash _002333_hash = {
93595 + .next = NULL,
93596 + .name = "alloc_sched_domains",
93597 + .param = PARAM1,
93598 +};
93599 +
93600 +struct size_overflow_hash _002334_hash = {
93601 + .next = NULL,
93602 + .name = "amthi_read",
93603 + .param = PARAM4,
93604 +};
93605 +
93606 +struct size_overflow_hash _002335_hash = {
93607 + .next = NULL,
93608 + .name = "bcm_char_read",
93609 + .param = PARAM3,
93610 +};
93611 +
93612 +struct size_overflow_hash _002336_hash = {
93613 + .next = NULL,
93614 + .name = "BcmCopySection",
93615 + .param = PARAM5,
93616 +};
93617 +
93618 +struct size_overflow_hash _002337_hash = {
93619 + .next = NULL,
93620 + .name = "buffer_from_user",
93621 + .param = PARAM3,
93622 +};
93623 +
93624 +struct size_overflow_hash _002338_hash = {
93625 + .next = NULL,
93626 + .name = "buffer_to_user",
93627 + .param = PARAM3,
93628 +};
93629 +
93630 +struct size_overflow_hash _002339_hash = {
93631 + .next = NULL,
93632 + .name = "c4iw_init_resource_fifo",
93633 + .param = PARAM3,
93634 +};
93635 +
93636 +struct size_overflow_hash _002340_hash = {
93637 + .next = NULL,
93638 + .name = "c4iw_init_resource_fifo_random",
93639 + .param = PARAM3,
93640 +};
93641 +
93642 +struct size_overflow_hash _002341_hash = {
93643 + .next = NULL,
93644 + .name = "card_send_command",
93645 + .param = PARAM3,
93646 +};
93647 +
93648 +struct size_overflow_hash _002342_hash = {
93649 + .next = NULL,
93650 + .name = "chd_dec_fetch_cdata",
93651 + .param = PARAM3,
93652 +};
93653 +
93654 +struct size_overflow_hash _002343_hash = {
93655 + .next = NULL,
93656 + .name = "crystalhd_create_dio_pool",
93657 + .param = PARAM2,
93658 +};
93659 +
93660 +struct size_overflow_hash _002344_hash = {
93661 + .next = NULL,
93662 + .name = "crystalhd_user_data",
93663 + .param = PARAM3,
93664 +};
93665 +
93666 +struct size_overflow_hash _002345_hash = {
93667 + .next = NULL,
93668 + .name = "cxio_init_resource_fifo",
93669 + .param = PARAM3,
93670 +};
93671 +
93672 +struct size_overflow_hash _002346_hash = {
93673 + .next = NULL,
93674 + .name = "cxio_init_resource_fifo_random",
93675 + .param = PARAM3,
93676 +};
93677 +
93678 +struct size_overflow_hash _002347_hash = {
93679 + .next = NULL,
93680 + .name = "do_pages_stat",
93681 + .param = PARAM2,
93682 +};
93683 +
93684 +struct size_overflow_hash _002348_hash = {
93685 + .next = NULL,
93686 + .name = "do_read_log_to_user",
93687 + .param = PARAM4,
93688 +};
93689 +
93690 +struct size_overflow_hash _002349_hash = {
93691 + .next = NULL,
93692 + .name = "do_write_log_from_user",
93693 + .param = PARAM3,
93694 +};
93695 +
93696 +struct size_overflow_hash _002350_hash = {
93697 + .next = NULL,
93698 + .name = "dt3155_read",
93699 + .param = PARAM3,
93700 +};
93701 +
93702 +struct size_overflow_hash _002351_hash = {
93703 + .next = NULL,
93704 + .name = "easycap_alsa_vmalloc",
93705 + .param = PARAM2,
93706 +};
93707 +
93708 +struct size_overflow_hash _002352_hash = {
93709 + .next = NULL,
93710 + .name = "evm_read_key",
93711 + .param = PARAM3,
93712 +};
93713 +
93714 +struct size_overflow_hash _002353_hash = {
93715 + .next = NULL,
93716 + .name = "evm_write_key",
93717 + .param = PARAM3,
93718 +};
93719 +
93720 +struct size_overflow_hash _002354_hash = {
93721 + .next = NULL,
93722 + .name = "fir16_create",
93723 + .param = PARAM3,
93724 +};
93725 +
93726 +struct size_overflow_hash _002355_hash = {
93727 + .next = NULL,
93728 + .name = "iio_allocate_device",
93729 + .param = PARAM1,
93730 +};
93731 +
93732 +struct size_overflow_hash _002356_hash = {
93733 + .next = NULL,
93734 + .name = "__iio_allocate_kfifo",
93735 + .param = PARAM2|PARAM3,
93736 +};
93737 +
93738 +struct size_overflow_hash _002358_hash = {
93739 + .next = NULL,
93740 + .name = "__iio_allocate_sw_ring_buffer",
93741 + .param = PARAM3,
93742 +};
93743 +
93744 +struct size_overflow_hash _002359_hash = {
93745 + .next = NULL,
93746 + .name = "iio_debugfs_read_reg",
93747 + .param = PARAM3,
93748 +};
93749 +
93750 +struct size_overflow_hash _002360_hash = {
93751 + .next = NULL,
93752 + .name = "iio_debugfs_write_reg",
93753 + .param = PARAM3,
93754 +};
93755 +
93756 +struct size_overflow_hash _002361_hash = {
93757 + .next = NULL,
93758 + .name = "iio_event_chrdev_read",
93759 + .param = PARAM3,
93760 +};
93761 +
93762 +struct size_overflow_hash _002362_hash = {
93763 + .next = NULL,
93764 + .name = "iio_read_first_n_kfifo",
93765 + .param = PARAM2,
93766 +};
93767 +
93768 +struct size_overflow_hash _002363_hash = {
93769 + .next = NULL,
93770 + .name = "iio_read_first_n_sw_rb",
93771 + .param = PARAM2,
93772 +};
93773 +
93774 +struct size_overflow_hash _002364_hash = {
93775 + .next = NULL,
93776 + .name = "ioapic_setup_resources",
93777 + .param = PARAM1,
93778 +};
93779 +
93780 +struct size_overflow_hash _002365_hash = {
93781 + .next = NULL,
93782 + .name = "keymap_store",
93783 + .param = PARAM4,
93784 +};
93785 +
93786 +struct size_overflow_hash _002366_hash = {
93787 + .next = NULL,
93788 + .name = "kzalloc_node",
93789 + .param = PARAM1,
93790 +};
93791 +
93792 +struct size_overflow_hash _002367_hash = {
93793 + .next = NULL,
93794 + .name = "line6_alloc_sysex_buffer",
93795 + .param = PARAM4,
93796 +};
93797 +
93798 +struct size_overflow_hash _002368_hash = {
93799 + .next = NULL,
93800 + .name = "line6_dumpreq_initbuf",
93801 + .param = PARAM3,
93802 +};
93803 +
93804 +struct size_overflow_hash _002369_hash = {
93805 + .next = NULL,
93806 + .name = "line6_midibuf_init",
93807 + .param = PARAM2,
93808 +};
93809 +
93810 +struct size_overflow_hash _002370_hash = {
93811 + .next = NULL,
93812 + .name = "lirc_write",
93813 + .param = PARAM3,
93814 +};
93815 +
93816 +struct size_overflow_hash _002371_hash = {
93817 + .next = NULL,
93818 + .name = "_malloc",
93819 + .param = PARAM1,
93820 +};
93821 +
93822 +struct size_overflow_hash _002372_hash = {
93823 + .next = NULL,
93824 + .name = "mei_read",
93825 + .param = PARAM3,
93826 +};
93827 +
93828 +struct size_overflow_hash _002373_hash = {
93829 + .next = NULL,
93830 + .name = "mei_write",
93831 + .param = PARAM3,
93832 +};
93833 +
93834 +struct size_overflow_hash _002374_hash = {
93835 + .next = NULL,
93836 + .name = "mempool_create_node",
93837 + .param = PARAM1,
93838 +};
93839 +
93840 +struct size_overflow_hash _002375_hash = {
93841 + .next = NULL,
93842 + .name = "msg_set",
93843 + .param = PARAM3,
93844 +};
93845 +
93846 +struct size_overflow_hash _002376_hash = {
93847 + .next = NULL,
93848 + .name = "newpart",
93849 + .param = PARAM6,
93850 +};
93851 +
93852 +struct size_overflow_hash _002377_hash = {
93853 + .next = NULL,
93854 + .name = "OS_kmalloc",
93855 + .param = PARAM1,
93856 +};
93857 +
93858 +struct size_overflow_hash _002378_hash = {
93859 + .next = NULL,
93860 + .name = "pcpu_alloc_bootmem",
93861 + .param = PARAM2,
93862 +};
93863 +
93864 +struct size_overflow_hash _002379_hash = {
93865 + .next = NULL,
93866 + .name = "pcpu_get_vm_areas",
93867 + .param = PARAM3,
93868 +};
93869 +
93870 +struct size_overflow_hash _002380_hash = {
93871 + .next = NULL,
93872 + .name = "resource_from_user",
93873 + .param = PARAM3,
93874 +};
93875 +
93876 +struct size_overflow_hash _002381_hash = {
93877 + .next = NULL,
93878 + .name = "sca3000_read_data",
93879 + .param = PARAM4,
93880 +};
93881 +
93882 +struct size_overflow_hash _002382_hash = {
93883 + .next = NULL,
93884 + .name = "sca3000_read_first_n_hw_rb",
93885 + .param = PARAM2,
93886 +};
93887 +
93888 +struct size_overflow_hash _002383_hash = {
93889 + .next = NULL,
93890 + .name = "send_midi_async",
93891 + .param = PARAM3,
93892 +};
93893 +
93894 +struct size_overflow_hash _002384_hash = {
93895 + .next = NULL,
93896 + .name = "sep_create_dcb_dmatables_context",
93897 + .param = PARAM6,
93898 +};
93899 +
93900 +struct size_overflow_hash _002385_hash = {
93901 + .next = NULL,
93902 + .name = "sep_create_dcb_dmatables_context_kernel",
93903 + .param = PARAM6,
93904 +};
93905 +
93906 +struct size_overflow_hash _002386_hash = {
93907 + .next = NULL,
93908 + .name = "sep_create_msgarea_context",
93909 + .param = PARAM4,
93910 +};
93911 +
93912 +struct size_overflow_hash _002387_hash = {
93913 + .next = NULL,
93914 + .name = "sep_lli_table_secure_dma",
93915 + .param = PARAM2|PARAM3,
93916 +};
93917 +
93918 +struct size_overflow_hash _002389_hash = {
93919 + .next = &_002154_hash,
93920 + .name = "sep_lock_user_pages",
93921 + .param = PARAM2|PARAM3,
93922 +};
93923 +
93924 +struct size_overflow_hash _002391_hash = {
93925 + .next = NULL,
93926 + .name = "sep_prepare_input_output_dma_table_in_dcb",
93927 + .param = PARAM4|PARAM5,
93928 +};
93929 +
93930 +struct size_overflow_hash _002393_hash = {
93931 + .next = NULL,
93932 + .name = "sep_read",
93933 + .param = PARAM3,
93934 +};
93935 +
93936 +struct size_overflow_hash _002394_hash = {
93937 + .next = NULL,
93938 + .name = "TransmitTcb",
93939 + .param = PARAM4,
93940 +};
93941 +
93942 +struct size_overflow_hash _002395_hash = {
93943 + .next = NULL,
93944 + .name = "ValidateDSDParamsChecksum",
93945 + .param = PARAM3,
93946 +};
93947 +
93948 +struct size_overflow_hash _002396_hash = {
93949 + .next = NULL,
93950 + .name = "Wb35Reg_BurstWrite",
93951 + .param = PARAM4,
93952 +};
93953 +
93954 +struct size_overflow_hash _002397_hash = {
93955 + .next = &_001499_hash,
93956 + .name = "__alloc_bootmem_low_node",
93957 + .param = PARAM2,
93958 +};
93959 +
93960 +struct size_overflow_hash _002398_hash = {
93961 + .next = NULL,
93962 + .name = "__alloc_bootmem_node",
93963 + .param = PARAM2,
93964 +};
93965 +
93966 +struct size_overflow_hash _002399_hash = {
93967 + .next = NULL,
93968 + .name = "alloc_irq_cpu_rmap",
93969 + .param = PARAM1,
93970 +};
93971 +
93972 +struct size_overflow_hash _002400_hash = {
93973 + .next = NULL,
93974 + .name = "alloc_ring",
93975 + .param = PARAM2|PARAM4,
93976 +};
93977 +
93978 +struct size_overflow_hash _002402_hash = {
93979 + .next = NULL,
93980 + .name = "c4iw_init_resource",
93981 + .param = PARAM2|PARAM3,
93982 +};
93983 +
93984 +struct size_overflow_hash _002404_hash = {
93985 + .next = &_000284_hash,
93986 + .name = "cxio_hal_init_resource",
93987 + .param = PARAM2|PARAM7|PARAM6,
93988 +};
93989 +
93990 +struct size_overflow_hash _002407_hash = {
93991 + .next = NULL,
93992 + .name = "cxio_hal_init_rhdl_resource",
93993 + .param = PARAM1,
93994 +};
93995 +
93996 +struct size_overflow_hash _002408_hash = {
93997 + .next = NULL,
93998 + .name = "disk_expand_part_tbl",
93999 + .param = PARAM2,
94000 +};
94001 +
94002 +struct size_overflow_hash _002409_hash = {
94003 + .next = NULL,
94004 + .name = "InterfaceTransmitPacket",
94005 + .param = PARAM3,
94006 +};
94007 +
94008 +struct size_overflow_hash _002410_hash = {
94009 + .next = NULL,
94010 + .name = "line6_dumpreq_init",
94011 + .param = PARAM3,
94012 +};
94013 +
94014 +struct size_overflow_hash _002411_hash = {
94015 + .next = NULL,
94016 + .name = "mempool_create",
94017 + .param = PARAM1,
94018 +};
94019 +
94020 +struct size_overflow_hash _002412_hash = {
94021 + .next = NULL,
94022 + .name = "pcpu_fc_alloc",
94023 + .param = PARAM2,
94024 +};
94025 +
94026 +struct size_overflow_hash _002413_hash = {
94027 + .next = NULL,
94028 + .name = "pod_alloc_sysex_buffer",
94029 + .param = PARAM3,
94030 +};
94031 +
94032 +struct size_overflow_hash _002414_hash = {
94033 + .next = NULL,
94034 + .name = "r8712_usbctrl_vendorreq",
94035 + .param = PARAM6,
94036 +};
94037 +
94038 +struct size_overflow_hash _002415_hash = {
94039 + .next = NULL,
94040 + .name = "r871x_set_wpa_ie",
94041 + .param = PARAM3,
94042 +};
94043 +
94044 +struct size_overflow_hash _002416_hash = {
94045 + .next = NULL,
94046 + .name = "sys_move_pages",
94047 + .param = PARAM2,
94048 +};
94049 +
94050 +struct size_overflow_hash _002417_hash = {
94051 + .next = NULL,
94052 + .name = "variax_alloc_sysex_buffer",
94053 + .param = PARAM3,
94054 +};
94055 +
94056 +struct size_overflow_hash _002418_hash = {
94057 + .next = NULL,
94058 + .name = "vme_user_write",
94059 + .param = PARAM3,
94060 +};
94061 +
94062 +struct size_overflow_hash _002419_hash = {
94063 + .next = NULL,
94064 + .name = "add_partition",
94065 + .param = PARAM2,
94066 +};
94067 +
94068 +struct size_overflow_hash _002420_hash = {
94069 + .next = NULL,
94070 + .name = "__alloc_bootmem_node_high",
94071 + .param = PARAM2,
94072 +};
94073 +
94074 +struct size_overflow_hash _002421_hash = {
94075 + .next = NULL,
94076 + .name = "ceph_msgpool_init",
94077 + .param = PARAM3,
94078 +};
94079 +
94080 +struct size_overflow_hash _002423_hash = {
94081 + .next = NULL,
94082 + .name = "mempool_create_kmalloc_pool",
94083 + .param = PARAM1,
94084 +};
94085 +
94086 +struct size_overflow_hash _002424_hash = {
94087 + .next = NULL,
94088 + .name = "mempool_create_page_pool",
94089 + .param = PARAM1,
94090 +};
94091 +
94092 +struct size_overflow_hash _002425_hash = {
94093 + .next = NULL,
94094 + .name = "mempool_create_slab_pool",
94095 + .param = PARAM1,
94096 +};
94097 +
94098 +struct size_overflow_hash _002426_hash = {
94099 + .next = NULL,
94100 + .name = "variax_set_raw2",
94101 + .param = PARAM4,
94102 +};
94103 +
94104 +struct size_overflow_hash _002427_hash = {
94105 + .next = NULL,
94106 + .name = "bioset_create",
94107 + .param = PARAM1,
94108 +};
94109 +
94110 +struct size_overflow_hash _002428_hash = {
94111 + .next = NULL,
94112 + .name = "bioset_integrity_create",
94113 + .param = PARAM2,
94114 +};
94115 +
94116 +struct size_overflow_hash _002429_hash = {
94117 + .next = NULL,
94118 + .name = "biovec_create_pools",
94119 + .param = PARAM2,
94120 +};
94121 +
94122 +struct size_overflow_hash _002430_hash = {
94123 + .next = NULL,
94124 + .name = "i2o_pool_alloc",
94125 + .param = PARAM4,
94126 +};
94127 +
94128 +struct size_overflow_hash _002431_hash = {
94129 + .next = NULL,
94130 + .name = "prison_create",
94131 + .param = PARAM1,
94132 +};
94133 +
94134 +struct size_overflow_hash _002432_hash = {
94135 + .next = NULL,
94136 + .name = "unlink_simple",
94137 + .param = PARAM3,
94138 +};
94139 +
94140 +struct size_overflow_hash _002433_hash = {
94141 + .next = NULL,
94142 + .name = "alloc_ieee80211",
94143 + .param = PARAM1,
94144 +};
94145 +
94146 +struct size_overflow_hash _002434_hash = {
94147 + .next = NULL,
94148 + .name = "alloc_ieee80211_rsl",
94149 + .param = PARAM1,
94150 +};
94151 +
94152 +struct size_overflow_hash _002435_hash = {
94153 + .next = NULL,
94154 + .name = "alloc_page_cgroup",
94155 + .param = PARAM1,
94156 +};
94157 +
94158 +struct size_overflow_hash _002436_hash = {
94159 + .next = &_002326_hash,
94160 + .name = "alloc_private",
94161 + .param = PARAM2,
94162 +};
94163 +
94164 +struct size_overflow_hash _002437_hash = {
94165 + .next = NULL,
94166 + .name = "alloc_rtllib",
94167 + .param = PARAM1,
94168 +};
94169 +
94170 +struct size_overflow_hash _002438_hash = {
94171 + .next = NULL,
94172 + .name = "alloc_rx_desc_ring",
94173 + .param = PARAM2,
94174 +};
94175 +
94176 +struct size_overflow_hash _002439_hash = {
94177 + .next = NULL,
94178 + .name = "alloc_subdevices",
94179 + .param = PARAM2,
94180 +};
94181 +
94182 +struct size_overflow_hash _002440_hash = {
94183 + .next = NULL,
94184 + .name = "atomic_counters_read",
94185 + .param = PARAM3,
94186 +};
94187 +
94188 +struct size_overflow_hash _002441_hash = {
94189 + .next = NULL,
94190 + .name = "atomic_stats_read",
94191 + .param = PARAM3,
94192 +};
94193 +
94194 +struct size_overflow_hash _002442_hash = {
94195 + .next = NULL,
94196 + .name = "capabilities_read",
94197 + .param = PARAM3,
94198 +};
94199 +
94200 +struct size_overflow_hash _002443_hash = {
94201 + .next = NULL,
94202 + .name = "comedi_read",
94203 + .param = PARAM3,
94204 +};
94205 +
94206 +struct size_overflow_hash _002444_hash = {
94207 + .next = NULL,
94208 + .name = "comedi_write",
94209 + .param = PARAM3,
94210 +};
94211 +
94212 +struct size_overflow_hash _002445_hash = {
94213 + .next = NULL,
94214 + .name = "compat_do_arpt_set_ctl",
94215 + .param = PARAM4,
94216 +};
94217 +
94218 +struct size_overflow_hash _002446_hash = {
94219 + .next = NULL,
94220 + .name = "compat_do_ip6t_set_ctl",
94221 + .param = PARAM4,
94222 +};
94223 +
94224 +struct size_overflow_hash _002447_hash = {
94225 + .next = &_001852_hash,
94226 + .name = "compat_do_ipt_set_ctl",
94227 + .param = PARAM4,
94228 +};
94229 +
94230 +struct size_overflow_hash _002448_hash = {
94231 + .next = NULL,
94232 + .name = "compat_filldir",
94233 + .param = PARAM3,
94234 +};
94235 +
94236 +struct size_overflow_hash _002449_hash = {
94237 + .next = NULL,
94238 + .name = "compat_filldir64",
94239 + .param = PARAM3,
94240 +};
94241 +
94242 +struct size_overflow_hash _002450_hash = {
94243 + .next = NULL,
94244 + .name = "compat_fillonedir",
94245 + .param = PARAM3,
94246 +};
94247 +
94248 +struct size_overflow_hash _002451_hash = {
94249 + .next = NULL,
94250 + .name = "compat_rw_copy_check_uvector",
94251 + .param = PARAM3,
94252 +};
94253 +
94254 +struct size_overflow_hash _002452_hash = {
94255 + .next = NULL,
94256 + .name = "compat_sock_setsockopt",
94257 + .param = PARAM5,
94258 +};
94259 +
94260 +struct size_overflow_hash _002453_hash = {
94261 + .next = NULL,
94262 + .name = "compat_sys_kexec_load",
94263 + .param = PARAM2,
94264 +};
94265 +
94266 +struct size_overflow_hash _002454_hash = {
94267 + .next = NULL,
94268 + .name = "compat_sys_keyctl",
94269 + .param = PARAM4,
94270 +};
94271 +
94272 +struct size_overflow_hash _002455_hash = {
94273 + .next = NULL,
94274 + .name = "compat_sys_move_pages",
94275 + .param = PARAM2,
94276 +};
94277 +
94278 +struct size_overflow_hash _002456_hash = {
94279 + .next = NULL,
94280 + .name = "compat_sys_mq_timedsend",
94281 + .param = PARAM3,
94282 +};
94283 +
94284 +struct size_overflow_hash _002457_hash = {
94285 + .next = NULL,
94286 + .name = "compat_sys_msgrcv",
94287 + .param = PARAM2,
94288 +};
94289 +
94290 +struct size_overflow_hash _002458_hash = {
94291 + .next = NULL,
94292 + .name = "compat_sys_msgsnd",
94293 + .param = PARAM2,
94294 +};
94295 +
94296 +struct size_overflow_hash _002459_hash = {
94297 + .next = NULL,
94298 + .name = "compat_sys_semtimedop",
94299 + .param = PARAM3,
94300 +};
94301 +
94302 +struct size_overflow_hash _002460_hash = {
94303 + .next = NULL,
94304 + .name = "__copy_in_user",
94305 + .param = PARAM3,
94306 +};
94307 +
94308 +struct size_overflow_hash _002461_hash = {
94309 + .next = NULL,
94310 + .name = "copy_in_user",
94311 + .param = PARAM3,
94312 +};
94313 +
94314 +struct size_overflow_hash _002462_hash = {
94315 + .next = NULL,
94316 + .name = "dev_counters_read",
94317 + .param = PARAM3,
94318 +};
94319 +
94320 +struct size_overflow_hash _002463_hash = {
94321 + .next = NULL,
94322 + .name = "dev_names_read",
94323 + .param = PARAM3,
94324 +};
94325 +
94326 +struct size_overflow_hash _002464_hash = {
94327 + .next = NULL,
94328 + .name = "do_arpt_set_ctl",
94329 + .param = PARAM4,
94330 +};
94331 +
94332 +struct size_overflow_hash _002465_hash = {
94333 + .next = NULL,
94334 + .name = "do_ip6t_set_ctl",
94335 + .param = PARAM4,
94336 +};
94337 +
94338 +struct size_overflow_hash _002466_hash = {
94339 + .next = NULL,
94340 + .name = "do_ipt_set_ctl",
94341 + .param = PARAM4,
94342 +};
94343 +
94344 +struct size_overflow_hash _002467_hash = {
94345 + .next = NULL,
94346 + .name = "drbd_bm_resize",
94347 + .param = PARAM2,
94348 +};
94349 +
94350 +struct size_overflow_hash _002468_hash = {
94351 + .next = NULL,
94352 + .name = "driver_names_read",
94353 + .param = PARAM3,
94354 +};
94355 +
94356 +struct size_overflow_hash _002469_hash = {
94357 + .next = NULL,
94358 + .name = "driver_stats_read",
94359 + .param = PARAM3,
94360 +};
94361 +
94362 +struct size_overflow_hash _002470_hash = {
94363 + .next = NULL,
94364 + .name = "__earlyonly_bootmem_alloc",
94365 + .param = PARAM2,
94366 +};
94367 +
94368 +struct size_overflow_hash _002471_hash = {
94369 + .next = NULL,
94370 + .name = "evtchn_read",
94371 + .param = PARAM3,
94372 +};
94373 +
94374 +struct size_overflow_hash _002472_hash = {
94375 + .next = NULL,
94376 + .name = "ext_sd_execute_read_data",
94377 + .param = PARAM9,
94378 +};
94379 +
94380 +struct size_overflow_hash _002473_hash = {
94381 + .next = NULL,
94382 + .name = "ext_sd_execute_write_data",
94383 + .param = PARAM9,
94384 +};
94385 +
94386 +struct size_overflow_hash _002474_hash = {
94387 + .next = NULL,
94388 + .name = "fat_compat_ioctl_filldir",
94389 + .param = PARAM3,
94390 +};
94391 +
94392 +struct size_overflow_hash _002475_hash = {
94393 + .next = NULL,
94394 + .name = "firmwareUpload",
94395 + .param = PARAM3,
94396 +};
94397 +
94398 +struct size_overflow_hash _002476_hash = {
94399 + .next = NULL,
94400 + .name = "flash_read",
94401 + .param = PARAM3,
94402 +};
94403 +
94404 +struct size_overflow_hash _002477_hash = {
94405 + .next = NULL,
94406 + .name = "flash_write",
94407 + .param = PARAM3,
94408 +};
94409 +
94410 +struct size_overflow_hash _002478_hash = {
94411 + .next = NULL,
94412 + .name = "gather_array",
94413 + .param = PARAM3,
94414 +};
94415 +
94416 +struct size_overflow_hash _002479_hash = {
94417 + .next = NULL,
94418 + .name = "ghash_async_setkey",
94419 + .param = PARAM3,
94420 +};
94421 +
94422 +struct size_overflow_hash _002480_hash = {
94423 + .next = NULL,
94424 + .name = "gntdev_alloc_map",
94425 + .param = PARAM2,
94426 +};
94427 +
94428 +struct size_overflow_hash _002481_hash = {
94429 + .next = NULL,
94430 + .name = "gnttab_map",
94431 + .param = PARAM2,
94432 +};
94433 +
94434 +struct size_overflow_hash _002482_hash = {
94435 + .next = NULL,
94436 + .name = "gru_alloc_gts",
94437 + .param = PARAM2|PARAM3,
94438 +};
94439 +
94440 +struct size_overflow_hash _002484_hash = {
94441 + .next = NULL,
94442 + .name = "handle_eviocgbit",
94443 + .param = PARAM3,
94444 +};
94445 +
94446 +struct size_overflow_hash _002485_hash = {
94447 + .next = NULL,
94448 + .name = "hid_parse_report",
94449 + .param = PARAM3,
94450 +};
94451 +
94452 +struct size_overflow_hash _002486_hash = {
94453 + .next = NULL,
94454 + .name = "ieee80211_alloc_txb",
94455 + .param = PARAM1,
94456 +};
94457 +
94458 +struct size_overflow_hash _002487_hash = {
94459 + .next = NULL,
94460 + .name = "ieee80211_wx_set_gen_ie",
94461 + .param = PARAM3,
94462 +};
94463 +
94464 +struct size_overflow_hash _002488_hash = {
94465 + .next = NULL,
94466 + .name = "ieee80211_wx_set_gen_ie_rsl",
94467 + .param = PARAM3,
94468 +};
94469 +
94470 +struct size_overflow_hash _002489_hash = {
94471 + .next = NULL,
94472 + .name = "init_cdev",
94473 + .param = PARAM1,
94474 +};
94475 +
94476 +struct size_overflow_hash _002490_hash = {
94477 + .next = NULL,
94478 + .name = "init_per_cpu",
94479 + .param = PARAM1,
94480 +};
94481 +
94482 +struct size_overflow_hash _002491_hash = {
94483 + .next = NULL,
94484 + .name = "ipath_create_cq",
94485 + .param = PARAM2,
94486 +};
94487 +
94488 +struct size_overflow_hash _002492_hash = {
94489 + .next = NULL,
94490 + .name = "ipath_get_base_info",
94491 + .param = PARAM3,
94492 +};
94493 +
94494 +struct size_overflow_hash _002493_hash = {
94495 + .next = NULL,
94496 + .name = "ipath_init_qp_table",
94497 + .param = PARAM2,
94498 +};
94499 +
94500 +struct size_overflow_hash _002494_hash = {
94501 + .next = NULL,
94502 + .name = "ipath_resize_cq",
94503 + .param = PARAM2,
94504 +};
94505 +
94506 +struct size_overflow_hash _002495_hash = {
94507 + .next = NULL,
94508 + .name = "ni_gpct_device_construct",
94509 + .param = PARAM5,
94510 +};
94511 +
94512 +struct size_overflow_hash _002496_hash = {
94513 + .next = NULL,
94514 + .name = "options_write",
94515 + .param = PARAM3,
94516 +};
94517 +
94518 +struct size_overflow_hash _002497_hash = {
94519 + .next = NULL,
94520 + .name = "portcntrs_1_read",
94521 + .param = PARAM3,
94522 +};
94523 +
94524 +struct size_overflow_hash _002498_hash = {
94525 + .next = NULL,
94526 + .name = "portcntrs_2_read",
94527 + .param = PARAM3,
94528 +};
94529 +
94530 +struct size_overflow_hash _002499_hash = {
94531 + .next = NULL,
94532 + .name = "portnames_read",
94533 + .param = PARAM3,
94534 +};
94535 +
94536 +struct size_overflow_hash _002500_hash = {
94537 + .next = NULL,
94538 + .name = "ptc_proc_write",
94539 + .param = PARAM3,
94540 +};
94541 +
94542 +struct size_overflow_hash _002501_hash = {
94543 + .next = NULL,
94544 + .name = "put_cmsg_compat",
94545 + .param = PARAM4,
94546 +};
94547 +
94548 +struct size_overflow_hash _002502_hash = {
94549 + .next = NULL,
94550 + .name = "qib_alloc_devdata",
94551 + .param = PARAM2,
94552 +};
94553 +
94554 +struct size_overflow_hash _002503_hash = {
94555 + .next = NULL,
94556 + .name = "qib_alloc_fast_reg_page_list",
94557 + .param = PARAM2,
94558 +};
94559 +
94560 +struct size_overflow_hash _002504_hash = {
94561 + .next = NULL,
94562 + .name = "qib_cdev_init",
94563 + .param = PARAM1,
94564 +};
94565 +
94566 +struct size_overflow_hash _002505_hash = {
94567 + .next = NULL,
94568 + .name = "qib_create_cq",
94569 + .param = PARAM2,
94570 +};
94571 +
94572 +struct size_overflow_hash _002506_hash = {
94573 + .next = NULL,
94574 + .name = "qib_diag_write",
94575 + .param = PARAM3,
94576 +};
94577 +
94578 +struct size_overflow_hash _002507_hash = {
94579 + .next = NULL,
94580 + .name = "qib_get_base_info",
94581 + .param = PARAM3,
94582 +};
94583 +
94584 +struct size_overflow_hash _002508_hash = {
94585 + .next = NULL,
94586 + .name = "qib_resize_cq",
94587 + .param = PARAM2,
94588 +};
94589 +
94590 +struct size_overflow_hash _002509_hash = {
94591 + .next = NULL,
94592 + .name = "qsfp_1_read",
94593 + .param = PARAM3,
94594 +};
94595 +
94596 +struct size_overflow_hash _002510_hash = {
94597 + .next = NULL,
94598 + .name = "qsfp_2_read",
94599 + .param = PARAM3,
94600 +};
94601 +
94602 +struct size_overflow_hash _002511_hash = {
94603 + .next = NULL,
94604 + .name = "queue_reply",
94605 + .param = PARAM3,
94606 +};
94607 +
94608 +struct size_overflow_hash _002512_hash = {
94609 + .next = NULL,
94610 + .name = "Realloc",
94611 + .param = PARAM2,
94612 +};
94613 +
94614 +struct size_overflow_hash _002513_hash = {
94615 + .next = NULL,
94616 + .name = "rfc4106_set_key",
94617 + .param = PARAM3,
94618 +};
94619 +
94620 +struct size_overflow_hash _002514_hash = {
94621 + .next = NULL,
94622 + .name = "rtllib_alloc_txb",
94623 + .param = PARAM1,
94624 +};
94625 +
94626 +struct size_overflow_hash _002515_hash = {
94627 + .next = NULL,
94628 + .name = "rtllib_wx_set_gen_ie",
94629 + .param = PARAM3,
94630 +};
94631 +
94632 +struct size_overflow_hash _002516_hash = {
94633 + .next = NULL,
94634 + .name = "rts51x_transfer_data_partial",
94635 + .param = PARAM6,
94636 +};
94637 +
94638 +struct size_overflow_hash _002517_hash = {
94639 + .next = NULL,
94640 + .name = "sparse_early_usemaps_alloc_node",
94641 + .param = PARAM4,
94642 +};
94643 +
94644 +struct size_overflow_hash _002518_hash = {
94645 + .next = NULL,
94646 + .name = "split",
94647 + .param = PARAM2,
94648 +};
94649 +
94650 +struct size_overflow_hash _002519_hash = {
94651 + .next = NULL,
94652 + .name = "stats_read_ul",
94653 + .param = PARAM3,
94654 +};
94655 +
94656 +struct size_overflow_hash _002520_hash = {
94657 + .next = NULL,
94658 + .name = "store_debug_level",
94659 + .param = PARAM3,
94660 +};
94661 +
94662 +struct size_overflow_hash _002521_hash = {
94663 + .next = NULL,
94664 + .name = "sys32_ipc",
94665 + .param = PARAM3,
94666 +};
94667 +
94668 +struct size_overflow_hash _002522_hash = {
94669 + .next = NULL,
94670 + .name = "sys32_rt_sigpending",
94671 + .param = PARAM2,
94672 +};
94673 +
94674 +struct size_overflow_hash _002523_hash = {
94675 + .next = NULL,
94676 + .name = "tunables_read",
94677 + .param = PARAM3,
94678 +};
94679 +
94680 +struct size_overflow_hash _002524_hash = {
94681 + .next = NULL,
94682 + .name = "tunables_write",
94683 + .param = PARAM3,
94684 +};
94685 +
94686 +struct size_overflow_hash _002525_hash = {
94687 + .next = NULL,
94688 + .name = "u32_array_read",
94689 + .param = PARAM3,
94690 +};
94691 +
94692 +struct size_overflow_hash _002526_hash = {
94693 + .next = NULL,
94694 + .name = "usb_buffer_alloc",
94695 + .param = PARAM2,
94696 +};
94697 +
94698 +struct size_overflow_hash _002527_hash = {
94699 + .next = NULL,
94700 + .name = "xenbus_file_write",
94701 + .param = PARAM3,
94702 +};
94703 +
94704 +struct size_overflow_hash _002528_hash = {
94705 + .next = NULL,
94706 + .name = "xpc_kmalloc_cacheline_aligned",
94707 + .param = PARAM1,
94708 +};
94709 +
94710 +struct size_overflow_hash _002529_hash = {
94711 + .next = NULL,
94712 + .name = "xpc_kzalloc_cacheline_aligned",
94713 + .param = PARAM1,
94714 +};
94715 +
94716 +struct size_overflow_hash _002530_hash = {
94717 + .next = NULL,
94718 + .name = "xsd_read",
94719 + .param = PARAM3,
94720 +};
94721 +
94722 +struct size_overflow_hash _002531_hash = {
94723 + .next = NULL,
94724 + .name = "compat_do_readv_writev",
94725 + .param = PARAM4,
94726 +};
94727 +
94728 +struct size_overflow_hash _002532_hash = {
94729 + .next = NULL,
94730 + .name = "compat_keyctl_instantiate_key_iov",
94731 + .param = PARAM3,
94732 +};
94733 +
94734 +struct size_overflow_hash _002533_hash = {
94735 + .next = NULL,
94736 + .name = "compat_process_vm_rw",
94737 + .param = PARAM3|PARAM5,
94738 +};
94739 +
94740 +struct size_overflow_hash _002535_hash = {
94741 + .next = NULL,
94742 + .name = "compat_sys_setsockopt",
94743 + .param = PARAM5,
94744 +};
94745 +
94746 +struct size_overflow_hash _002536_hash = {
94747 + .next = NULL,
94748 + .name = "ipath_cdev_init",
94749 + .param = PARAM1,
94750 +};
94751 +
94752 +struct size_overflow_hash _002537_hash = {
94753 + .next = NULL,
94754 + .name = "ms_read_multiple_pages",
94755 + .param = PARAM4|PARAM5,
94756 +};
94757 +
94758 +struct size_overflow_hash _002539_hash = {
94759 + .next = NULL,
94760 + .name = "ms_write_multiple_pages",
94761 + .param = PARAM5|PARAM6,
94762 +};
94763 +
94764 +struct size_overflow_hash _002541_hash = {
94765 + .next = &_002004_hash,
94766 + .name = "sparse_mem_maps_populate_node",
94767 + .param = PARAM4,
94768 +};
94769 +
94770 +struct size_overflow_hash _002542_hash = {
94771 + .next = NULL,
94772 + .name = "vmemmap_alloc_block",
94773 + .param = PARAM1,
94774 +};
94775 +
94776 +struct size_overflow_hash _002543_hash = {
94777 + .next = NULL,
94778 + .name = "xd_read_multiple_pages",
94779 + .param = PARAM4|PARAM5,
94780 +};
94781 +
94782 +struct size_overflow_hash _002545_hash = {
94783 + .next = NULL,
94784 + .name = "xd_write_multiple_pages",
94785 + .param = PARAM5|PARAM6,
94786 +};
94787 +
94788 +struct size_overflow_hash _002546_hash = {
94789 + .next = NULL,
94790 + .name = "compat_readv",
94791 + .param = PARAM3,
94792 +};
94793 +
94794 +struct size_overflow_hash _002547_hash = {
94795 + .next = NULL,
94796 + .name = "compat_sys_process_vm_readv",
94797 + .param = PARAM3|PARAM5,
94798 +};
94799 +
94800 +struct size_overflow_hash _002549_hash = {
94801 + .next = NULL,
94802 + .name = "compat_sys_process_vm_writev",
94803 + .param = PARAM3|PARAM5,
94804 +};
94805 +
94806 +struct size_overflow_hash _002551_hash = {
94807 + .next = NULL,
94808 + .name = "compat_writev",
94809 + .param = PARAM3,
94810 +};
94811 +
94812 +struct size_overflow_hash _002552_hash = {
94813 + .next = NULL,
94814 + .name = "ms_rw_multi_sector",
94815 + .param = PARAM4,
94816 +};
94817 +
94818 +struct size_overflow_hash _002553_hash = {
94819 + .next = NULL,
94820 + .name = "sparse_early_mem_maps_alloc_node",
94821 + .param = PARAM4,
94822 +};
94823 +
94824 +struct size_overflow_hash _002554_hash = {
94825 + .next = NULL,
94826 + .name = "vmemmap_alloc_block_buf",
94827 + .param = PARAM1,
94828 +};
94829 +
94830 +struct size_overflow_hash _002555_hash = {
94831 + .next = NULL,
94832 + .name = "xd_rw",
94833 + .param = PARAM4,
94834 +};
94835 +
94836 +struct size_overflow_hash _002556_hash = {
94837 + .next = NULL,
94838 + .name = "compat_sys_preadv64",
94839 + .param = PARAM3,
94840 +};
94841 +
94842 +struct size_overflow_hash _002557_hash = {
94843 + .next = NULL,
94844 + .name = "compat_sys_pwritev64",
94845 + .param = PARAM3,
94846 +};
94847 +
94848 +struct size_overflow_hash _002558_hash = {
94849 + .next = NULL,
94850 + .name = "compat_sys_readv",
94851 + .param = PARAM3,
94852 +};
94853 +
94854 +struct size_overflow_hash _002559_hash = {
94855 + .next = NULL,
94856 + .name = "compat_sys_writev",
94857 + .param = PARAM3,
94858 +};
94859 +
94860 +struct size_overflow_hash _002560_hash = {
94861 + .next = NULL,
94862 + .name = "ms_rw",
94863 + .param = PARAM4,
94864 +};
94865 +
94866 +struct size_overflow_hash _002561_hash = {
94867 + .next = NULL,
94868 + .name = "compat_sys_preadv",
94869 + .param = PARAM3,
94870 +};
94871 +
94872 +struct size_overflow_hash _002562_hash = {
94873 + .next = NULL,
94874 + .name = "compat_sys_pwritev",
94875 + .param = PARAM3,
94876 +};
94877 +
94878 +struct size_overflow_hash _002563_hash = {
94879 + .next = NULL,
94880 + .name = "alloc_apertures",
94881 + .param = PARAM1,
94882 +};
94883 +
94884 +struct size_overflow_hash _002564_hash = {
94885 + .next = NULL,
94886 + .name = "bin_uuid",
94887 + .param = PARAM3,
94888 +};
94889 +
94890 +struct size_overflow_hash _002565_hash = {
94891 + .next = NULL,
94892 + .name = "__copy_from_user_inatomic_nocache",
94893 + .param = PARAM3,
94894 +};
94895 +
94896 +struct size_overflow_hash _002566_hash = {
94897 + .next = NULL,
94898 + .name = "do_dmabuf_dirty_sou",
94899 + .param = PARAM7,
94900 +};
94901 +
94902 +struct size_overflow_hash _002567_hash = {
94903 + .next = NULL,
94904 + .name = "do_surface_dirty_sou",
94905 + .param = PARAM7,
94906 +};
94907 +
94908 +struct size_overflow_hash _002568_hash = {
94909 + .next = NULL,
94910 + .name = "drm_agp_bind_pages",
94911 + .param = PARAM3,
94912 +};
94913 +
94914 +struct size_overflow_hash _002569_hash = {
94915 + .next = NULL,
94916 + .name = "drm_calloc_large",
94917 + .param = PARAM1|PARAM2,
94918 +};
94919 +
94920 +struct size_overflow_hash _002571_hash = {
94921 + .next = NULL,
94922 + .name = "drm_fb_helper_init",
94923 + .param = PARAM3|PARAM4,
94924 +};
94925 +
94926 +struct size_overflow_hash _002573_hash = {
94927 + .next = NULL,
94928 + .name = "drm_ht_create",
94929 + .param = PARAM2,
94930 +};
94931 +
94932 +struct size_overflow_hash _002574_hash = {
94933 + .next = NULL,
94934 + .name = "drm_malloc_ab",
94935 + .param = PARAM1|PARAM2,
94936 +};
94937 +
94938 +struct size_overflow_hash _002576_hash = {
94939 + .next = NULL,
94940 + .name = "drm_mode_crtc_set_gamma_size",
94941 + .param = PARAM2,
94942 +};
94943 +
94944 +struct size_overflow_hash _002577_hash = {
94945 + .next = NULL,
94946 + .name = "drm_plane_init",
94947 + .param = PARAM6,
94948 +};
94949 +
94950 +struct size_overflow_hash _002578_hash = {
94951 + .next = NULL,
94952 + .name = "drm_property_create",
94953 + .param = PARAM4,
94954 +};
94955 +
94956 +struct size_overflow_hash _002579_hash = {
94957 + .next = NULL,
94958 + .name = "drm_property_create_blob",
94959 + .param = PARAM2,
94960 +};
94961 +
94962 +struct size_overflow_hash _002580_hash = {
94963 + .next = NULL,
94964 + .name = "drm_vblank_init",
94965 + .param = PARAM2,
94966 +};
94967 +
94968 +struct size_overflow_hash _002581_hash = {
94969 + .next = NULL,
94970 + .name = "drm_vmalloc_dma",
94971 + .param = PARAM1,
94972 +};
94973 +
94974 +struct size_overflow_hash _002582_hash = {
94975 + .next = NULL,
94976 + .name = "fb_alloc_cmap_gfp",
94977 + .param = PARAM2,
94978 +};
94979 +
94980 +struct size_overflow_hash _002583_hash = {
94981 + .next = NULL,
94982 + .name = "fbcon_prepare_logo",
94983 + .param = PARAM5,
94984 +};
94985 +
94986 +struct size_overflow_hash _002584_hash = {
94987 + .next = NULL,
94988 + .name = "fb_read",
94989 + .param = PARAM3,
94990 +};
94991 +
94992 +struct size_overflow_hash _002585_hash = {
94993 + .next = NULL,
94994 + .name = "fb_write",
94995 + .param = PARAM3,
94996 +};
94997 +
94998 +struct size_overflow_hash _002586_hash = {
94999 + .next = NULL,
95000 + .name = "framebuffer_alloc",
95001 + .param = PARAM1,
95002 +};
95003 +
95004 +struct size_overflow_hash _002587_hash = {
95005 + .next = NULL,
95006 + .name = "i915_cache_sharing_read",
95007 + .param = PARAM3,
95008 +};
95009 +
95010 +struct size_overflow_hash _002588_hash = {
95011 + .next = NULL,
95012 + .name = "i915_cache_sharing_write",
95013 + .param = PARAM3,
95014 +};
95015 +
95016 +struct size_overflow_hash _002589_hash = {
95017 + .next = NULL,
95018 + .name = "i915_max_freq_read",
95019 + .param = PARAM3,
95020 +};
95021 +
95022 +struct size_overflow_hash _002590_hash = {
95023 + .next = NULL,
95024 + .name = "i915_max_freq_write",
95025 + .param = PARAM3,
95026 +};
95027 +
95028 +struct size_overflow_hash _002591_hash = {
95029 + .next = NULL,
95030 + .name = "i915_wedged_read",
95031 + .param = PARAM3,
95032 +};
95033 +
95034 +struct size_overflow_hash _002592_hash = {
95035 + .next = NULL,
95036 + .name = "i915_wedged_write",
95037 + .param = PARAM3,
95038 +};
95039 +
95040 +struct size_overflow_hash _002593_hash = {
95041 + .next = NULL,
95042 + .name = "p9_client_read",
95043 + .param = PARAM5,
95044 +};
95045 +
95046 +struct size_overflow_hash _002594_hash = {
95047 + .next = NULL,
95048 + .name = "probe_kernel_write",
95049 + .param = PARAM3,
95050 +};
95051 +
95052 +struct size_overflow_hash _002595_hash = {
95053 + .next = NULL,
95054 + .name = "sched_feat_write",
95055 + .param = PARAM3,
95056 +};
95057 +
95058 +struct size_overflow_hash _002596_hash = {
95059 + .next = NULL,
95060 + .name = "sd_alloc_ctl_entry",
95061 + .param = PARAM1,
95062 +};
95063 +
95064 +struct size_overflow_hash _002597_hash = {
95065 + .next = &_000009_hash,
95066 + .name = "tstats_write",
95067 + .param = PARAM3,
95068 +};
95069 +
95070 +struct size_overflow_hash _002598_hash = {
95071 + .next = NULL,
95072 + .name = "ttm_bo_fbdev_io",
95073 + .param = PARAM4,
95074 +};
95075 +
95076 +struct size_overflow_hash _002599_hash = {
95077 + .next = NULL,
95078 + .name = "ttm_bo_io",
95079 + .param = PARAM5,
95080 +};
95081 +
95082 +struct size_overflow_hash _002600_hash = {
95083 + .next = NULL,
95084 + .name = "ttm_dma_page_pool_free",
95085 + .param = PARAM2,
95086 +};
95087 +
95088 +struct size_overflow_hash _002601_hash = {
95089 + .next = NULL,
95090 + .name = "ttm_page_pool_free",
95091 + .param = PARAM2,
95092 +};
95093 +
95094 +struct size_overflow_hash _002602_hash = {
95095 + .next = NULL,
95096 + .name = "vmw_execbuf_process",
95097 + .param = PARAM5,
95098 +};
95099 +
95100 +struct size_overflow_hash _002603_hash = {
95101 + .next = NULL,
95102 + .name = "vmw_fifo_reserve",
95103 + .param = PARAM2,
95104 +};
95105 +
95106 +struct size_overflow_hash _002604_hash = {
95107 + .next = NULL,
95108 + .name = "vmw_kms_present",
95109 + .param = PARAM9,
95110 +};
95111 +
95112 +struct size_overflow_hash _002605_hash = {
95113 + .next = NULL,
95114 + .name = "vmw_kms_readback",
95115 + .param = PARAM6,
95116 +};
95117 +
95118 +struct size_overflow_hash _002606_hash = {
95119 + .next = NULL,
95120 + .name = "do_dmabuf_dirty_ldu",
95121 + .param = PARAM6,
95122 +};
95123 +
95124 +struct size_overflow_hash _002607_hash = {
95125 + .next = NULL,
95126 + .name = "drm_mode_create_tv_properties",
95127 + .param = PARAM2,
95128 +};
95129 +
95130 +struct size_overflow_hash _002608_hash = {
95131 + .next = NULL,
95132 + .name = "drm_property_create_enum",
95133 + .param = PARAM5,
95134 +};
95135 +
95136 +struct size_overflow_hash _002609_hash = {
95137 + .next = NULL,
95138 + .name = "fast_user_write",
95139 + .param = PARAM5,
95140 +};
95141 +
95142 +struct size_overflow_hash _002610_hash = {
95143 + .next = NULL,
95144 + .name = "fb_alloc_cmap",
95145 + .param = PARAM2,
95146 +};
95147 +
95148 +struct size_overflow_hash _002611_hash = {
95149 + .next = NULL,
95150 + .name = "i915_gem_execbuffer_relocate_slow",
95151 + .param = PARAM7,
95152 +};
95153 +
95154 +struct size_overflow_hash _002612_hash = {
95155 + .next = NULL,
95156 + .name = "kgdb_hex2mem",
95157 + .param = PARAM3,
95158 +};
95159 +
95160 +struct size_overflow_hash _002613_hash = {
95161 + .next = NULL,
95162 + .name = "ttm_object_device_init",
95163 + .param = PARAM2,
95164 +};
95165 +
95166 +struct size_overflow_hash _002614_hash = {
95167 + .next = NULL,
95168 + .name = "ttm_object_file_init",
95169 + .param = PARAM2,
95170 +};
95171 +
95172 +struct size_overflow_hash _002615_hash = {
95173 + .next = NULL,
95174 + .name = "vmw_cursor_update_image",
95175 + .param = PARAM3|PARAM4,
95176 +};
95177 +
95178 +struct size_overflow_hash _002617_hash = {
95179 + .next = NULL,
95180 + .name = "vmw_gmr2_bind",
95181 + .param = PARAM3,
95182 +};
95183 +
95184 +struct size_overflow_hash _002618_hash = {
95185 + .next = NULL,
95186 + .name = "vmw_cursor_update_dmabuf",
95187 + .param = PARAM3|PARAM4,
95188 +};
95189 +
95190 +struct size_overflow_hash _002620_hash = {
95191 + .next = NULL,
95192 + .name = "vmw_gmr_bind",
95193 + .param = PARAM3,
95194 +};
95195 +
95196 +struct size_overflow_hash _002621_hash = {
95197 + .next = NULL,
95198 + .name = "vmw_du_crtc_cursor_set",
95199 + .param = PARAM4|PARAM5,
95200 +};
95201 +
95202 +struct size_overflow_hash _002622_hash = {
95203 + .next = NULL,
95204 + .name = "__module_alloc",
95205 + .param = PARAM1,
95206 +};
95207 +
95208 +struct size_overflow_hash _002623_hash = {
95209 + .next = NULL,
95210 + .name = "module_alloc_update_bounds_rw",
95211 + .param = PARAM1,
95212 +};
95213 +
95214 +struct size_overflow_hash _002624_hash = {
95215 + .next = NULL,
95216 + .name = "module_alloc_update_bounds_rx",
95217 + .param = PARAM1,
95218 +};
95219 +
95220 +struct size_overflow_hash *size_overflow_hash[65536] = {
95221 + [65495] = &_000001_hash,
95222 + [10918] = &_000002_hash,
95223 + [17559] = &_000003_hash,
95224 + [4365] = &_000004_hash,
95225 + [39351] = &_000005_hash,
95226 + [19214] = &_000006_hash,
95227 + [60297] = &_000007_hash,
95228 + [11268] = &_000008_hash,
95229 + [9444] = &_000010_hash,
95230 + [11917] = &_000012_hash,
95231 + [64015] = &_000013_hash,
95232 + [59590] = &_000015_hash,
95233 + [63630] = &_000016_hash,
95234 + [14302] = &_000017_hash,
95235 + [63488] = &_000018_hash,
95236 + [47570] = &_000019_hash,
95237 + [19366] = &_000020_hash,
95238 + [39308] = &_000021_hash,
95239 + [49683] = &_000023_hash,
95240 + [64140] = &_000024_hash,
95241 + [47274] = &_000025_hash,
95242 + [1240] = &_000026_hash,
95243 + [14892] = &_000027_hash,
95244 + [34853] = &_000028_hash,
95245 + [30487] = &_000029_hash,
95246 + [52399] = &_000030_hash,
95247 + [36399] = &_000031_hash,
95248 + [61139] = &_000033_hash,
95249 + [15822] = &_000034_hash,
95250 + [49465] = &_000035_hash,
95251 + [22554] = &_000036_hash,
95252 + [54378] = &_000037_hash,
95253 + [33521] = &_000038_hash,
95254 + [31485] = &_000039_hash,
95255 + [3628] = &_000040_hash,
95256 + [3194] = &_000041_hash,
95257 + [54860] = &_000042_hash,
95258 + [27083] = &_000043_hash,
95259 + [57259] = &_000044_hash,
95260 + [22960] = &_000045_hash,
95261 + [44398] = &_000046_hash,
95262 + [63550] = &_000047_hash,
95263 + [13245] = &_000048_hash,
95264 + [58192] = &_000049_hash,
95265 + [9991] = &_000050_hash,
95266 + [4999] = &_000051_hash,
95267 + [4471] = &_000052_hash,
95268 + [24317] = &_000053_hash,
95269 + [5224] = &_000054_hash,
95270 + [13791] = &_000055_hash,
95271 + [21113] = &_000056_hash,
95272 + [50539] = &_000057_hash,
95273 + [43185] = &_000058_hash,
95274 + [2275] = &_000059_hash,
95275 + [58860] = &_000060_hash,
95276 + [38971] = &_000061_hash,
95277 + [19986] = &_000062_hash,
95278 + [13748] = &_000064_hash,
95279 + [34384] = &_000065_hash,
95280 + [42740] = &_000066_hash,
95281 + [34838] = &_000067_hash,
95282 + [28556] = &_000068_hash,
95283 + [4593] = &_000069_hash,
95284 + [17163] = &_000070_hash,
95285 + [25628] = &_000071_hash,
95286 + [15218] = &_000072_hash,
95287 + [61841] = &_000073_hash,
95288 + [50782] = &_000074_hash,
95289 + [54672] = &_000076_hash,
95290 + [24075] = &_000077_hash,
95291 + [52733] = &_000078_hash,
95292 + [34264] = &_000079_hash,
95293 + [6706] = &_000080_hash,
95294 + [57500] = &_000081_hash,
95295 + [24873] = &_000082_hash,
95296 + [7790] = &_000084_hash,
95297 + [42064] = &_000085_hash,
95298 + [35053] = &_000086_hash,
95299 + [27143] = &_000087_hash,
95300 + [27089] = &_000088_hash,
95301 + [42252] = &_000089_hash,
95302 + [29504] = &_000090_hash,
95303 + [3703] = &_000091_hash,
95304 + [11678] = &_000092_hash,
95305 + [6926] = &_000093_hash,
95306 + [33274] = &_000094_hash,
95307 + [43535] = &_000095_hash,
95308 + [35104] = &_000096_hash,
95309 + [50857] = &_000097_hash,
95310 + [5368] = &_000098_hash,
95311 + [10259] = &_000100_hash,
95312 + [27664] = &_000101_hash,
95313 + [18710] = &_000102_hash,
95314 + [35974] = &_000103_hash,
95315 + [656] = &_000104_hash,
95316 + [41917] = &_000105_hash,
95317 + [5846] = &_000106_hash,
95318 + [18913] = &_000107_hash,
95319 + [24366] = &_000108_hash,
95320 + [10900] = &_000109_hash,
95321 + [54514] = &_000110_hash,
95322 + [61390] = &_000111_hash,
95323 + [2143] = &_000112_hash,
95324 + [54503] = &_000113_hash,
95325 + [5484] = &_000114_hash,
95326 + [23957] = &_000115_hash,
95327 + [23588] = &_000116_hash,
95328 + [3740] = &_000117_hash,
95329 + [15058] = &_000118_hash,
95330 + [61904] = &_000119_hash,
95331 + [44729] = &_000120_hash,
95332 + [58079] = &_000121_hash,
95333 + [3649] = &_000122_hash,
95334 + [2896] = &_000123_hash,
95335 + [36858] = &_000124_hash,
95336 + [1711] = &_000125_hash,
95337 + [36280] = &_000126_hash,
95338 + [31318] = &_000127_hash,
95339 + [58841] = &_000128_hash,
95340 + [21451] = &_000130_hash,
95341 + [19089] = &_000131_hash,
95342 + [31966] = &_000132_hash,
95343 + [50140] = &_000133_hash,
95344 + [45534] = &_000134_hash,
95345 + [17551] = &_000135_hash,
95346 + [45340] = &_000136_hash,
95347 + [1774] = &_000137_hash,
95348 + [33479] = &_000138_hash,
95349 + [9088] = &_000139_hash,
95350 + [54106] = &_000141_hash,
95351 + [33356] = &_000142_hash,
95352 + [9957] = &_000143_hash,
95353 + [8712] = &_000145_hash,
95354 + [41975] = &_000147_hash,
95355 + [4412] = &_000148_hash,
95356 + [4707] = &_000149_hash,
95357 + [3071] = &_000150_hash,
95358 + [11942] = &_000152_hash,
95359 + [30701] = &_000153_hash,
95360 + [37766] = &_000154_hash,
95361 + [65336] = &_000155_hash,
95362 + [31902] = &_000156_hash,
95363 + [18055] = &_000157_hash,
95364 + [8506] = &_000158_hash,
95365 + [45156] = &_000159_hash,
95366 + [4966] = &_000160_hash,
95367 + [551] = &_000161_hash,
95368 + [41196] = &_000162_hash,
95369 + [38836] = &_000163_hash,
95370 + [44320] = &_000164_hash,
95371 + [7792] = &_000165_hash,
95372 + [54296] = &_000166_hash,
95373 + [28385] = &_000167_hash,
95374 + [6892] = &_000168_hash,
95375 + [15674] = &_000169_hash,
95376 + [2513] = &_000170_hash,
95377 + [13060] = &_000171_hash,
95378 + [9676] = &_000172_hash,
95379 + [63314] = &_000173_hash,
95380 + [3992] = &_000174_hash,
95381 + [58763] = &_000175_hash,
95382 + [41852] = &_000176_hash,
95383 + [18215] = &_000177_hash,
95384 + [48641] = &_000178_hash,
95385 + [64827] = &_000179_hash,
95386 + [29478] = &_000180_hash,
95387 + [44896] = &_000181_hash,
95388 + [2259] = &_000182_hash,
95389 + [46218] = &_000183_hash,
95390 + [26363] = &_000184_hash,
95391 + [56296] = &_000185_hash,
95392 + [43533] = &_000186_hash,
95393 + [19423] = &_000187_hash,
95394 + [50814] = &_000188_hash,
95395 + [39915] = &_000189_hash,
95396 + [12641] = &_000190_hash,
95397 + [51997] = &_000191_hash,
95398 + [30516] = &_000192_hash,
95399 + [11868] = &_000193_hash,
95400 + [26847] = &_000194_hash,
95401 + [64816] = &_000195_hash,
95402 + [58545] = &_000196_hash,
95403 + [57908] = &_000197_hash,
95404 + [29731] = &_000198_hash,
95405 + [3168] = &_000199_hash,
95406 + [13414] = &_000200_hash,
95407 + [58813] = &_000202_hash,
95408 + [59008] = &_000203_hash,
95409 + [46715] = &_000204_hash,
95410 + [40558] = &_000205_hash,
95411 + [17733] = &_000206_hash,
95412 + [14796] = &_000207_hash,
95413 + [45976] = &_000208_hash,
95414 + [64623] = &_000209_hash,
95415 + [56247] = &_000210_hash,
95416 + [64886] = &_000211_hash,
95417 + [59892] = &_000212_hash,
95418 + [7932] = &_000213_hash,
95419 + [1401] = &_000214_hash,
95420 + [3142] = &_000215_hash,
95421 + [56435] = &_000216_hash,
95422 + [57309] = &_000217_hash,
95423 + [30777] = &_000218_hash,
95424 + [54582] = &_000219_hash,
95425 + [58191] = &_000220_hash,
95426 + [3883] = &_000221_hash,
95427 + [5694] = &_000222_hash,
95428 + [62908] = &_000223_hash,
95429 + [41916] = &_000224_hash,
95430 + [51869] = &_000225_hash,
95431 + [26187] = &_000226_hash,
95432 + [10897] = &_000227_hash,
95433 + [16804] = &_000228_hash,
95434 + [38202] = &_000229_hash,
95435 + [14861] = &_000230_hash,
95436 + [18275] = &_000231_hash,
95437 + [20347] = &_000232_hash,
95438 + [43753] = &_000233_hash,
95439 + [1060] = &_000234_hash,
95440 + [58883] = &_000235_hash,
95441 + [25067] = &_000236_hash,
95442 + [42437] = &_000237_hash,
95443 + [23182] = &_000238_hash,
95444 + [10024] = &_000239_hash,
95445 + [62224] = &_000240_hash,
95446 + [33769] = &_000241_hash,
95447 + [27495] = &_000242_hash,
95448 + [28993] = &_000243_hash,
95449 + [49617] = &_000244_hash,
95450 + [46766] = &_000245_hash,
95451 + [45609] = &_000246_hash,
95452 + [23449] = &_000248_hash,
95453 + [41497] = &_000249_hash,
95454 + [52145] = &_000250_hash,
95455 + [48694] = &_000251_hash,
95456 + [23652] = &_000252_hash,
95457 + [1206] = &_000253_hash,
95458 + [23310] = &_000254_hash,
95459 + [34477] = &_000255_hash,
95460 + [61635] = &_000256_hash,
95461 + [59502] = &_000257_hash,
95462 + [36885] = &_000258_hash,
95463 + [47993] = &_000259_hash,
95464 + [12251] = &_000260_hash,
95465 + [38629] = &_000261_hash,
95466 + [32493] = &_000262_hash,
95467 + [35110] = &_000263_hash,
95468 + [53976] = &_000264_hash,
95469 + [14591] = &_000265_hash,
95470 + [27660] = &_000266_hash,
95471 + [34894] = &_000267_hash,
95472 + [51756] = &_000268_hash,
95473 + [64378] = &_000269_hash,
95474 + [40548] = &_000270_hash,
95475 + [60709] = &_000271_hash,
95476 + [34586] = &_000272_hash,
95477 + [65034] = &_000273_hash,
95478 + [19393] = &_000274_hash,
95479 + [39542] = &_000275_hash,
95480 + [1273] = &_000276_hash,
95481 + [31942] = &_000277_hash,
95482 + [56368] = &_000278_hash,
95483 + [18604] = &_000279_hash,
95484 + [1192] = &_000280_hash,
95485 + [21208] = &_000281_hash,
95486 + [64958] = &_000282_hash,
95487 + [38813] = &_000283_hash,
95488 + [64478] = &_000285_hash,
95489 + [49161] = &_000286_hash,
95490 + [19824] = &_000287_hash,
95491 + [13596] = &_000288_hash,
95492 + [64403] = &_000289_hash,
95493 + [24139] = &_000290_hash,
95494 + [40905] = &_000291_hash,
95495 + [41428] = &_000292_hash,
95496 + [29109] = &_000293_hash,
95497 + [50021] = &_000294_hash,
95498 + [2418] = &_000295_hash,
95499 + [52383] = &_000296_hash,
95500 + [34133] = &_000297_hash,
95501 + [43208] = &_000298_hash,
95502 + [29061] = &_000299_hash,
95503 + [8628] = &_000300_hash,
95504 + [25502] = &_000301_hash,
95505 + [40153] = &_000302_hash,
95506 + [36336] = &_000304_hash,
95507 + [56331] = &_000306_hash,
95508 + [27770] = &_000307_hash,
95509 + [47889] = &_000309_hash,
95510 + [26061] = &_000310_hash,
95511 + [22173] = &_000311_hash,
95512 + [5091] = &_000313_hash,
95513 + [50163] = &_000314_hash,
95514 + [65279] = &_000315_hash,
95515 + [31920] = &_000316_hash,
95516 + [9566] = &_000317_hash,
95517 + [4690] = &_000319_hash,
95518 + [63435] = &_000320_hash,
95519 + [14908] = &_000321_hash,
95520 + [32646] = &_000322_hash,
95521 + [10765] = &_000323_hash,
95522 + [39666] = &_000324_hash,
95523 + [18074] = &_000325_hash,
95524 + [54740] = &_000326_hash,
95525 + [50207] = &_000327_hash,
95526 + [29346] = &_000328_hash,
95527 + [45398] = &_000329_hash,
95528 + [48413] = &_000330_hash,
95529 + [14857] = &_000331_hash,
95530 + [5611] = &_000332_hash,
95531 + [31668] = &_000333_hash,
95532 + [27579] = &_000334_hash,
95533 + [46556] = &_000335_hash,
95534 + [12769] = &_000336_hash,
95535 + [19658] = &_000337_hash,
95536 + [95] = &_000338_hash,
95537 + [17424] = &_000339_hash,
95538 + [9511] = &_000340_hash,
95539 + [17307] = &_000341_hash,
95540 + [26201] = &_000342_hash,
95541 + [33308] = &_000343_hash,
95542 + [31413] = &_000344_hash,
95543 + [55247] = &_000345_hash,
95544 + [2707] = &_000346_hash,
95545 + [53831] = &_000348_hash,
95546 + [22085] = &_000350_hash,
95547 + [7349] = &_000351_hash,
95548 + [46976] = &_000352_hash,
95549 + [46119] = &_000353_hash,
95550 + [21504] = &_000354_hash,
95551 + [18285] = &_000355_hash,
95552 + [38655] = &_000356_hash,
95553 + [47205] = &_000357_hash,
95554 + [32673] = &_000358_hash,
95555 + [23190] = &_000359_hash,
95556 + [44674] = &_000360_hash,
95557 + [28545] = &_000361_hash,
95558 + [31200] = &_000362_hash,
95559 + [56831] = &_000363_hash,
95560 + [3393] = &_000364_hash,
95561 + [5024] = &_000365_hash,
95562 + [18844] = &_000366_hash,
95563 + [59622] = &_000367_hash,
95564 + [33518] = &_000368_hash,
95565 + [17131] = &_000369_hash,
95566 + [16908] = &_000370_hash,
95567 + [3813] = &_000373_hash,
95568 + [4804] = &_000374_hash,
95569 + [33523] = &_000375_hash,
95570 + [29886] = &_000376_hash,
95571 + [49806] = &_000377_hash,
95572 + [15070] = &_000378_hash,
95573 + [59574] = &_000379_hash,
95574 + [63442] = &_000380_hash,
95575 + [42990] = &_000381_hash,
95576 + [12509] = &_000383_hash,
95577 + [12285] = &_000384_hash,
95578 + [15072] = &_000385_hash,
95579 + [38153] = &_000387_hash,
95580 + [23097] = &_000389_hash,
95581 + [56027] = &_000390_hash,
95582 + [3894] = &_000391_hash,
95583 + [18744] = &_000392_hash,
95584 + [37750] = &_000394_hash,
95585 + [41116] = &_000395_hash,
95586 + [42594] = &_000396_hash,
95587 + [31453] = &_000397_hash,
95588 + [57251] = &_000398_hash,
95589 + [60775] = &_000399_hash,
95590 + [32833] = &_000400_hash,
95591 + [28371] = &_000401_hash,
95592 + [15534] = &_000402_hash,
95593 + [1607] = &_000403_hash,
95594 + [17662] = &_000404_hash,
95595 + [37079] = &_000405_hash,
95596 + [12332] = &_000406_hash,
95597 + [57066] = &_000407_hash,
95598 + [50453] = &_000408_hash,
95599 + [40766] = &_000409_hash,
95600 + [64404] = &_000410_hash,
95601 + [23102] = &_000411_hash,
95602 + [3447] = &_000412_hash,
95603 + [39897] = &_000413_hash,
95604 + [55628] = &_000414_hash,
95605 + [310] = &_000415_hash,
95606 + [13289] = &_000416_hash,
95607 + [52513] = &_000417_hash,
95608 + [19758] = &_000418_hash,
95609 + [42012] = &_000419_hash,
95610 + [48063] = &_000420_hash,
95611 + [5214] = &_000421_hash,
95612 + [34034] = &_000422_hash,
95613 + [33210] = &_000423_hash,
95614 + [39554] = &_000424_hash,
95615 + [60276] = &_000426_hash,
95616 + [29277] = &_000427_hash,
95617 + [61119] = &_000428_hash,
95618 + [29842] = &_000429_hash,
95619 + [25625] = &_000430_hash,
95620 + [6376] = &_000431_hash,
95621 + [15954] = &_000432_hash,
95622 + [63648] = &_000434_hash,
95623 + [63845] = &_000435_hash,
95624 + [32064] = &_000436_hash,
95625 + [29142] = &_000437_hash,
95626 + [267] = &_000438_hash,
95627 + [50830] = &_000439_hash,
95628 + [60960] = &_000440_hash,
95629 + [45021] = &_000441_hash,
95630 + [40159] = &_000442_hash,
95631 + [59882] = &_000443_hash,
95632 + [33719] = &_000444_hash,
95633 + [18262] = &_000445_hash,
95634 + [57662] = &_000446_hash,
95635 + [6888] = &_000447_hash,
95636 + [45800] = &_000448_hash,
95637 + [12137] = &_000449_hash,
95638 + [17875] = &_000450_hash,
95639 + [16366] = &_000451_hash,
95640 + [19678] = &_000452_hash,
95641 + [45592] = &_000453_hash,
95642 + [35691] = &_000454_hash,
95643 + [9397] = &_000455_hash,
95644 + [20469] = &_000456_hash,
95645 + [29735] = &_000457_hash,
95646 + [25816] = &_000458_hash,
95647 + [25569] = &_000459_hash,
95648 + [9904] = &_000460_hash,
95649 + [8206] = &_000461_hash,
95650 + [37223] = &_000462_hash,
95651 + [59049] = &_000463_hash,
95652 + [37685] = &_000464_hash,
95653 + [18459] = &_000465_hash,
95654 + [27724] = &_000466_hash,
95655 + [34736] = &_000467_hash,
95656 + [22987] = &_000468_hash,
95657 + [38092] = &_000469_hash,
95658 + [17528] = &_000470_hash,
95659 + [19043] = &_000471_hash,
95660 + [18254] = &_000472_hash,
95661 + [58544] = &_000473_hash,
95662 + [6547] = &_000474_hash,
95663 + [34366] = &_000475_hash,
95664 + [53407] = &_000476_hash,
95665 + [12284] = &_000477_hash,
95666 + [43573] = &_000478_hash,
95667 + [8212] = &_000479_hash,
95668 + [64753] = &_000480_hash,
95669 + [23091] = &_000481_hash,
95670 + [38195] = &_000482_hash,
95671 + [26577] = &_000483_hash,
95672 + [11830] = &_000484_hash,
95673 + [17598] = &_000485_hash,
95674 + [34271] = &_000486_hash,
95675 + [27235] = &_000487_hash,
95676 + [16431] = &_000488_hash,
95677 + [58129] = &_000489_hash,
95678 + [55836] = &_000490_hash,
95679 + [10984] = &_000493_hash,
95680 + [37330] = &_000494_hash,
95681 + [26933] = &_000495_hash,
95682 + [51641] = &_000496_hash,
95683 + [9232] = &_000497_hash,
95684 + [25178] = &_000498_hash,
95685 + [29654] = &_000499_hash,
95686 + [43755] = &_000500_hash,
95687 + [51684] = &_000502_hash,
95688 + [30843] = &_000503_hash,
95689 + [3793] = &_000504_hash,
95690 + [5775] = &_000505_hash,
95691 + [49870] = &_000506_hash,
95692 + [46949] = &_000507_hash,
95693 + [31091] = &_000508_hash,
95694 + [49578] = &_000509_hash,
95695 + [40129] = &_000510_hash,
95696 + [18862] = &_000512_hash,
95697 + [11687] = &_000513_hash,
95698 + [1383] = &_000514_hash,
95699 + [28041] = &_000515_hash,
95700 + [11829] = &_000516_hash,
95701 + [734] = &_000517_hash,
95702 + [13440] = &_000518_hash,
95703 + [30941] = &_000519_hash,
95704 + [29248] = &_000520_hash,
95705 + [46077] = &_000521_hash,
95706 + [2256] = &_000522_hash,
95707 + [46996] = &_000523_hash,
95708 + [60774] = &_000524_hash,
95709 + [10511] = &_000525_hash,
95710 + [48998] = &_000526_hash,
95711 + [63830] = &_000527_hash,
95712 + [61932] = &_000528_hash,
95713 + [48498] = &_000529_hash,
95714 + [42737] = &_000530_hash,
95715 + [39474] = &_000531_hash,
95716 + [53582] = &_000532_hash,
95717 + [54539] = &_000533_hash,
95718 + [5848] = &_000534_hash,
95719 + [59178] = &_000535_hash,
95720 + [37006] = &_000536_hash,
95721 + [2861] = &_000537_hash,
95722 + [50240] = &_000538_hash,
95723 + [30610] = &_000539_hash,
95724 + [8620] = &_000540_hash,
95725 + [61190] = &_000541_hash,
95726 + [46029] = &_000542_hash,
95727 + [50633] = &_000543_hash,
95728 + [3122] = &_000544_hash,
95729 + [17864] = &_000546_hash,
95730 + [48363] = &_000547_hash,
95731 + [12465] = &_000548_hash,
95732 + [50380] = &_000549_hash,
95733 + [64086] = &_000550_hash,
95734 + [45691] = &_000551_hash,
95735 + [17989] = &_000552_hash,
95736 + [46363] = &_000553_hash,
95737 + [49621] = &_000554_hash,
95738 + [52280] = &_000555_hash,
95739 + [2618] = &_000556_hash,
95740 + [42525] = &_000557_hash,
95741 + [14400] = &_000558_hash,
95742 + [11695] = &_000559_hash,
95743 + [9605] = &_000560_hash,
95744 + [9061] = &_000561_hash,
95745 + [18525] = &_000563_hash,
95746 + [20676] = &_000564_hash,
95747 + [63474] = &_000565_hash,
95748 + [36169] = &_000566_hash,
95749 + [9786] = &_000567_hash,
95750 + [30092] = &_000568_hash,
95751 + [9670] = &_000570_hash,
95752 + [13900] = &_000571_hash,
95753 + [28738] = &_000572_hash,
95754 + [10872] = &_000573_hash,
95755 + [27332] = &_000574_hash,
95756 + [56847] = &_000575_hash,
95757 + [20970] = &_000576_hash,
95758 + [45190] = &_000577_hash,
95759 + [8436] = &_000578_hash,
95760 + [50942] = &_000579_hash,
95761 + [62205] = &_000580_hash,
95762 + [44510] = &_000581_hash,
95763 + [16945] = &_000582_hash,
95764 + [51042] = &_000583_hash,
95765 + [49330] = &_000585_hash,
95766 + [61340] = &_000586_hash,
95767 + [45164] = &_000587_hash,
95768 + [36876] = &_000588_hash,
95769 + [61288] = &_000589_hash,
95770 + [18824] = &_000590_hash,
95771 + [4486] = &_000591_hash,
95772 + [4373] = &_000592_hash,
95773 + [42962] = &_000593_hash,
95774 + [11050] = &_000594_hash,
95775 + [34803] = &_000595_hash,
95776 + [45161] = &_000596_hash,
95777 + [8580] = &_000597_hash,
95778 + [50798] = &_000598_hash,
95779 + [4710] = &_000599_hash,
95780 + [12664] = &_000600_hash,
95781 + [62649] = &_000601_hash,
95782 + [45437] = &_000602_hash,
95783 + [22300] = &_000603_hash,
95784 + [63534] = &_000604_hash,
95785 + [39189] = &_000605_hash,
95786 + [50172] = &_000606_hash,
95787 + [35786] = &_000607_hash,
95788 + [64139] = &_000608_hash,
95789 + [47613] = &_000609_hash,
95790 + [3888] = &_000610_hash,
95791 + [43202] = &_000611_hash,
95792 + [18293] = &_000612_hash,
95793 + [13330] = &_000613_hash,
95794 + [43436] = &_000614_hash,
95795 + [22894] = &_000615_hash,
95796 + [23178] = &_000616_hash,
95797 + [34446] = &_000617_hash,
95798 + [64340] = &_000619_hash,
95799 + [26131] = &_000620_hash,
95800 + [41332] = &_000621_hash,
95801 + [31303] = &_000622_hash,
95802 + [35892] = &_000623_hash,
95803 + [55799] = &_000624_hash,
95804 + [42150] = &_000625_hash,
95805 + [63033] = &_000626_hash,
95806 + [48842] = &_000627_hash,
95807 + [3910] = &_000629_hash,
95808 + [14717] = &_000630_hash,
95809 + [36112] = &_000631_hash,
95810 + [33984] = &_000632_hash,
95811 + [45039] = &_000633_hash,
95812 + [6724] = &_000634_hash,
95813 + [57003] = &_000635_hash,
95814 + [61168] = &_000637_hash,
95815 + [1135] = &_000638_hash,
95816 + [37519] = &_000639_hash,
95817 + [36132] = &_000640_hash,
95818 + [58700] = &_000641_hash,
95819 + [30352] = &_000642_hash,
95820 + [32308] = &_000643_hash,
95821 + [20751] = &_000644_hash,
95822 + [28849] = &_000645_hash,
95823 + [20737] = &_000646_hash,
95824 + [9671] = &_000648_hash,
95825 + [46184] = &_000649_hash,
95826 + [56348] = &_000650_hash,
95827 + [53735] = &_000651_hash,
95828 + [48528] = &_000652_hash,
95829 + [60621] = &_000653_hash,
95830 + [64367] = &_000654_hash,
95831 + [62671] = &_000655_hash,
95832 + [50803] = &_000656_hash,
95833 + [54837] = &_000657_hash,
95834 + [26928] = &_000658_hash,
95835 + [8395] = &_000659_hash,
95836 + [3034] = &_000660_hash,
95837 + [62573] = &_000661_hash,
95838 + [59346] = &_000662_hash,
95839 + [12135] = &_000663_hash,
95840 + [2733] = &_000664_hash,
95841 + [31372] = &_000665_hash,
95842 + [23984] = &_000666_hash,
95843 + [22049] = &_000667_hash,
95844 + [11828] = &_000668_hash,
95845 + [22404] = &_000669_hash,
95846 + [7685] = &_000671_hash,
95847 + [37422] = &_000672_hash,
95848 + [36311] = &_000674_hash,
95849 + [27643] = &_000675_hash,
95850 + [14273] = &_000676_hash,
95851 + [21310] = &_000677_hash,
95852 + [54703] = &_000678_hash,
95853 + [49831] = &_000679_hash,
95854 + [24052] = &_000680_hash,
95855 + [38037] = &_000681_hash,
95856 + [1075] = &_000682_hash,
95857 + [62955] = &_000683_hash,
95858 + [20985] = &_000684_hash,
95859 + [14208] = &_000685_hash,
95860 + [53626] = &_000686_hash,
95861 + [34532] = &_000687_hash,
95862 + [49575] = &_000688_hash,
95863 + [41283] = &_000689_hash,
95864 + [44667] = &_000690_hash,
95865 + [46698] = &_000691_hash,
95866 + [59670] = &_000692_hash,
95867 + [54343] = &_000693_hash,
95868 + [17269] = &_000694_hash,
95869 + [15093] = &_000695_hash,
95870 + [64490] = &_000697_hash,
95871 + [30030] = &_000698_hash,
95872 + [7203] = &_000699_hash,
95873 + [15345] = &_000700_hash,
95874 + [39151] = &_000701_hash,
95875 + [64171] = &_000704_hash,
95876 + [51337] = &_000708_hash,
95877 + [3566] = &_000709_hash,
95878 + [45775] = &_000710_hash,
95879 + [62186] = &_000711_hash,
95880 + [48698] = &_000712_hash,
95881 + [62396] = &_000713_hash,
95882 + [54291] = &_000714_hash,
95883 + [64862] = &_000715_hash,
95884 + [20948] = &_000716_hash,
95885 + [54103] = &_000717_hash,
95886 + [50090] = &_000718_hash,
95887 + [9194] = &_000719_hash,
95888 + [20537] = &_000720_hash,
95889 + [49392] = &_000722_hash,
95890 + [7519] = &_000723_hash,
95891 + [31617] = &_000725_hash,
95892 + [3311] = &_000726_hash,
95893 + [10165] = &_000727_hash,
95894 + [46094] = &_000728_hash,
95895 + [21677] = &_000729_hash,
95896 + [13443] = &_000730_hash,
95897 + [21153] = &_000731_hash,
95898 + [43440] = &_000732_hash,
95899 + [17347] = &_000733_hash,
95900 + [12257] = &_000734_hash,
95901 + [65483] = &_000735_hash,
95902 + [4722] = &_000736_hash,
95903 + [61917] = &_000737_hash,
95904 + [38644] = &_000739_hash,
95905 + [62770] = &_000740_hash,
95906 + [36155] = &_000741_hash,
95907 + [36481] = &_000742_hash,
95908 + [37660] = &_000743_hash,
95909 + [47379] = &_000744_hash,
95910 + [51424] = &_000746_hash,
95911 + [28338] = &_000748_hash,
95912 + [9431] = &_000749_hash,
95913 + [9893] = &_000750_hash,
95914 + [12964] = &_000751_hash,
95915 + [42643] = &_000752_hash,
95916 + [43806] = &_000753_hash,
95917 + [63720] = &_000754_hash,
95918 + [49839] = &_000755_hash,
95919 + [8334] = &_000756_hash,
95920 + [13666] = &_000757_hash,
95921 + [8570] = &_000758_hash,
95922 + [64541] = &_000759_hash,
95923 + [22961] = &_000760_hash,
95924 + [9110] = &_000761_hash,
95925 + [950] = &_000762_hash,
95926 + [38176] = &_000763_hash,
95927 + [50478] = &_000765_hash,
95928 + [62488] = &_000766_hash,
95929 + [54258] = &_000767_hash,
95930 + [56515] = &_000768_hash,
95931 + [57] = &_000770_hash,
95932 + [19332] = &_000771_hash,
95933 + [41078] = &_000772_hash,
95934 + [19852] = &_000773_hash,
95935 + [50198] = &_000774_hash,
95936 + [50318] = &_000776_hash,
95937 + [19109] = &_000777_hash,
95938 + [32880] = &_000778_hash,
95939 + [34641] = &_000779_hash,
95940 + [61621] = &_000780_hash,
95941 + [11329] = &_000781_hash,
95942 + [24645] = &_000782_hash,
95943 + [57835] = &_000783_hash,
95944 + [22861] = &_000784_hash,
95945 + [39155] = &_000785_hash,
95946 + [19064] = &_000786_hash,
95947 + [31244] = &_000787_hash,
95948 + [18048] = &_000788_hash,
95949 + [55134] = &_000790_hash,
95950 + [25277] = &_000791_hash,
95951 + [60483] = &_000792_hash,
95952 + [47024] = &_000793_hash,
95953 + [56453] = &_000794_hash,
95954 + [28053] = &_000796_hash,
95955 + [24007] = &_000798_hash,
95956 + [25747] = &_000799_hash,
95957 + [36746] = &_000800_hash,
95958 + [23447] = &_000802_hash,
95959 + [56058] = &_000803_hash,
95960 + [12179] = &_000804_hash,
95961 + [3021] = &_000805_hash,
95962 + [11398] = &_000806_hash,
95963 + [50084] = &_000807_hash,
95964 + [7708] = &_000808_hash,
95965 + [6112] = &_000809_hash,
95966 + [45679] = &_000810_hash,
95967 + [32521] = &_000811_hash,
95968 + [50831] = &_000812_hash,
95969 + [38390] = &_000814_hash,
95970 + [54377] = &_000815_hash,
95971 + [25910] = &_000816_hash,
95972 + [1387] = &_000817_hash,
95973 + [55215] = &_000818_hash,
95974 + [51849] = &_000819_hash,
95975 + [9604] = &_000820_hash,
95976 + [33551] = &_000821_hash,
95977 + [51275] = &_000822_hash,
95978 + [45718] = &_000823_hash,
95979 + [754] = &_000824_hash,
95980 + [51430] = &_000825_hash,
95981 + [10379] = &_000826_hash,
95982 + [2109] = &_000827_hash,
95983 + [31801] = &_000828_hash,
95984 + [5941] = &_000829_hash,
95985 + [54846] = &_000830_hash,
95986 + [63388] = &_000831_hash,
95987 + [19485] = &_000832_hash,
95988 + [46892] = &_000833_hash,
95989 + [30456] = &_000834_hash,
95990 + [57412] = &_000835_hash,
95991 + [47605] = &_000836_hash,
95992 + [31680] = &_000837_hash,
95993 + [64712] = &_000838_hash,
95994 + [48868] = &_000839_hash,
95995 + [9438] = &_000840_hash,
95996 + [18775] = &_000841_hash,
95997 + [48014] = &_000842_hash,
95998 + [9075] = &_000843_hash,
95999 + [41746] = &_000844_hash,
96000 + [54793] = &_000845_hash,
96001 + [15981] = &_000846_hash,
96002 + [9559] = &_000847_hash,
96003 + [27509] = &_000848_hash,
96004 + [7471] = &_000849_hash,
96005 + [61100] = &_000850_hash,
96006 + [16003] = &_000851_hash,
96007 + [33714] = &_000852_hash,
96008 + [51665] = &_000853_hash,
96009 + [24398] = &_000854_hash,
96010 + [59833] = &_000855_hash,
96011 + [55862] = &_000856_hash,
96012 + [37420] = &_000857_hash,
96013 + [4874] = &_000858_hash,
96014 + [7024] = &_000860_hash,
96015 + [35351] = &_000861_hash,
96016 + [34547] = &_000862_hash,
96017 + [12579] = &_000865_hash,
96018 + [46328] = &_000866_hash,
96019 + [26483] = &_000868_hash,
96020 + [1196] = &_000869_hash,
96021 + [25714] = &_000870_hash,
96022 + [50186] = &_000871_hash,
96023 + [60202] = &_000872_hash,
96024 + [63138] = &_000873_hash,
96025 + [19065] = &_000874_hash,
96026 + [46860] = &_000875_hash,
96027 + [6924] = &_000876_hash,
96028 + [815] = &_000877_hash,
96029 + [64130] = &_000878_hash,
96030 + [48187] = &_000880_hash,
96031 + [39188] = &_000881_hash,
96032 + [132] = &_000883_hash,
96033 + [60165] = &_000884_hash,
96034 + [57515] = &_000885_hash,
96035 + [21240] = &_000886_hash,
96036 + [31183] = &_000887_hash,
96037 + [38859] = &_000888_hash,
96038 + [11172] = &_000889_hash,
96039 + [12187] = &_000890_hash,
96040 + [40199] = &_000892_hash,
96041 + [57953] = &_000893_hash,
96042 + [28867] = &_000895_hash,
96043 + [54036] = &_000896_hash,
96044 + [4388] = &_000897_hash,
96045 + [38563] = &_000898_hash,
96046 + [42047] = &_000899_hash,
96047 + [29983] = &_000900_hash,
96048 + [26650] = &_000901_hash,
96049 + [29236] = &_000902_hash,
96050 + [49390] = &_000903_hash,
96051 + [50425] = &_000904_hash,
96052 + [15193] = &_000905_hash,
96053 + [38510] = &_000906_hash,
96054 + [58292] = &_000907_hash,
96055 + [54913] = &_000908_hash,
96056 + [38683] = &_000910_hash,
96057 + [45863] = &_000911_hash,
96058 + [54130] = &_000912_hash,
96059 + [41542] = &_000913_hash,
96060 + [23610] = &_000914_hash,
96061 + [39226] = &_000916_hash,
96062 + [36147] = &_000917_hash,
96063 + [37876] = &_000918_hash,
96064 + [12295] = &_000919_hash,
96065 + [11686] = &_000920_hash,
96066 + [17320] = &_000921_hash,
96067 + [45428] = &_000922_hash,
96068 + [51088] = &_000924_hash,
96069 + [37311] = &_000925_hash,
96070 + [56933] = &_000926_hash,
96071 + [41855] = &_000927_hash,
96072 + [16969] = &_000928_hash,
96073 + [20399] = &_000929_hash,
96074 + [3233] = &_000930_hash,
96075 + [31140] = &_000931_hash,
96076 + [37489] = &_000932_hash,
96077 + [11510] = &_000933_hash,
96078 + [18226] = &_000934_hash,
96079 + [42792] = &_000935_hash,
96080 + [10009] = &_000936_hash,
96081 + [37611] = &_000937_hash,
96082 + [48704] = &_000938_hash,
96083 + [11106] = &_000939_hash,
96084 + [63555] = &_000940_hash,
96085 + [35003] = &_000941_hash,
96086 + [50411] = &_000943_hash,
96087 + [25286] = &_000944_hash,
96088 + [48662] = &_000945_hash,
96089 + [29566] = &_000946_hash,
96090 + [23485] = &_000947_hash,
96091 + [53282] = &_000948_hash,
96092 + [24987] = &_000950_hash,
96093 + [62646] = &_000951_hash,
96094 + [1073] = &_000952_hash,
96095 + [52259] = &_000953_hash,
96096 + [142] = &_000956_hash,
96097 + [22305] = &_000957_hash,
96098 + [56753] = &_000958_hash,
96099 + [41035] = &_000959_hash,
96100 + [48097] = &_000960_hash,
96101 + [28102] = &_000961_hash,
96102 + [18094] = &_000962_hash,
96103 + [23918] = &_000963_hash,
96104 + [16405] = &_000964_hash,
96105 + [15104] = &_000965_hash,
96106 + [14416] = &_000967_hash,
96107 + [47750] = &_000968_hash,
96108 + [63806] = &_000969_hash,
96109 + [60961] = &_000970_hash,
96110 + [23110] = &_000972_hash,
96111 + [17595] = &_000973_hash,
96112 + [13417] = &_000975_hash,
96113 + [35324] = &_000976_hash,
96114 + [31919] = &_000977_hash,
96115 + [32866] = &_000978_hash,
96116 + [19536] = &_000979_hash,
96117 + [59130] = &_000980_hash,
96118 + [2454] = &_000981_hash,
96119 + [64800] = &_000982_hash,
96120 + [30208] = &_000983_hash,
96121 + [9990] = &_000984_hash,
96122 + [62868] = &_000985_hash,
96123 + [23314] = &_000986_hash,
96124 + [53656] = &_000987_hash,
96125 + [38060] = &_000988_hash,
96126 + [49829] = &_000989_hash,
96127 + [41442] = &_000991_hash,
96128 + [34022] = &_000993_hash,
96129 + [46865] = &_000994_hash,
96130 + [21604] = &_000995_hash,
96131 + [19564] = &_000996_hash,
96132 + [64521] = &_000997_hash,
96133 + [2166] = &_000998_hash,
96134 + [53676] = &_000999_hash,
96135 + [45080] = &_001000_hash,
96136 + [17878] = &_001001_hash,
96137 + [57630] = &_001003_hash,
96138 + [54352] = &_001004_hash,
96139 + [38986] = &_001005_hash,
96140 + [17607] = &_001007_hash,
96141 + [31581] = &_001008_hash,
96142 + [10594] = &_001009_hash,
96143 + [33475] = &_001012_hash,
96144 + [28885] = &_001013_hash,
96145 + [18176] = &_001015_hash,
96146 + [3426] = &_001016_hash,
96147 + [36598] = &_001017_hash,
96148 + [38428] = &_001018_hash,
96149 + [22948] = &_001019_hash,
96150 + [62524] = &_001020_hash,
96151 + [47394] = &_001021_hash,
96152 + [496] = &_001022_hash,
96153 + [46510] = &_001023_hash,
96154 + [8360] = &_001024_hash,
96155 + [5204] = &_001026_hash,
96156 + [48284] = &_001027_hash,
96157 + [12785] = &_001028_hash,
96158 + [8219] = &_001030_hash,
96159 + [55716] = &_001031_hash,
96160 + [9864] = &_001032_hash,
96161 + [7659] = &_001033_hash,
96162 + [12640] = &_001035_hash,
96163 + [48039] = &_001036_hash,
96164 + [56420] = &_001037_hash,
96165 + [47533] = &_001038_hash,
96166 + [26213] = &_001040_hash,
96167 + [51003] = &_001042_hash,
96168 + [58267] = &_001043_hash,
96169 + [8556] = &_001044_hash,
96170 + [14652] = &_001045_hash,
96171 + [56103] = &_001046_hash,
96172 + [12061] = &_001047_hash,
96173 + [19298] = &_001048_hash,
96174 + [44813] = &_001049_hash,
96175 + [46160] = &_001050_hash,
96176 + [26795] = &_001051_hash,
96177 + [9657] = &_001052_hash,
96178 + [58803] = &_001053_hash,
96179 + [23078] = &_001054_hash,
96180 + [60010] = &_001055_hash,
96181 + [35425] = &_001056_hash,
96182 + [3102] = &_001057_hash,
96183 + [33678] = &_001058_hash,
96184 + [4605] = &_001059_hash,
96185 + [55007] = &_001060_hash,
96186 + [29214] = &_001061_hash,
96187 + [10517] = &_001062_hash,
96188 + [45510] = &_001063_hash,
96189 + [26540] = &_001064_hash,
96190 + [64244] = &_001065_hash,
96191 + [58009] = &_001066_hash,
96192 + [42652] = &_001067_hash,
96193 + [21681] = &_001068_hash,
96194 + [52735] = &_001069_hash,
96195 + [64505] = &_001070_hash,
96196 + [29147] = &_001071_hash,
96197 + [56286] = &_001072_hash,
96198 + [61650] = &_001073_hash,
96199 + [37661] = &_001074_hash,
96200 + [20249] = &_001075_hash,
96201 + [21553] = &_001076_hash,
96202 + [50286] = &_001077_hash,
96203 + [19780] = &_001078_hash,
96204 + [9627] = &_001079_hash,
96205 + [41815] = &_001081_hash,
96206 + [63041] = &_001082_hash,
96207 + [9522] = &_001083_hash,
96208 + [61919] = &_001084_hash,
96209 + [44788] = &_001085_hash,
96210 + [6073] = &_001086_hash,
96211 + [22631] = &_001087_hash,
96212 + [36446] = &_001088_hash,
96213 + [62047] = &_001089_hash,
96214 + [19839] = &_001091_hash,
96215 + [3492] = &_001092_hash,
96216 + [20724] = &_001093_hash,
96217 + [59038] = &_001094_hash,
96218 + [51704] = &_001095_hash,
96219 + [11353] = &_001096_hash,
96220 + [28800] = &_001097_hash,
96221 + [55195] = &_001098_hash,
96222 + [45060] = &_001099_hash,
96223 + [40715] = &_001100_hash,
96224 + [46582] = &_001101_hash,
96225 + [56458] = &_001102_hash,
96226 + [48141] = &_001104_hash,
96227 + [14987] = &_001105_hash,
96228 + [30581] = &_001106_hash,
96229 + [4502] = &_001107_hash,
96230 + [55757] = &_001108_hash,
96231 + [12129] = &_001109_hash,
96232 + [62411] = &_001110_hash,
96233 + [60179] = &_001111_hash,
96234 + [51724] = &_001112_hash,
96235 + [11843] = &_001113_hash,
96236 + [60420] = &_001114_hash,
96237 + [11649] = &_001115_hash,
96238 + [20891] = &_001116_hash,
96239 + [4682] = &_001117_hash,
96240 + [52014] = &_001118_hash,
96241 + [58624] = &_001121_hash,
96242 + [42095] = &_001122_hash,
96243 + [30290] = &_001123_hash,
96244 + [7396] = &_001124_hash,
96245 + [58135] = &_001125_hash,
96246 + [48668] = &_001127_hash,
96247 + [49561] = &_001129_hash,
96248 + [28351] = &_001130_hash,
96249 + [30218] = &_001131_hash,
96250 + [42533] = &_001133_hash,
96251 + [38784] = &_001134_hash,
96252 + [28153] = &_001135_hash,
96253 + [17820] = &_001136_hash,
96254 + [45298] = &_001137_hash,
96255 + [38108] = &_001138_hash,
96256 + [25505] = &_001139_hash,
96257 + [25106] = &_001140_hash,
96258 + [28008] = &_001141_hash,
96259 + [43732] = &_001142_hash,
96260 + [13111] = &_001143_hash,
96261 + [31753] = &_001144_hash,
96262 + [14283] = &_001145_hash,
96263 + [40948] = &_001146_hash,
96264 + [50088] = &_001147_hash,
96265 + [5102] = &_001148_hash,
96266 + [16944] = &_001149_hash,
96267 + [45223] = &_001150_hash,
96268 + [5518] = &_001151_hash,
96269 + [35651] = &_001152_hash,
96270 + [41656] = &_001153_hash,
96271 + [59440] = &_001154_hash,
96272 + [44227] = &_001155_hash,
96273 + [17920] = &_001156_hash,
96274 + [26760] = &_001157_hash,
96275 + [565] = &_001159_hash,
96276 + [57168] = &_001160_hash,
96277 + [60209] = &_001161_hash,
96278 + [1974] = &_001162_hash,
96279 + [16874] = &_001163_hash,
96280 + [47181] = &_001164_hash,
96281 + [61050] = &_001165_hash,
96282 + [10799] = &_001166_hash,
96283 + [15291] = &_001167_hash,
96284 + [41407] = &_001168_hash,
96285 + [49736] = &_001169_hash,
96286 + [57116] = &_001170_hash,
96287 + [4889] = &_001171_hash,
96288 + [33708] = &_001172_hash,
96289 + [27833] = &_001173_hash,
96290 + [4532] = &_001174_hash,
96291 + [61177] = &_001175_hash,
96292 + [57661] = &_001176_hash,
96293 + [32046] = &_001177_hash,
96294 + [39457] = &_001178_hash,
96295 + [20809] = &_001179_hash,
96296 + [37880] = &_001180_hash,
96297 + [32342] = &_001181_hash,
96298 + [54360] = &_001182_hash,
96299 + [63941] = &_001183_hash,
96300 + [52333] = &_001184_hash,
96301 + [10903] = &_001185_hash,
96302 + [50991] = &_001186_hash,
96303 + [37920] = &_001187_hash,
96304 + [5957] = &_001188_hash,
96305 + [50774] = &_001189_hash,
96306 + [49407] = &_001190_hash,
96307 + [20167] = &_001191_hash,
96308 + [15642] = &_001192_hash,
96309 + [39531] = &_001194_hash,
96310 + [64336] = &_001195_hash,
96311 + [3100] = &_001196_hash,
96312 + [5494] = &_001197_hash,
96313 + [59810] = &_001198_hash,
96314 + [48525] = &_001199_hash,
96315 + [62313] = &_001201_hash,
96316 + [14479] = &_001202_hash,
96317 + [26485] = &_001204_hash,
96318 + [45699] = &_001205_hash,
96319 + [61455] = &_001206_hash,
96320 + [645] = &_001207_hash,
96321 + [2481] = &_001208_hash,
96322 + [65444] = &_001209_hash,
96323 + [9656] = &_001210_hash,
96324 + [20836] = &_001211_hash,
96325 + [38725] = &_001212_hash,
96326 + [19510] = &_001213_hash,
96327 + [42703] = &_001214_hash,
96328 + [31948] = &_001215_hash,
96329 + [3585] = &_001216_hash,
96330 + [26554] = &_001218_hash,
96331 + [27062] = &_001219_hash,
96332 + [6963] = &_001220_hash,
96333 + [59470] = &_001221_hash,
96334 + [15464] = &_001223_hash,
96335 + [58354] = &_001224_hash,
96336 + [40915] = &_001225_hash,
96337 + [7752] = &_001226_hash,
96338 + [55462] = &_001227_hash,
96339 + [47421] = &_001228_hash,
96340 + [23424] = &_001229_hash,
96341 + [8858] = &_001230_hash,
96342 + [56725] = &_001231_hash,
96343 + [2482] = &_001232_hash,
96344 + [48056] = &_001233_hash,
96345 + [32199] = &_001234_hash,
96346 + [12487] = &_001235_hash,
96347 + [10997] = &_001236_hash,
96348 + [46811] = &_001237_hash,
96349 + [55845] = &_001238_hash,
96350 + [21785] = &_001239_hash,
96351 + [64288] = &_001240_hash,
96352 + [32400] = &_001241_hash,
96353 + [12384] = &_001242_hash,
96354 + [46826] = &_001243_hash,
96355 + [63902] = &_001244_hash,
96356 + [62839] = &_001245_hash,
96357 + [1475] = &_001246_hash,
96358 + [57903] = &_001247_hash,
96359 + [39034] = &_001248_hash,
96360 + [49744] = &_001249_hash,
96361 + [58271] = &_001250_hash,
96362 + [55362] = &_001251_hash,
96363 + [42030] = &_001252_hash,
96364 + [17594] = &_001253_hash,
96365 + [15360] = &_001254_hash,
96366 + [8218] = &_001255_hash,
96367 + [45201] = &_001256_hash,
96368 + [54941] = &_001257_hash,
96369 + [24177] = &_001258_hash,
96370 + [27346] = &_001259_hash,
96371 + [9470] = &_001260_hash,
96372 + [39427] = &_001261_hash,
96373 + [5329] = &_001262_hash,
96374 + [11410] = &_001263_hash,
96375 + [65397] = &_001264_hash,
96376 + [12127] = &_001265_hash,
96377 + [7776] = &_001266_hash,
96378 + [51475] = &_001268_hash,
96379 + [36450] = &_001269_hash,
96380 + [52824] = &_001270_hash,
96381 + [18780] = &_001271_hash,
96382 + [15382] = &_001272_hash,
96383 + [51320] = &_001273_hash,
96384 + [19140] = &_001274_hash,
96385 + [38071] = &_001275_hash,
96386 + [10747] = &_001276_hash,
96387 + [21371] = &_001277_hash,
96388 + [16399] = &_001279_hash,
96389 + [35521] = &_001280_hash,
96390 + [17325] = &_001281_hash,
96391 + [14267] = &_001284_hash,
96392 + [49836] = &_001285_hash,
96393 + [32101] = &_001286_hash,
96394 + [43564] = &_001287_hash,
96395 + [60515] = &_001288_hash,
96396 + [31221] = &_001289_hash,
96397 + [46655] = &_001290_hash,
96398 + [46525] = &_001291_hash,
96399 + [41719] = &_001292_hash,
96400 + [64751] = &_001293_hash,
96401 + [47733] = &_001294_hash,
96402 + [9778] = &_001295_hash,
96403 + [28670] = &_001296_hash,
96404 + [16772] = &_001297_hash,
96405 + [13529] = &_001298_hash,
96406 + [60347] = &_001299_hash,
96407 + [59304] = &_001300_hash,
96408 + [64379] = &_001301_hash,
96409 + [54968] = &_001302_hash,
96410 + [24790] = &_001303_hash,
96411 + [13589] = &_001304_hash,
96412 + [32010] = &_001305_hash,
96413 + [53634] = &_001306_hash,
96414 + [31270] = &_001307_hash,
96415 + [17005] = &_001308_hash,
96416 + [32632] = &_001309_hash,
96417 + [12930] = &_001311_hash,
96418 + [35029] = &_001312_hash,
96419 + [8321] = &_001313_hash,
96420 + [52367] = &_001315_hash,
96421 + [27308] = &_001316_hash,
96422 + [26164] = &_001317_hash,
96423 + [57432] = &_001318_hash,
96424 + [42781] = &_001319_hash,
96425 + [6985] = &_001320_hash,
96426 + [80] = &_001321_hash,
96427 + [22735] = &_001322_hash,
96428 + [30091] = &_001324_hash,
96429 + [40038] = &_001325_hash,
96430 + [24786] = &_001326_hash,
96431 + [33204] = &_001327_hash,
96432 + [14536] = &_001328_hash,
96433 + [17914] = &_001329_hash,
96434 + [9743] = &_001330_hash,
96435 + [56369] = &_001331_hash,
96436 + [48508] = &_001332_hash,
96437 + [16406] = &_001333_hash,
96438 + [56662] = &_001334_hash,
96439 + [16814] = &_001335_hash,
96440 + [14096] = &_001337_hash,
96441 + [43518] = &_001338_hash,
96442 + [37227] = &_001339_hash,
96443 + [9766] = &_001340_hash,
96444 + [16280] = &_001341_hash,
96445 + [11684] = &_001342_hash,
96446 + [51040] = &_001343_hash,
96447 + [55643] = &_001344_hash,
96448 + [9840] = &_001345_hash,
96449 + [4303] = &_001346_hash,
96450 + [56702] = &_001348_hash,
96451 + [6622] = &_001349_hash,
96452 + [40775] = &_001350_hash,
96453 + [18322] = &_001351_hash,
96454 + [62883] = &_001352_hash,
96455 + [25420] = &_001353_hash,
96456 + [19509] = &_001354_hash,
96457 + [36621] = &_001355_hash,
96458 + [59323] = &_001356_hash,
96459 + [6238] = &_001357_hash,
96460 + [55137] = &_001358_hash,
96461 + [46469] = &_001359_hash,
96462 + [40672] = &_001360_hash,
96463 + [18625] = &_001361_hash,
96464 + [16134] = &_001362_hash,
96465 + [62581] = &_001363_hash,
96466 + [2570] = &_001364_hash,
96467 + [22457] = &_001365_hash,
96468 + [48310] = &_001366_hash,
96469 + [6792] = &_001367_hash,
96470 + [9273] = &_001368_hash,
96471 + [32458] = &_001369_hash,
96472 + [59650] = &_001370_hash,
96473 + [15752] = &_001371_hash,
96474 + [42038] = &_001373_hash,
96475 + [36510] = &_001374_hash,
96476 + [27525] = &_001375_hash,
96477 + [2097] = &_001376_hash,
96478 + [43829] = &_001377_hash,
96479 + [13991] = &_001378_hash,
96480 + [32531] = &_001379_hash,
96481 + [63896] = &_001380_hash,
96482 + [13252] = &_001382_hash,
96483 + [53483] = &_001385_hash,
96484 + [56711] = &_001386_hash,
96485 + [8328] = &_001387_hash,
96486 + [1362] = &_001388_hash,
96487 + [35159] = &_001389_hash,
96488 + [51647] = &_001390_hash,
96489 + [36671] = &_001391_hash,
96490 + [196] = &_001392_hash,
96491 + [50356] = &_001395_hash,
96492 + [58806] = &_001396_hash,
96493 + [49767] = &_001397_hash,
96494 + [45206] = &_001398_hash,
96495 + [6159] = &_001400_hash,
96496 + [24899] = &_001401_hash,
96497 + [38415] = &_001402_hash,
96498 + [41359] = &_001403_hash,
96499 + [42048] = &_001404_hash,
96500 + [62020] = &_001405_hash,
96501 + [62107] = &_001406_hash,
96502 + [17048] = &_001407_hash,
96503 + [10182] = &_001408_hash,
96504 + [35913] = &_001409_hash,
96505 + [36853] = &_001410_hash,
96506 + [64418] = &_001411_hash,
96507 + [13438] = &_001412_hash,
96508 + [20646] = &_001413_hash,
96509 + [56128] = &_001414_hash,
96510 + [41373] = &_001415_hash,
96511 + [35993] = &_001416_hash,
96512 + [2308] = &_001417_hash,
96513 + [13337] = &_001418_hash,
96514 + [24869] = &_001419_hash,
96515 + [37327] = &_001420_hash,
96516 + [51801] = &_001421_hash,
96517 + [57669] = &_001422_hash,
96518 + [7917] = &_001423_hash,
96519 + [1092] = &_001424_hash,
96520 + [28185] = &_001425_hash,
96521 + [36513] = &_001426_hash,
96522 + [58056] = &_001427_hash,
96523 + [14976] = &_001428_hash,
96524 + [22896] = &_001429_hash,
96525 + [54166] = &_001430_hash,
96526 + [19736] = &_001431_hash,
96527 + [60916] = &_001433_hash,
96528 + [62498] = &_001434_hash,
96529 + [48501] = &_001435_hash,
96530 + [52863] = &_001436_hash,
96531 + [47123] = &_001437_hash,
96532 + [2868] = &_001438_hash,
96533 + [27805] = &_001439_hash,
96534 + [9337] = &_001441_hash,
96535 + [23179] = &_001442_hash,
96536 + [55719] = &_001443_hash,
96537 + [17283] = &_001444_hash,
96538 + [22859] = &_001445_hash,
96539 + [39616] = &_001446_hash,
96540 + [52089] = &_001447_hash,
96541 + [49572] = &_001448_hash,
96542 + [63631] = &_001449_hash,
96543 + [170] = &_001450_hash,
96544 + [54737] = &_001451_hash,
96545 + [18207] = &_001452_hash,
96546 + [52032] = &_001454_hash,
96547 + [13634] = &_001455_hash,
96548 + [27364] = &_001456_hash,
96549 + [12589] = &_001457_hash,
96550 + [17276] = &_001458_hash,
96551 + [14549] = &_001459_hash,
96552 + [37928] = &_001460_hash,
96553 + [62034] = &_001461_hash,
96554 + [27142] = &_001462_hash,
96555 + [5662] = &_001463_hash,
96556 + [54851] = &_001464_hash,
96557 + [54978] = &_001465_hash,
96558 + [49136] = &_001466_hash,
96559 + [36589] = &_001467_hash,
96560 + [51735] = &_001469_hash,
96561 + [49371] = &_001470_hash,
96562 + [13977] = &_001471_hash,
96563 + [20080] = &_001472_hash,
96564 + [40976] = &_001473_hash,
96565 + [43851] = &_001474_hash,
96566 + [27859] = &_001475_hash,
96567 + [26501] = &_001476_hash,
96568 + [65297] = &_001477_hash,
96569 + [54223] = &_001478_hash,
96570 + [53803] = &_001480_hash,
96571 + [21732] = &_001481_hash,
96572 + [34377] = &_001482_hash,
96573 + [24109] = &_001483_hash,
96574 + [54711] = &_001484_hash,
96575 + [41581] = &_001485_hash,
96576 + [41093] = &_001486_hash,
96577 + [62542] = &_001487_hash,
96578 + [49094] = &_001488_hash,
96579 + [3607] = &_001489_hash,
96580 + [42941] = &_001490_hash,
96581 + [10737] = &_001491_hash,
96582 + [17741] = &_001492_hash,
96583 + [7509] = &_001493_hash,
96584 + [26037] = &_001494_hash,
96585 + [18148] = &_001495_hash,
96586 + [10708] = &_001496_hash,
96587 + [63744] = &_001497_hash,
96588 + [55611] = &_001498_hash,
96589 + [18543] = &_001500_hash,
96590 + [61589] = &_001502_hash,
96591 + [42824] = &_001503_hash,
96592 + [25799] = &_001504_hash,
96593 + [10720] = &_001505_hash,
96594 + [12267] = &_001506_hash,
96595 + [55867] = &_001507_hash,
96596 + [55957] = &_001508_hash,
96597 + [23615] = &_001511_hash,
96598 + [29305] = &_001512_hash,
96599 + [25930] = &_001513_hash,
96600 + [9062] = &_001514_hash,
96601 + [4011] = &_001515_hash,
96602 + [92] = &_001516_hash,
96603 + [63774] = &_001517_hash,
96604 + [44595] = &_001518_hash,
96605 + [63771] = &_001519_hash,
96606 + [36740] = &_001520_hash,
96607 + [10287] = &_001521_hash,
96608 + [37288] = &_001522_hash,
96609 + [16291] = &_001523_hash,
96610 + [35088] = &_001524_hash,
96611 + [56417] = &_001525_hash,
96612 + [11411] = &_001526_hash,
96613 + [2071] = &_001527_hash,
96614 + [25166] = &_001528_hash,
96615 + [49698] = &_001529_hash,
96616 + [37418] = &_001530_hash,
96617 + [14222] = &_001531_hash,
96618 + [44537] = &_001532_hash,
96619 + [19090] = &_001533_hash,
96620 + [4928] = &_001535_hash,
96621 + [60033] = &_001537_hash,
96622 + [35320] = &_001538_hash,
96623 + [8261] = &_001539_hash,
96624 + [20091] = &_001540_hash,
96625 + [16809] = &_001541_hash,
96626 + [63935] = &_001542_hash,
96627 + [51238] = &_001543_hash,
96628 + [2949] = &_001544_hash,
96629 + [46215] = &_001545_hash,
96630 + [20250] = &_001546_hash,
96631 + [44757] = &_001547_hash,
96632 + [33539] = &_001548_hash,
96633 + [5498] = &_001549_hash,
96634 + [40458] = &_001550_hash,
96635 + [50344] = &_001551_hash,
96636 + [17486] = &_001552_hash,
96637 + [57219] = &_001553_hash,
96638 + [33178] = &_001554_hash,
96639 + [3870] = &_001555_hash,
96640 + [15870] = &_001556_hash,
96641 + [49300] = &_001558_hash,
96642 + [27893] = &_001559_hash,
96643 + [63059] = &_001560_hash,
96644 + [8964] = &_001562_hash,
96645 + [45114] = &_001563_hash,
96646 + [57342] = &_001564_hash,
96647 + [32377] = &_001565_hash,
96648 + [34386] = &_001566_hash,
96649 + [31682] = &_001567_hash,
96650 + [51881] = &_001568_hash,
96651 + [39672] = &_001569_hash,
96652 + [8017] = &_001570_hash,
96653 + [6171] = &_001571_hash,
96654 + [20555] = &_001572_hash,
96655 + [32165] = &_001573_hash,
96656 + [8121] = &_001574_hash,
96657 + [8967] = &_001575_hash,
96658 + [59781] = &_001576_hash,
96659 + [17707] = &_001577_hash,
96660 + [45564] = &_001579_hash,
96661 + [23570] = &_001580_hash,
96662 + [54368] = &_001582_hash,
96663 + [38011] = &_001583_hash,
96664 + [25278] = &_001584_hash,
96665 + [4886] = &_001585_hash,
96666 + [12604] = &_001586_hash,
96667 + [10760] = &_001587_hash,
96668 + [15423] = &_001588_hash,
96669 + [3708] = &_001589_hash,
96670 + [4548] = &_001590_hash,
96671 + [61993] = &_001591_hash,
96672 + [4495] = &_001593_hash,
96673 + [8968] = &_001594_hash,
96674 + [31148] = &_001595_hash,
96675 + [6549] = &_001596_hash,
96676 + [54261] = &_001597_hash,
96677 + [48900] = &_001598_hash,
96678 + [51477] = &_001599_hash,
96679 + [5463] = &_001600_hash,
96680 + [5476] = &_001601_hash,
96681 + [20521] = &_001602_hash,
96682 + [59211] = &_001603_hash,
96683 + [12817] = &_001604_hash,
96684 + [44102] = &_001605_hash,
96685 + [36353] = &_001606_hash,
96686 + [44725] = &_001607_hash,
96687 + [12221] = &_001608_hash,
96688 + [12269] = &_001609_hash,
96689 + [54815] = &_001610_hash,
96690 + [19910] = &_001611_hash,
96691 + [10155] = &_001612_hash,
96692 + [32562] = &_001613_hash,
96693 + [39919] = &_001614_hash,
96694 + [48666] = &_001615_hash,
96695 + [8482] = &_001616_hash,
96696 + [58761] = &_001617_hash,
96697 + [31498] = &_001618_hash,
96698 + [43423] = &_001619_hash,
96699 + [6432] = &_001620_hash,
96700 + [48186] = &_001621_hash,
96701 + [29338] = &_001622_hash,
96702 + [51549] = &_001623_hash,
96703 + [22708] = &_001624_hash,
96704 + [8533] = &_001625_hash,
96705 + [17868] = &_001626_hash,
96706 + [8074] = &_001627_hash,
96707 + [3650] = &_001630_hash,
96708 + [41554] = &_001631_hash,
96709 + [2050] = &_001632_hash,
96710 + [44303] = &_001633_hash,
96711 + [10355] = &_001634_hash,
96712 + [61022] = &_001635_hash,
96713 + [14393] = &_001636_hash,
96714 + [25884] = &_001637_hash,
96715 + [48747] = &_001638_hash,
96716 + [26166] = &_001639_hash,
96717 + [25316] = &_001640_hash,
96718 + [29522] = &_001641_hash,
96719 + [24425] = &_001642_hash,
96720 + [2473] = &_001643_hash,
96721 + [43992] = &_001644_hash,
96722 + [13119] = &_001645_hash,
96723 + [57830] = &_001646_hash,
96724 + [30592] = &_001647_hash,
96725 + [44355] = &_001648_hash,
96726 + [47004] = &_001649_hash,
96727 + [10976] = &_001650_hash,
96728 + [19583] = &_001651_hash,
96729 + [50665] = &_001652_hash,
96730 + [51308] = &_001653_hash,
96731 + [46939] = &_001654_hash,
96732 + [4718] = &_001655_hash,
96733 + [56303] = &_001656_hash,
96734 + [1614] = &_001657_hash,
96735 + [1647] = &_001658_hash,
96736 + [920] = &_001659_hash,
96737 + [24308] = &_001660_hash,
96738 + [22395] = &_001661_hash,
96739 + [50683] = &_001662_hash,
96740 + [413] = &_001663_hash,
96741 + [9973] = &_001664_hash,
96742 + [38547] = &_001665_hash,
96743 + [55812] = &_001666_hash,
96744 + [56751] = &_001667_hash,
96745 + [13173] = &_001668_hash,
96746 + [27] = &_001669_hash,
96747 + [47738] = &_001670_hash,
96748 + [19570] = &_001671_hash,
96749 + [45532] = &_001672_hash,
96750 + [47308] = &_001673_hash,
96751 + [24910] = &_001674_hash,
96752 + [1683] = &_001675_hash,
96753 + [8800] = &_001676_hash,
96754 + [2347] = &_001677_hash,
96755 + [45549] = &_001678_hash,
96756 + [25421] = &_001679_hash,
96757 + [64715] = &_001680_hash,
96758 + [45917] = &_001684_hash,
96759 + [38894] = &_001685_hash,
96760 + [62535] = &_001686_hash,
96761 + [18575] = &_001687_hash,
96762 + [19322] = &_001688_hash,
96763 + [18116] = &_001689_hash,
96764 + [1725] = &_001690_hash,
96765 + [12173] = &_001691_hash,
96766 + [52045] = &_001692_hash,
96767 + [65354] = &_001693_hash,
96768 + [35266] = &_001694_hash,
96769 + [46060] = &_001695_hash,
96770 + [39645] = &_001697_hash,
96771 + [17213] = &_001698_hash,
96772 + [54658] = &_001699_hash,
96773 + [5590] = &_001700_hash,
96774 + [17194] = &_001701_hash,
96775 + [34871] = &_001702_hash,
96776 + [20682] = &_001703_hash,
96777 + [43355] = &_001704_hash,
96778 + [40978] = &_001705_hash,
96779 + [21083] = &_001706_hash,
96780 + [48961] = &_001707_hash,
96781 + [61175] = &_001708_hash,
96782 + [10471] = &_001709_hash,
96783 + [40363] = &_001710_hash,
96784 + [38518] = &_001711_hash,
96785 + [25792] = &_001712_hash,
96786 + [19305] = &_001713_hash,
96787 + [60425] = &_001714_hash,
96788 + [35332] = &_001715_hash,
96789 + [61692] = &_001716_hash,
96790 + [32550] = &_001717_hash,
96791 + [61718] = &_001718_hash,
96792 + [20084] = &_001719_hash,
96793 + [49762] = &_001720_hash,
96794 + [32654] = &_001721_hash,
96795 + [36059] = &_001722_hash,
96796 + [50924] = &_001723_hash,
96797 + [55209] = &_001724_hash,
96798 + [52318] = &_001725_hash,
96799 + [42324] = &_001726_hash,
96800 + [57706] = &_001727_hash,
96801 + [28527] = &_001728_hash,
96802 + [63374] = &_001729_hash,
96803 + [6785] = &_001730_hash,
96804 + [61630] = &_001731_hash,
96805 + [1658] = &_001732_hash,
96806 + [48802] = &_001733_hash,
96807 + [46111] = &_001734_hash,
96808 + [3048] = &_001735_hash,
96809 + [36016] = &_001736_hash,
96810 + [37792] = &_001737_hash,
96811 + [38564] = &_001738_hash,
96812 + [49206] = &_001739_hash,
96813 + [56277] = &_001740_hash,
96814 + [61948] = &_001741_hash,
96815 + [12064] = &_001742_hash,
96816 + [38044] = &_001743_hash,
96817 + [25005] = &_001744_hash,
96818 + [42386] = &_001745_hash,
96819 + [40993] = &_001746_hash,
96820 + [57517] = &_001747_hash,
96821 + [3351] = &_001748_hash,
96822 + [59956] = &_001749_hash,
96823 + [12288] = &_001750_hash,
96824 + [43165] = &_001751_hash,
96825 + [33615] = &_001752_hash,
96826 + [19281] = &_001753_hash,
96827 + [59950] = &_001754_hash,
96828 + [11221] = &_001755_hash,
96829 + [15243] = &_001756_hash,
96830 + [2370] = &_001757_hash,
96831 + [62296] = &_001758_hash,
96832 + [21532] = &_001759_hash,
96833 + [58388] = &_001760_hash,
96834 + [55668] = &_001761_hash,
96835 + [32913] = &_001762_hash,
96836 + [57136] = &_001763_hash,
96837 + [59807] = &_001765_hash,
96838 + [35280] = &_001766_hash,
96839 + [16376] = &_001767_hash,
96840 + [53439] = &_001768_hash,
96841 + [41398] = &_001769_hash,
96842 + [49490] = &_001770_hash,
96843 + [8574] = &_001771_hash,
96844 + [48159] = &_001772_hash,
96845 + [34687] = &_001773_hash,
96846 + [54136] = &_001774_hash,
96847 + [16110] = &_001775_hash,
96848 + [9181] = &_001776_hash,
96849 + [64789] = &_001777_hash,
96850 + [30271] = &_001778_hash,
96851 + [38325] = &_001779_hash,
96852 + [43025] = &_001780_hash,
96853 + [31804] = &_001781_hash,
96854 + [35283] = &_001782_hash,
96855 + [16103] = &_001783_hash,
96856 + [23084] = &_001784_hash,
96857 + [49607] = &_001785_hash,
96858 + [57796] = &_001786_hash,
96859 + [35534] = &_001787_hash,
96860 + [46355] = &_001788_hash,
96861 + [6784] = &_001789_hash,
96862 + [2081] = &_001790_hash,
96863 + [35761] = &_001791_hash,
96864 + [879] = &_001792_hash,
96865 + [62628] = &_001793_hash,
96866 + [23574] = &_001794_hash,
96867 + [2107] = &_001795_hash,
96868 + [50584] = &_001796_hash,
96869 + [23845] = &_001797_hash,
96870 + [55407] = &_001798_hash,
96871 + [54392] = &_001799_hash,
96872 + [13943] = &_001800_hash,
96873 + [11753] = &_001801_hash,
96874 + [19205] = &_001802_hash,
96875 + [18708] = &_001803_hash,
96876 + [28832] = &_001804_hash,
96877 + [20795] = &_001805_hash,
96878 + [19943] = &_001806_hash,
96879 + [62687] = &_001807_hash,
96880 + [63116] = &_001808_hash,
96881 + [3038] = &_001809_hash,
96882 + [44505] = &_001810_hash,
96883 + [9309] = &_001811_hash,
96884 + [5171] = &_001812_hash,
96885 + [29224] = &_001813_hash,
96886 + [38779] = &_001814_hash,
96887 + [58870] = &_001815_hash,
96888 + [4635] = &_001816_hash,
96889 + [314] = &_001817_hash,
96890 + [42820] = &_001818_hash,
96891 + [49199] = &_001819_hash,
96892 + [58023] = &_001820_hash,
96893 + [47983] = &_001821_hash,
96894 + [31611] = &_001822_hash,
96895 + [540] = &_001823_hash,
96896 + [17027] = &_001824_hash,
96897 + [35617] = &_001825_hash,
96898 + [57946] = &_001826_hash,
96899 + [20895] = &_001827_hash,
96900 + [3241] = &_001828_hash,
96901 + [62746] = &_001829_hash,
96902 + [12736] = &_001830_hash,
96903 + [4862] = &_001831_hash,
96904 + [39123] = &_001833_hash,
96905 + [65268] = &_001834_hash,
96906 + [49995] = &_001836_hash,
96907 + [27701] = &_001837_hash,
96908 + [30721] = &_001838_hash,
96909 + [51023] = &_001839_hash,
96910 + [31013] = &_001840_hash,
96911 + [45805] = &_001841_hash,
96912 + [418] = &_001842_hash,
96913 + [41431] = &_001843_hash,
96914 + [10840] = &_001844_hash,
96915 + [21046] = &_001845_hash,
96916 + [4415] = &_001846_hash,
96917 + [45752] = &_001847_hash,
96918 + [13556] = &_001848_hash,
96919 + [59766] = &_001849_hash,
96920 + [36303] = &_001850_hash,
96921 + [16566] = &_001851_hash,
96922 + [33943] = &_001853_hash,
96923 + [15948] = &_001854_hash,
96924 + [48301] = &_001855_hash,
96925 + [28061] = &_001856_hash,
96926 + [50334] = &_001857_hash,
96927 + [13950] = &_001858_hash,
96928 + [55662] = &_001859_hash,
96929 + [41010] = &_001860_hash,
96930 + [55609] = &_001861_hash,
96931 + [30629] = &_001862_hash,
96932 + [59700] = &_001863_hash,
96933 + [27972] = &_001864_hash,
96934 + [17290] = &_001865_hash,
96935 + [13205] = &_001866_hash,
96936 + [6841] = &_001867_hash,
96937 + [25238] = &_001868_hash,
96938 + [6228] = &_001870_hash,
96939 + [53074] = &_001871_hash,
96940 + [54269] = &_001872_hash,
96941 + [53447] = &_001873_hash,
96942 + [51429] = &_001874_hash,
96943 + [34472] = &_001875_hash,
96944 + [62605] = &_001876_hash,
96945 + [54577] = &_001877_hash,
96946 + [10660] = &_001878_hash,
96947 + [37851] = &_001879_hash,
96948 + [52105] = &_001880_hash,
96949 + [56405] = &_001881_hash,
96950 + [14507] = &_001882_hash,
96951 + [50656] = &_001883_hash,
96952 + [25127] = &_001884_hash,
96953 + [42182] = &_001886_hash,
96954 + [11582] = &_001887_hash,
96955 + [4204] = &_001888_hash,
96956 + [59990] = &_001889_hash,
96957 + [53486] = &_001890_hash,
96958 + [23850] = &_001891_hash,
96959 + [24954] = &_001892_hash,
96960 + [23346] = &_001893_hash,
96961 + [2124] = &_001894_hash,
96962 + [34734] = &_001895_hash,
96963 + [43875] = &_001896_hash,
96964 + [55891] = &_001897_hash,
96965 + [53873] = &_001898_hash,
96966 + [5904] = &_001899_hash,
96967 + [39153] = &_001900_hash,
96968 + [62043] = &_001901_hash,
96969 + [63344] = &_001902_hash,
96970 + [15631] = &_001903_hash,
96971 + [10173] = &_001904_hash,
96972 + [52186] = &_001905_hash,
96973 + [43614] = &_001906_hash,
96974 + [38094] = &_001907_hash,
96975 + [41105] = &_001909_hash,
96976 + [6699] = &_001910_hash,
96977 + [11776] = &_001911_hash,
96978 + [5361] = &_001912_hash,
96979 + [57288] = &_001913_hash,
96980 + [19918] = &_001914_hash,
96981 + [63362] = &_001915_hash,
96982 + [28924] = &_001916_hash,
96983 + [51669] = &_001917_hash,
96984 + [18006] = &_001918_hash,
96985 + [13176] = &_001919_hash,
96986 + [5324] = &_001920_hash,
96987 + [17686] = &_001921_hash,
96988 + [26627] = &_001922_hash,
96989 + [25824] = &_001923_hash,
96990 + [18355] = &_001924_hash,
96991 + [26935] = &_001925_hash,
96992 + [50505] = &_001926_hash,
96993 + [52836] = &_001927_hash,
96994 + [48423] = &_001928_hash,
96995 + [60851] = &_001929_hash,
96996 + [26321] = &_001930_hash,
96997 + [22640] = &_001931_hash,
96998 + [24877] = &_001932_hash,
96999 + [17277] = &_001933_hash,
97000 + [25919] = &_001934_hash,
97001 + [30212] = &_001935_hash,
97002 + [59327] = &_001936_hash,
97003 + [63486] = &_001937_hash,
97004 + [14041] = &_001938_hash,
97005 + [37744] = &_001939_hash,
97006 + [23161] = &_001940_hash,
97007 + [13574] = &_001941_hash,
97008 + [42168] = &_001942_hash,
97009 + [32595] = &_001943_hash,
97010 + [57406] = &_001944_hash,
97011 + [4180] = &_001945_hash,
97012 + [54367] = &_001946_hash,
97013 + [58256] = &_001947_hash,
97014 + [6536] = &_001948_hash,
97015 + [9530] = &_001949_hash,
97016 + [18766] = &_001950_hash,
97017 + [64001] = &_001951_hash,
97018 + [9948] = &_001953_hash,
97019 + [39909] = &_001954_hash,
97020 + [40895] = &_001955_hash,
97021 + [22854] = &_001956_hash,
97022 + [48232] = &_001957_hash,
97023 + [33370] = &_001958_hash,
97024 + [61742] = &_001959_hash,
97025 + [52310] = &_001960_hash,
97026 + [41605] = &_001961_hash,
97027 + [50111] = &_001962_hash,
97028 + [35795] = &_001963_hash,
97029 + [20697] = &_001964_hash,
97030 + [33944] = &_001965_hash,
97031 + [8959] = &_001966_hash,
97032 + [51847] = &_001967_hash,
97033 + [3829] = &_001968_hash,
97034 + [292] = &_001969_hash,
97035 + [21487] = &_001970_hash,
97036 + [3337] = &_001971_hash,
97037 + [55658] = &_001972_hash,
97038 + [39379] = &_001973_hash,
97039 + [4815] = &_001974_hash,
97040 + [42693] = &_001975_hash,
97041 + [33499] = &_001976_hash,
97042 + [52129] = &_001977_hash,
97043 + [47165] = &_001978_hash,
97044 + [40262] = &_001979_hash,
97045 + [56573] = &_001980_hash,
97046 + [44384] = &_001981_hash,
97047 + [44799] = &_001982_hash,
97048 + [62844] = &_001983_hash,
97049 + [64407] = &_001984_hash,
97050 + [57179] = &_001985_hash,
97051 + [10157] = &_001986_hash,
97052 + [23801] = &_001987_hash,
97053 + [55106] = &_001988_hash,
97054 + [22001] = &_001989_hash,
97055 + [63405] = &_001990_hash,
97056 + [2403] = &_001991_hash,
97057 + [35538] = &_001992_hash,
97058 + [58001] = &_001993_hash,
97059 + [40283] = &_001994_hash,
97060 + [4833] = &_001995_hash,
97061 + [56245] = &_001996_hash,
97062 + [12802] = &_001998_hash,
97063 + [36896] = &_001999_hash,
97064 + [33942] = &_002000_hash,
97065 + [1984] = &_002001_hash,
97066 + [24236] = &_002002_hash,
97067 + [33068] = &_002003_hash,
97068 + [2828] = &_002005_hash,
97069 + [56139] = &_002006_hash,
97070 + [57933] = &_002007_hash,
97071 + [32362] = &_002008_hash,
97072 + [25369] = &_002009_hash,
97073 + [42302] = &_002010_hash,
97074 + [55947] = &_002011_hash,
97075 + [28544] = &_002012_hash,
97076 + [55] = &_002013_hash,
97077 + [37323] = &_002014_hash,
97078 + [52960] = &_002015_hash,
97079 + [4687] = &_002016_hash,
97080 + [24738] = &_002017_hash,
97081 + [17076] = &_002018_hash,
97082 + [48102] = &_002019_hash,
97083 + [5806] = &_002022_hash,
97084 + [61220] = &_002024_hash,
97085 + [15803] = &_002025_hash,
97086 + [30813] = &_002026_hash,
97087 + [37804] = &_002027_hash,
97088 + [3855] = &_002028_hash,
97089 + [22601] = &_002029_hash,
97090 + [6847] = &_002030_hash,
97091 + [20323] = &_002031_hash,
97092 + [45734] = &_002032_hash,
97093 + [56686] = &_002033_hash,
97094 + [28317] = &_002034_hash,
97095 + [39653] = &_002035_hash,
97096 + [58484] = &_002036_hash,
97097 + [14245] = &_002037_hash,
97098 + [6911] = &_002038_hash,
97099 + [14058] = &_002039_hash,
97100 + [17435] = &_002040_hash,
97101 + [56710] = &_002041_hash,
97102 + [10366] = &_002042_hash,
97103 + [19106] = &_002043_hash,
97104 + [1488] = &_002044_hash,
97105 + [51251] = &_002045_hash,
97106 + [10608] = &_002046_hash,
97107 + [42113] = &_002047_hash,
97108 + [45531] = &_002048_hash,
97109 + [17100] = &_002049_hash,
97110 + [41722] = &_002050_hash,
97111 + [50664] = &_002051_hash,
97112 + [24961] = &_002052_hash,
97113 + [28384] = &_002053_hash,
97114 + [62534] = &_002054_hash,
97115 + [44687] = &_002055_hash,
97116 + [12839] = &_002056_hash,
97117 + [31429] = &_002057_hash,
97118 + [40520] = &_002058_hash,
97119 + [8013] = &_002060_hash,
97120 + [10337] = &_002061_hash,
97121 + [47300] = &_002062_hash,
97122 + [1463] = &_002063_hash,
97123 + [44978] = &_002064_hash,
97124 + [40305] = &_002065_hash,
97125 + [5911] = &_002067_hash,
97126 + [35210] = &_002068_hash,
97127 + [56651] = &_002069_hash,
97128 + [8038] = &_002070_hash,
97129 + [33762] = &_002071_hash,
97130 + [12154] = &_002072_hash,
97131 + [40902] = &_002074_hash,
97132 + [20174] = &_002075_hash,
97133 + [58567] = &_002076_hash,
97134 + [43035] = &_002077_hash,
97135 + [41492] = &_002078_hash,
97136 + [13377] = &_002079_hash,
97137 + [18751] = &_002080_hash,
97138 + [20834] = &_002081_hash,
97139 + [13615] = &_002082_hash,
97140 + [29203] = &_002083_hash,
97141 + [51065] = &_002084_hash,
97142 + [27519] = &_002085_hash,
97143 + [41422] = &_002086_hash,
97144 + [40744] = &_002087_hash,
97145 + [51148] = &_002088_hash,
97146 + [7898] = &_002089_hash,
97147 + [23229] = &_002090_hash,
97148 + [29029] = &_002091_hash,
97149 + [825] = &_002092_hash,
97150 + [16576] = &_002093_hash,
97151 + [16756] = &_002094_hash,
97152 + [22053] = &_002095_hash,
97153 + [16227] = &_002097_hash,
97154 + [64441] = &_002098_hash,
97155 + [7091] = &_002099_hash,
97156 + [55761] = &_002100_hash,
97157 + [39479] = &_002101_hash,
97158 + [12316] = &_002102_hash,
97159 + [52518] = &_002103_hash,
97160 + [32241] = &_002104_hash,
97161 + [36540] = &_002105_hash,
97162 + [23699] = &_002106_hash,
97163 + [16056] = &_002107_hash,
97164 + [31112] = &_002108_hash,
97165 + [7787] = &_002109_hash,
97166 + [23104] = &_002110_hash,
97167 + [21516] = &_002111_hash,
97168 + [654] = &_002113_hash,
97169 + [10110] = &_002114_hash,
97170 + [2117] = &_002115_hash,
97171 + [39921] = &_002116_hash,
97172 + [36841] = &_002117_hash,
97173 + [31685] = &_002118_hash,
97174 + [64031] = &_002119_hash,
97175 + [4166] = &_002120_hash,
97176 + [45882] = &_002121_hash,
97177 + [7072] = &_002122_hash,
97178 + [15449] = &_002123_hash,
97179 + [20122] = &_002124_hash,
97180 + [11673] = &_002125_hash,
97181 + [42355] = &_002126_hash,
97182 + [29562] = &_002127_hash,
97183 + [9705] = &_002128_hash,
97184 + [38268] = &_002129_hash,
97185 + [64924] = &_002130_hash,
97186 + [35161] = &_002131_hash,
97187 + [23884] = &_002132_hash,
97188 + [60670] = &_002133_hash,
97189 + [14486] = &_002134_hash,
97190 + [47356] = &_002135_hash,
97191 + [7368] = &_002136_hash,
97192 + [59829] = &_002137_hash,
97193 + [1589] = &_002138_hash,
97194 + [17346] = &_002139_hash,
97195 + [24208] = &_002140_hash,
97196 + [2249] = &_002141_hash,
97197 + [51441] = &_002142_hash,
97198 + [23878] = &_002143_hash,
97199 + [12756] = &_002144_hash,
97200 + [52168] = &_002145_hash,
97201 + [58307] = &_002146_hash,
97202 + [32603] = &_002147_hash,
97203 + [33383] = &_002148_hash,
97204 + [44500] = &_002149_hash,
97205 + [37053] = &_002150_hash,
97206 + [38419] = &_002151_hash,
97207 + [18869] = &_002152_hash,
97208 + [32533] = &_002153_hash,
97209 + [36520] = &_002155_hash,
97210 + [39571] = &_002156_hash,
97211 + [59740] = &_002157_hash,
97212 + [31257] = &_002158_hash,
97213 + [13946] = &_002159_hash,
97214 + [12716] = &_002160_hash,
97215 + [56177] = &_002161_hash,
97216 + [34722] = &_002162_hash,
97217 + [25545] = &_002163_hash,
97218 + [45233] = &_002164_hash,
97219 + [61570] = &_002165_hash,
97220 + [27183] = &_002166_hash,
97221 + [37675] = &_002167_hash,
97222 + [44423] = &_002168_hash,
97223 + [49260] = &_002169_hash,
97224 + [27416] = &_002170_hash,
97225 + [19565] = &_002172_hash,
97226 + [16420] = &_002173_hash,
97227 + [15067] = &_002174_hash,
97228 + [55150] = &_002175_hash,
97229 + [24430] = &_002176_hash,
97230 + [6918] = &_002177_hash,
97231 + [10619] = &_002178_hash,
97232 + [23536] = &_002179_hash,
97233 + [61668] = &_002180_hash,
97234 + [6431] = &_002181_hash,
97235 + [23109] = &_002182_hash,
97236 + [56347] = &_002183_hash,
97237 + [7142] = &_002184_hash,
97238 + [44366] = &_002185_hash,
97239 + [32631] = &_002186_hash,
97240 + [23416] = &_002187_hash,
97241 + [34400] = &_002188_hash,
97242 + [25003] = &_002189_hash,
97243 + [42443] = &_002190_hash,
97244 + [49758] = &_002191_hash,
97245 + [25931] = &_002192_hash,
97246 + [31411] = &_002193_hash,
97247 + [44742] = &_002194_hash,
97248 + [63427] = &_002195_hash,
97249 + [22681] = &_002196_hash,
97250 + [3826] = &_002197_hash,
97251 + [25905] = &_002198_hash,
97252 + [36555] = &_002199_hash,
97253 + [64815] = &_002200_hash,
97254 + [32747] = &_002201_hash,
97255 + [26036] = &_002202_hash,
97256 + [31742] = &_002203_hash,
97257 + [61600] = &_002204_hash,
97258 + [48250] = &_002205_hash,
97259 + [15892] = &_002206_hash,
97260 + [51132] = &_002207_hash,
97261 + [12649] = &_002208_hash,
97262 + [36664] = &_002209_hash,
97263 + [56464] = &_002210_hash,
97264 + [16669] = &_002212_hash,
97265 + [47700] = &_002213_hash,
97266 + [19402] = &_002214_hash,
97267 + [53604] = &_002215_hash,
97268 + [25597] = &_002216_hash,
97269 + [9116] = &_002217_hash,
97270 + [30887] = &_002218_hash,
97271 + [51863] = &_002219_hash,
97272 + [15939] = &_002220_hash,
97273 + [15073] = &_002221_hash,
97274 + [57742] = &_002222_hash,
97275 + [20097] = &_002223_hash,
97276 + [24742] = &_002224_hash,
97277 + [52529] = &_002225_hash,
97278 + [12144] = &_002226_hash,
97279 + [30265] = &_002227_hash,
97280 + [54247] = &_002228_hash,
97281 + [36285] = &_002229_hash,
97282 + [18402] = &_002230_hash,
97283 + [841] = &_002231_hash,
97284 + [29238] = &_002232_hash,
97285 + [48709] = &_002234_hash,
97286 + [30138] = &_002235_hash,
97287 + [41031] = &_002236_hash,
97288 + [6990] = &_002237_hash,
97289 + [46624] = &_002238_hash,
97290 + [24515] = &_002239_hash,
97291 + [2368] = &_002240_hash,
97292 + [26233] = &_002241_hash,
97293 + [49401] = &_002242_hash,
97294 + [55291] = &_002243_hash,
97295 + [18555] = &_002244_hash,
97296 + [42640] = &_002245_hash,
97297 + [47086] = &_002246_hash,
97298 + [33596] = &_002247_hash,
97299 + [1083] = &_002248_hash,
97300 + [59812] = &_002249_hash,
97301 + [44239] = &_002250_hash,
97302 + [23265] = &_002251_hash,
97303 + [3397] = &_002252_hash,
97304 + [24466] = &_002253_hash,
97305 + [16926] = &_002255_hash,
97306 + [20029] = &_002256_hash,
97307 + [14782] = &_002257_hash,
97308 + [25690] = &_002258_hash,
97309 + [31818] = &_002259_hash,
97310 + [45558] = &_002260_hash,
97311 + [28154] = &_002261_hash,
97312 + [43948] = &_002262_hash,
97313 + [33065] = &_002263_hash,
97314 + [22] = &_002264_hash,
97315 + [64971] = &_002265_hash,
97316 + [959] = &_002266_hash,
97317 + [42454] = &_002267_hash,
97318 + [28344] = &_002268_hash,
97319 + [31238] = &_002269_hash,
97320 + [47915] = &_002270_hash,
97321 + [16365] = &_002271_hash,
97322 + [59849] = &_002272_hash,
97323 + [48808] = &_002273_hash,
97324 + [11116] = &_002274_hash,
97325 + [41342] = &_002275_hash,
97326 + [19826] = &_002276_hash,
97327 + [27896] = &_002278_hash,
97328 + [42558] = &_002279_hash,
97329 + [9813] = &_002280_hash,
97330 + [42304] = &_002281_hash,
97331 + [14952] = &_002282_hash,
97332 + [39414] = &_002283_hash,
97333 + [37198] = &_002284_hash,
97334 + [54744] = &_002285_hash,
97335 + [42777] = &_002286_hash,
97336 + [18667] = &_002287_hash,
97337 + [50909] = &_002288_hash,
97338 + [18870] = &_002289_hash,
97339 + [29982] = &_002290_hash,
97340 + [4683] = &_002291_hash,
97341 + [59886] = &_002292_hash,
97342 + [60027] = &_002293_hash,
97343 + [38223] = &_002294_hash,
97344 + [44410] = &_002295_hash,
97345 + [24365] = &_002296_hash,
97346 + [22227] = &_002297_hash,
97347 + [42088] = &_002298_hash,
97348 + [26230] = &_002299_hash,
97349 + [28736] = &_002300_hash,
97350 + [42108] = &_002301_hash,
97351 + [37651] = &_002302_hash,
97352 + [50800] = &_002303_hash,
97353 + [13041] = &_002304_hash,
97354 + [41691] = &_002305_hash,
97355 + [23062] = &_002307_hash,
97356 + [36957] = &_002308_hash,
97357 + [31171] = &_002309_hash,
97358 + [20478] = &_002310_hash,
97359 + [16835] = &_002312_hash,
97360 + [30040] = &_002313_hash,
97361 + [47143] = &_002314_hash,
97362 + [64527] = &_002315_hash,
97363 + [39846] = &_002316_hash,
97364 + [61226] = &_002318_hash,
97365 + [14899] = &_002320_hash,
97366 + [30809] = &_002321_hash,
97367 + [19969] = &_002322_hash,
97368 + [27905] = &_002323_hash,
97369 + [51951] = &_002324_hash,
97370 + [11507] = &_002325_hash,
97371 + [15088] = &_002327_hash,
97372 + [43256] = &_002328_hash,
97373 + [17904] = &_002329_hash,
97374 + [65363] = &_002330_hash,
97375 + [50046] = &_002331_hash,
97376 + [28972] = &_002333_hash,
97377 + [45831] = &_002334_hash,
97378 + [31750] = &_002335_hash,
97379 + [2035] = &_002336_hash,
97380 + [51826] = &_002337_hash,
97381 + [35439] = &_002338_hash,
97382 + [48090] = &_002339_hash,
97383 + [25547] = &_002340_hash,
97384 + [40757] = &_002341_hash,
97385 + [50926] = &_002342_hash,
97386 + [3427] = &_002343_hash,
97387 + [18407] = &_002344_hash,
97388 + [28764] = &_002345_hash,
97389 + [47151] = &_002346_hash,
97390 + [4437] = &_002347_hash,
97391 + [3236] = &_002348_hash,
97392 + [39362] = &_002349_hash,
97393 + [59226] = &_002350_hash,
97394 + [14426] = &_002351_hash,
97395 + [54674] = &_002352_hash,
97396 + [27715] = &_002353_hash,
97397 + [5574] = &_002354_hash,
97398 + [18821] = &_002355_hash,
97399 + [55738] = &_002356_hash,
97400 + [4843] = &_002358_hash,
97401 + [60908] = &_002359_hash,
97402 + [22742] = &_002360_hash,
97403 + [54757] = &_002361_hash,
97404 + [57910] = &_002362_hash,
97405 + [51911] = &_002363_hash,
97406 + [35255] = &_002364_hash,
97407 + [45406] = &_002365_hash,
97408 + [24352] = &_002366_hash,
97409 + [28225] = &_002367_hash,
97410 + [53123] = &_002368_hash,
97411 + [52425] = &_002369_hash,
97412 + [20604] = &_002370_hash,
97413 + [54077] = &_002371_hash,
97414 + [6507] = &_002372_hash,
97415 + [4005] = &_002373_hash,
97416 + [44715] = &_002374_hash,
97417 + [51725] = &_002375_hash,
97418 + [47485] = &_002376_hash,
97419 + [36909] = &_002377_hash,
97420 + [62074] = &_002378_hash,
97421 + [50085] = &_002379_hash,
97422 + [30341] = &_002380_hash,
97423 + [57064] = &_002381_hash,
97424 + [11479] = &_002382_hash,
97425 + [57463] = &_002383_hash,
97426 + [37551] = &_002384_hash,
97427 + [49728] = &_002385_hash,
97428 + [33829] = &_002386_hash,
97429 + [64042] = &_002387_hash,
97430 + [57470] = &_002389_hash,
97431 + [63087] = &_002391_hash,
97432 + [17161] = &_002393_hash,
97433 + [12989] = &_002394_hash,
97434 + [63654] = &_002395_hash,
97435 + [62327] = &_002396_hash,
97436 + [25726] = &_002397_hash,
97437 + [1992] = &_002398_hash,
97438 + [28459] = &_002399_hash,
97439 + [18278] = &_002400_hash,
97440 + [30393] = &_002402_hash,
97441 + [29771] = &_002404_hash,
97442 + [25104] = &_002407_hash,
97443 + [30561] = &_002408_hash,
97444 + [42058] = &_002409_hash,
97445 + [34473] = &_002410_hash,
97446 + [29437] = &_002411_hash,
97447 + [11818] = &_002412_hash,
97448 + [31651] = &_002413_hash,
97449 + [48489] = &_002414_hash,
97450 + [7000] = &_002415_hash,
97451 + [42626] = &_002416_hash,
97452 + [15237] = &_002417_hash,
97453 + [15587] = &_002418_hash,
97454 + [55588] = &_002419_hash,
97455 + [65076] = &_002420_hash,
97456 + [33312] = &_002421_hash,
97457 + [41650] = &_002423_hash,
97458 + [30189] = &_002424_hash,
97459 + [62907] = &_002425_hash,
97460 + [32374] = &_002426_hash,
97461 + [5580] = &_002427_hash,
97462 + [62708] = &_002428_hash,
97463 + [9575] = &_002429_hash,
97464 + [55485] = &_002430_hash,
97465 + [43623] = &_002431_hash,
97466 + [47506] = &_002432_hash,
97467 + [20063] = &_002433_hash,
97468 + [34564] = &_002434_hash,
97469 + [2919] = &_002435_hash,
97470 + [22399] = &_002436_hash,
97471 + [51136] = &_002437_hash,
97472 + [18016] = &_002438_hash,
97473 + [43300] = &_002439_hash,
97474 + [48827] = &_002440_hash,
97475 + [36228] = &_002441_hash,
97476 + [58457] = &_002442_hash,
97477 + [13199] = &_002443_hash,
97478 + [47926] = &_002444_hash,
97479 + [12184] = &_002445_hash,
97480 + [3184] = &_002446_hash,
97481 + [58466] = &_002447_hash,
97482 + [32999] = &_002448_hash,
97483 + [35354] = &_002449_hash,
97484 + [15620] = &_002450_hash,
97485 + [25242] = &_002451_hash,
97486 + [23] = &_002452_hash,
97487 + [35674] = &_002453_hash,
97488 + [9639] = &_002454_hash,
97489 + [5861] = &_002455_hash,
97490 + [31060] = &_002456_hash,
97491 + [7482] = &_002457_hash,
97492 + [10738] = &_002458_hash,
97493 + [3606] = &_002459_hash,
97494 + [34790] = &_002460_hash,
97495 + [57502] = &_002461_hash,
97496 + [19216] = &_002462_hash,
97497 + [38509] = &_002463_hash,
97498 + [51053] = &_002464_hash,
97499 + [60040] = &_002465_hash,
97500 + [56238] = &_002466_hash,
97501 + [20522] = &_002467_hash,
97502 + [60399] = &_002468_hash,
97503 + [8944] = &_002469_hash,
97504 + [23824] = &_002470_hash,
97505 + [3569] = &_002471_hash,
97506 + [48589] = &_002472_hash,
97507 + [8175] = &_002473_hash,
97508 + [36328] = &_002474_hash,
97509 + [32794] = &_002475_hash,
97510 + [57843] = &_002476_hash,
97511 + [62354] = &_002477_hash,
97512 + [56641] = &_002478_hash,
97513 + [60001] = &_002479_hash,
97514 + [35145] = &_002480_hash,
97515 + [56439] = &_002481_hash,
97516 + [60056] = &_002482_hash,
97517 + [44193] = &_002484_hash,
97518 + [51737] = &_002485_hash,
97519 + [52477] = &_002486_hash,
97520 + [51399] = &_002487_hash,
97521 + [3521] = &_002488_hash,
97522 + [8274] = &_002489_hash,
97523 + [17880] = &_002490_hash,
97524 + [45586] = &_002491_hash,
97525 + [7043] = &_002492_hash,
97526 + [25167] = &_002493_hash,
97527 + [712] = &_002494_hash,
97528 + [610] = &_002495_hash,
97529 + [47243] = &_002496_hash,
97530 + [47253] = &_002497_hash,
97531 + [56586] = &_002498_hash,
97532 + [41958] = &_002499_hash,
97533 + [12076] = &_002500_hash,
97534 + [35937] = &_002501_hash,
97535 + [51819] = &_002502_hash,
97536 + [10507] = &_002503_hash,
97537 + [34778] = &_002504_hash,
97538 + [27497] = &_002505_hash,
97539 + [62133] = &_002506_hash,
97540 + [11369] = &_002507_hash,
97541 + [53090] = &_002508_hash,
97542 + [21915] = &_002509_hash,
97543 + [31491] = &_002510_hash,
97544 + [22416] = &_002511_hash,
97545 + [34961] = &_002512_hash,
97546 + [54519] = &_002513_hash,
97547 + [21687] = &_002514_hash,
97548 + [59808] = &_002515_hash,
97549 + [5735] = &_002516_hash,
97550 + [9269] = &_002517_hash,
97551 + [11691] = &_002518_hash,
97552 + [32751] = &_002519_hash,
97553 + [35652] = &_002520_hash,
97554 + [7238] = &_002521_hash,
97555 + [25814] = &_002522_hash,
97556 + [36385] = &_002523_hash,
97557 + [59563] = &_002524_hash,
97558 + [2219] = &_002525_hash,
97559 + [36276] = &_002526_hash,
97560 + [6282] = &_002527_hash,
97561 + [42895] = &_002528_hash,
97562 + [65433] = &_002529_hash,
97563 + [15653] = &_002530_hash,
97564 + [49102] = &_002531_hash,
97565 + [57431] = &_002532_hash,
97566 + [22254] = &_002533_hash,
97567 + [3326] = &_002535_hash,
97568 + [37752] = &_002536_hash,
97569 + [8052] = &_002537_hash,
97570 + [10362] = &_002539_hash,
97571 + [12669] = &_002541_hash,
97572 + [43245] = &_002542_hash,
97573 + [11422] = &_002543_hash,
97574 + [53633] = &_002545_hash,
97575 + [30273] = &_002546_hash,
97576 + [15374] = &_002547_hash,
97577 + [41194] = &_002549_hash,
97578 + [60063] = &_002551_hash,
97579 + [7459] = &_002552_hash,
97580 + [36971] = &_002553_hash,
97581 + [61126] = &_002554_hash,
97582 + [49020] = &_002555_hash,
97583 + [24283] = &_002556_hash,
97584 + [51151] = &_002557_hash,
97585 + [20911] = &_002558_hash,
97586 + [5784] = &_002559_hash,
97587 + [17220] = &_002560_hash,
97588 + [583] = &_002561_hash,
97589 + [17886] = &_002562_hash,
97590 + [56561] = &_002563_hash,
97591 + [28999] = &_002564_hash,
97592 + [49921] = &_002565_hash,
97593 + [3017] = &_002566_hash,
97594 + [39678] = &_002567_hash,
97595 + [56748] = &_002568_hash,
97596 + [65421] = &_002569_hash,
97597 + [19044] = &_002571_hash,
97598 + [18853] = &_002573_hash,
97599 + [16831] = &_002574_hash,
97600 + [31881] = &_002576_hash,
97601 + [28731] = &_002577_hash,
97602 + [51239] = &_002578_hash,
97603 + [7414] = &_002579_hash,
97604 + [11362] = &_002580_hash,
97605 + [14550] = &_002581_hash,
97606 + [20792] = &_002582_hash,
97607 + [6246] = &_002583_hash,
97608 + [33506] = &_002584_hash,
97609 + [46924] = &_002585_hash,
97610 + [59145] = &_002586_hash,
97611 + [24775] = &_002587_hash,
97612 + [57961] = &_002588_hash,
97613 + [20581] = &_002589_hash,
97614 + [11350] = &_002590_hash,
97615 + [35474] = &_002591_hash,
97616 + [47771] = &_002592_hash,
97617 + [19750] = &_002593_hash,
97618 + [17481] = &_002594_hash,
97619 + [55202] = &_002595_hash,
97620 + [29708] = &_002596_hash,
97621 + [60432] = &_002597_hash,
97622 + [9805] = &_002598_hash,
97623 + [47000] = &_002599_hash,
97624 + [34135] = &_002600_hash,
97625 + [61661] = &_002601_hash,
97626 + [22885] = &_002602_hash,
97627 + [12141] = &_002603_hash,
97628 + [38130] = &_002604_hash,
97629 + [5727] = &_002605_hash,
97630 + [52241] = &_002606_hash,
97631 + [23122] = &_002607_hash,
97632 + [29201] = &_002608_hash,
97633 + [20494] = &_002609_hash,
97634 + [6554] = &_002610_hash,
97635 + [25355] = &_002611_hash,
97636 + [24755] = &_002612_hash,
97637 + [10321] = &_002613_hash,
97638 + [27804] = &_002614_hash,
97639 + [16332] = &_002615_hash,
97640 + [21305] = &_002617_hash,
97641 + [32045] = &_002618_hash,
97642 + [44130] = &_002620_hash,
97643 + [28479] = &_002621_hash,
97644 + [50004] = &_002622_hash,
97645 + [63233] = &_002623_hash,
97646 + [58634] = &_002624_hash,
97647 +};
97648 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
97649 new file mode 100644
97650 index 0000000..9ad0f39
97651 --- /dev/null
97652 +++ b/tools/gcc/size_overflow_plugin.c
97653 @@ -0,0 +1,1221 @@
97654 +/*
97655 + * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
97656 + * Licensed under the GPL v2, or (at your option) v3
97657 + *
97658 + * Homepage:
97659 + * http://www.grsecurity.net/~ephox/overflow_plugin/
97660 + *
97661 + * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
97662 + * with double integer precision (DImode/TImode for 32/64 bit integer types).
97663 + * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
97664 + *
97665 + * Usage:
97666 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o size_overflow_plugin.so size_overflow_plugin.c
97667 + * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
97668 + */
97669 +
97670 +#include "gcc-plugin.h"
97671 +#include "config.h"
97672 +#include "system.h"
97673 +#include "coretypes.h"
97674 +#include "tree.h"
97675 +#include "tree-pass.h"
97676 +#include "intl.h"
97677 +#include "plugin-version.h"
97678 +#include "tm.h"
97679 +#include "toplev.h"
97680 +#include "function.h"
97681 +#include "tree-flow.h"
97682 +#include "plugin.h"
97683 +#include "gimple.h"
97684 +#include "c-common.h"
97685 +#include "diagnostic.h"
97686 +#include "cfgloop.h"
97687 +
97688 +struct size_overflow_hash {
97689 + struct size_overflow_hash *next;
97690 + const char *name;
97691 + unsigned short param;
97692 +};
97693 +
97694 +#include "size_overflow_hash.h"
97695 +
97696 +#define __unused __attribute__((__unused__))
97697 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
97698 +#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
97699 +#define BEFORE_STMT true
97700 +#define AFTER_STMT false
97701 +#define CREATE_NEW_VAR NULL_TREE
97702 +#define CODES_LIMIT 32
97703 +#define MAX_PARAM 10
97704 +
97705 +#if BUILDING_GCC_VERSION == 4005
97706 +#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
97707 +#endif
97708 +
97709 +int plugin_is_GPL_compatible;
97710 +void debug_gimple_stmt(gimple gs);
97711 +
97712 +static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var);
97713 +static tree signed_size_overflow_type;
97714 +static tree unsigned_size_overflow_type;
97715 +static tree report_size_overflow_decl;
97716 +static tree const_char_ptr_type_node;
97717 +static unsigned int handle_function(void);
97718 +
97719 +static struct plugin_info size_overflow_plugin_info = {
97720 + .version = "20120612beta",
97721 + .help = "no-size_overflow\tturn off size overflow checking\n",
97722 +};
97723 +
97724 +static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
97725 +{
97726 + unsigned int arg_count = type_num_arguments(*node);
97727 +
97728 + for (; args; args = TREE_CHAIN(args)) {
97729 + tree position = TREE_VALUE(args);
97730 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) {
97731 + error("handle_size_overflow_attribute: overflow parameter outside range.");
97732 + *no_add_attrs = true;
97733 + }
97734 + }
97735 + return NULL_TREE;
97736 +}
97737 +
97738 +static struct attribute_spec no_size_overflow_attr = {
97739 + .name = "size_overflow",
97740 + .min_length = 1,
97741 + .max_length = -1,
97742 + .decl_required = false,
97743 + .type_required = true,
97744 + .function_type_required = true,
97745 + .handler = handle_size_overflow_attribute
97746 +};
97747 +
97748 +static void register_attributes(void __unused *event_data, void __unused *data)
97749 +{
97750 + register_attribute(&no_size_overflow_attr);
97751 +}
97752 +
97753 +// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
97754 +static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
97755 +{
97756 +#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
97757 +#define cwmixa( in ) { cwfold( in, m, k, h ); }
97758 +#define cwmixb( in ) { cwfold( in, n, h, k ); }
97759 +
97760 + const unsigned int m = 0x57559429;
97761 + const unsigned int n = 0x5052acdb;
97762 + const unsigned int *key4 = (const unsigned int *)key;
97763 + unsigned int h = len;
97764 + unsigned int k = len + seed + n;
97765 + unsigned long long p;
97766 +
97767 + while (len >= 8) {
97768 + cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
97769 + len -= 8;
97770 + }
97771 + if (len >= 4) {
97772 + cwmixb(key4[0]) key4 += 1;
97773 + len -= 4;
97774 + }
97775 + if (len)
97776 + cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
97777 + cwmixb(h ^ (k + n));
97778 + return k ^ h;
97779 +
97780 +#undef cwfold
97781 +#undef cwmixa
97782 +#undef cwmixb
97783 +}
97784 +
97785 +static inline unsigned int get_hash_num(const char *fndecl, const char *tree_codes, unsigned int len, unsigned int seed)
97786 +{
97787 + unsigned int fn = CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
97788 + unsigned int codes = CrapWow(tree_codes, len, seed) & 0xffff;
97789 + return fn ^ codes;
97790 +}
97791 +
97792 +static inline tree get_original_function_decl(tree fndecl)
97793 +{
97794 + if (DECL_ABSTRACT_ORIGIN(fndecl))
97795 + return DECL_ABSTRACT_ORIGIN(fndecl);
97796 + return fndecl;
97797 +}
97798 +
97799 +static inline gimple get_def_stmt(tree node)
97800 +{
97801 + gcc_assert(TREE_CODE(node) == SSA_NAME);
97802 + return SSA_NAME_DEF_STMT(node);
97803 +}
97804 +
97805 +static unsigned char get_tree_code(tree type)
97806 +{
97807 + switch (TREE_CODE(type)) {
97808 + case ARRAY_TYPE:
97809 + return 0;
97810 + case BOOLEAN_TYPE:
97811 + return 1;
97812 + case ENUMERAL_TYPE:
97813 + return 2;
97814 + case FUNCTION_TYPE:
97815 + return 3;
97816 + case INTEGER_TYPE:
97817 + return 4;
97818 + case POINTER_TYPE:
97819 + return 5;
97820 + case RECORD_TYPE:
97821 + return 6;
97822 + case UNION_TYPE:
97823 + return 7;
97824 + case VOID_TYPE:
97825 + return 8;
97826 + case REAL_TYPE:
97827 + return 9;
97828 + case VECTOR_TYPE:
97829 + return 10;
97830 + default:
97831 + debug_tree(type);
97832 + gcc_unreachable();
97833 + }
97834 +}
97835 +
97836 +static size_t add_type_codes(tree type, unsigned char *tree_codes, size_t len)
97837 +{
97838 + gcc_assert(type != NULL_TREE);
97839 +
97840 + while (type && len < CODES_LIMIT) {
97841 + tree_codes[len] = get_tree_code(type);
97842 + len++;
97843 + type = TREE_TYPE(type);
97844 + }
97845 + return len;
97846 +}
97847 +
97848 +static unsigned int get_function_decl(tree fndecl, unsigned char *tree_codes)
97849 +{
97850 + tree arg, result, type = TREE_TYPE(fndecl);
97851 + enum tree_code code = TREE_CODE(type);
97852 + size_t len = 0;
97853 +
97854 + // skip builtins __builtin_constant_p
97855 + if (DECL_BUILT_IN(fndecl))
97856 + return 0;
97857 +
97858 + gcc_assert(code == FUNCTION_TYPE);
97859 +
97860 + arg = TYPE_ARG_TYPES(type);
97861 + gcc_assert(arg != NULL_TREE);
97862 +
97863 + if (TREE_CODE_CLASS(code) == tcc_type)
97864 + result = type;
97865 + else
97866 + result = DECL_RESULT(fndecl);
97867 +
97868 + gcc_assert(result != NULL_TREE);
97869 + len = add_type_codes(TREE_TYPE(result), tree_codes, len);
97870 +
97871 + while (arg && len < CODES_LIMIT) {
97872 + len = add_type_codes(TREE_VALUE(arg), tree_codes, len);
97873 + arg = TREE_CHAIN(arg);
97874 + }
97875 +
97876 + gcc_assert(len != 0);
97877 + return len;
97878 +}
97879 +
97880 +static struct size_overflow_hash *get_function_hash(tree fndecl)
97881 +{
97882 + unsigned int hash;
97883 + struct size_overflow_hash *entry;
97884 + unsigned char tree_codes[CODES_LIMIT];
97885 + size_t len;
97886 + const char *func_name = NAME(fndecl);
97887 +
97888 + len = get_function_decl(fndecl, tree_codes);
97889 + if (len == 0)
97890 + return NULL;
97891 +
97892 + hash = get_hash_num(func_name, (const char*) tree_codes, len, 0);
97893 +
97894 + entry = size_overflow_hash[hash];
97895 + while (entry) {
97896 + if (!strcmp(entry->name, func_name))
97897 + return entry;
97898 + entry = entry->next;
97899 + }
97900 +
97901 + return NULL;
97902 +}
97903 +
97904 +static void check_arg_type(tree var)
97905 +{
97906 + tree type = TREE_TYPE(var);
97907 + enum tree_code code = TREE_CODE(type);
97908 +
97909 + gcc_assert(code == INTEGER_TYPE || code == ENUMERAL_TYPE ||
97910 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) ||
97911 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE));
97912 +}
97913 +
97914 +static int find_arg_number(tree arg, tree func)
97915 +{
97916 + tree var;
97917 + bool match = false;
97918 + unsigned int argnum = 1;
97919 +
97920 + if (TREE_CODE(arg) == SSA_NAME)
97921 + arg = SSA_NAME_VAR(arg);
97922 +
97923 + for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) {
97924 + if (strcmp(NAME(arg), NAME(var))) {
97925 + argnum++;
97926 + continue;
97927 + }
97928 + check_arg_type(var);
97929 +
97930 + match = true;
97931 + break;
97932 + }
97933 + if (!match) {
97934 + warning(0, "find_arg_number: cannot find the %s argument in %s", NAME(arg), NAME(func));
97935 + return 0;
97936 + }
97937 + return argnum;
97938 +}
97939 +
97940 +static void print_missing_msg(tree func, unsigned int argnum)
97941 +{
97942 + unsigned int new_hash;
97943 + size_t len;
97944 + unsigned char tree_codes[CODES_LIMIT];
97945 + location_t loc = DECL_SOURCE_LOCATION(func);
97946 + const char *curfunc = NAME(func);
97947 +
97948 + len = get_function_decl(func, tree_codes);
97949 + new_hash = get_hash_num(curfunc, (const char *) tree_codes, len, 0);
97950 + inform(loc, "Function %s is missing from the size_overflow hash table +%s+%d+%u+", curfunc, curfunc, argnum, new_hash);
97951 +}
97952 +
97953 +static void check_missing_attribute(tree arg)
97954 +{
97955 + tree type, func = get_original_function_decl(current_function_decl);
97956 + unsigned int argnum;
97957 + struct size_overflow_hash *hash;
97958 +
97959 + gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
97960 +
97961 + type = TREE_TYPE(arg);
97962 + // skip function pointers
97963 + if (TREE_CODE(type) == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == FUNCTION_TYPE)
97964 + return;
97965 +
97966 + if (lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(func))))
97967 + return;
97968 +
97969 + argnum = find_arg_number(arg, func);
97970 + if (argnum == 0)
97971 + return;
97972 +
97973 + hash = get_function_hash(func);
97974 + if (!hash || !(hash->param & (1U << argnum)))
97975 + print_missing_msg(func, argnum);
97976 +}
97977 +
97978 +static tree create_new_var(tree type)
97979 +{
97980 + tree new_var = create_tmp_var(type, "cicus");
97981 +
97982 + add_referenced_var(new_var);
97983 + mark_sym_for_renaming(new_var);
97984 + return new_var;
97985 +}
97986 +
97987 +static bool is_bool(tree node)
97988 +{
97989 + tree type;
97990 +
97991 + if (node == NULL_TREE)
97992 + return false;
97993 +
97994 + type = TREE_TYPE(node);
97995 + if (!INTEGRAL_TYPE_P(type))
97996 + return false;
97997 + if (TREE_CODE(type) == BOOLEAN_TYPE)
97998 + return true;
97999 + if (TYPE_PRECISION(type) == 1)
98000 + return true;
98001 + return false;
98002 +}
98003 +
98004 +static tree cast_a_tree(tree type, tree var)
98005 +{
98006 + gcc_assert(fold_convertible_p(type, var));
98007 +
98008 + return fold_convert(type, var);
98009 +}
98010 +
98011 +static gimple build_cast_stmt(tree type, tree var, tree new_var, location_t loc)
98012 +{
98013 + gimple assign;
98014 +
98015 + if (new_var == CREATE_NEW_VAR)
98016 + new_var = create_new_var(type);
98017 +
98018 + assign = gimple_build_assign(new_var, cast_a_tree(type, var));
98019 + gimple_set_location(assign, loc);
98020 + gimple_set_lhs(assign, make_ssa_name(new_var, assign));
98021 +
98022 + return assign;
98023 +}
98024 +
98025 +static tree create_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, bool before)
98026 +{
98027 + tree oldstmt_rhs1;
98028 + enum tree_code code;
98029 + gimple stmt;
98030 + gimple_stmt_iterator gsi;
98031 +
98032 + if (!*potentionally_overflowed)
98033 + return NULL_TREE;
98034 +
98035 + if (rhs1 == NULL_TREE) {
98036 + debug_gimple_stmt(oldstmt);
98037 + error("create_assign: rhs1 is NULL_TREE");
98038 + gcc_unreachable();
98039 + }
98040 +
98041 + oldstmt_rhs1 = gimple_assign_rhs1(oldstmt);
98042 + code = TREE_CODE(oldstmt_rhs1);
98043 + if (code == PARM_DECL || (code == SSA_NAME && gimple_code(get_def_stmt(oldstmt_rhs1)) == GIMPLE_NOP))
98044 + check_missing_attribute(oldstmt_rhs1);
98045 +
98046 + stmt = build_cast_stmt(signed_size_overflow_type, rhs1, CREATE_NEW_VAR, gimple_location(oldstmt));
98047 + gsi = gsi_for_stmt(oldstmt);
98048 + if (lookup_stmt_eh_lp(oldstmt) != 0) {
98049 + basic_block next_bb, cur_bb;
98050 + edge e;
98051 +
98052 + gcc_assert(before == false);
98053 + gcc_assert(stmt_can_throw_internal(oldstmt));
98054 + gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
98055 + gcc_assert(!gsi_end_p(gsi));
98056 +
98057 + cur_bb = gimple_bb(oldstmt);
98058 + next_bb = cur_bb->next_bb;
98059 + e = find_edge(cur_bb, next_bb);
98060 + gcc_assert(e != NULL);
98061 + gcc_assert(e->flags & EDGE_FALLTHRU);
98062 +
98063 + gsi = gsi_after_labels(next_bb);
98064 + gcc_assert(!gsi_end_p(gsi));
98065 + before = true;
98066 + }
98067 + if (before)
98068 + gsi_insert_before(&gsi, stmt, GSI_NEW_STMT);
98069 + else
98070 + gsi_insert_after(&gsi, stmt, GSI_NEW_STMT);
98071 + update_stmt(stmt);
98072 + pointer_set_insert(visited, oldstmt);
98073 + return gimple_get_lhs(stmt);
98074 +}
98075 +
98076 +static tree dup_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, tree rhs2, tree __unused rhs3)
98077 +{
98078 + tree new_var, lhs = gimple_get_lhs(oldstmt);
98079 + gimple stmt;
98080 + gimple_stmt_iterator gsi;
98081 +
98082 + if (!*potentionally_overflowed)
98083 + return NULL_TREE;
98084 +
98085 + if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
98086 + rhs1 = gimple_assign_rhs1(oldstmt);
98087 + rhs1 = create_assign(visited, potentionally_overflowed, oldstmt, rhs1, BEFORE_STMT);
98088 + }
98089 + if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
98090 + rhs2 = gimple_assign_rhs2(oldstmt);
98091 + rhs2 = create_assign(visited, potentionally_overflowed, oldstmt, rhs2, BEFORE_STMT);
98092 + }
98093 +
98094 + stmt = gimple_copy(oldstmt);
98095 + gimple_set_location(stmt, gimple_location(oldstmt));
98096 +
98097 + if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
98098 + gimple_assign_set_rhs_code(stmt, MULT_EXPR);
98099 +
98100 + if (is_bool(lhs))
98101 + new_var = SSA_NAME_VAR(lhs);
98102 + else
98103 + new_var = create_new_var(signed_size_overflow_type);
98104 + new_var = make_ssa_name(new_var, stmt);
98105 + gimple_set_lhs(stmt, new_var);
98106 +
98107 + if (rhs1 != NULL_TREE) {
98108 + if (!gimple_assign_cast_p(oldstmt))
98109 + rhs1 = cast_a_tree(signed_size_overflow_type, rhs1);
98110 + gimple_assign_set_rhs1(stmt, rhs1);
98111 + }
98112 +
98113 + if (rhs2 != NULL_TREE)
98114 + gimple_assign_set_rhs2(stmt, rhs2);
98115 +#if BUILDING_GCC_VERSION >= 4007
98116 + if (rhs3 != NULL_TREE)
98117 + gimple_assign_set_rhs3(stmt, rhs3);
98118 +#endif
98119 + gimple_set_vuse(stmt, gimple_vuse(oldstmt));
98120 + gimple_set_vdef(stmt, gimple_vdef(oldstmt));
98121 +
98122 + gsi = gsi_for_stmt(oldstmt);
98123 + gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
98124 + update_stmt(stmt);
98125 + pointer_set_insert(visited, oldstmt);
98126 + return gimple_get_lhs(stmt);
98127 +}
98128 +
98129 +static gimple overflow_create_phi_node(gimple oldstmt, tree var)
98130 +{
98131 + basic_block bb;
98132 + gimple phi;
98133 + gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
98134 +
98135 + bb = gsi_bb(gsi);
98136 +
98137 + phi = create_phi_node(var, bb);
98138 + gsi = gsi_last(phi_nodes(bb));
98139 + gsi_remove(&gsi, false);
98140 +
98141 + gsi = gsi_for_stmt(oldstmt);
98142 + gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
98143 + gimple_set_bb(phi, bb);
98144 + return phi;
98145 +}
98146 +
98147 +static tree signed_cast_constant(tree node)
98148 +{
98149 + gcc_assert(is_gimple_constant(node));
98150 +
98151 + return cast_a_tree(signed_size_overflow_type, node);
98152 +}
98153 +
98154 +static basic_block create_a_first_bb(void)
98155 +{
98156 + basic_block first_bb;
98157 +
98158 + first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
98159 + if (dom_info_available_p(CDI_DOMINATORS))
98160 + set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
98161 + return first_bb;
98162 +}
98163 +
98164 +static gimple cast_old_phi_arg(gimple oldstmt, tree arg, tree new_var, unsigned int i)
98165 +{
98166 + basic_block bb;
98167 + gimple newstmt, def_stmt;
98168 + gimple_stmt_iterator gsi;
98169 +
98170 + newstmt = build_cast_stmt(signed_size_overflow_type, arg, new_var, gimple_location(oldstmt));
98171 + if (TREE_CODE(arg) == SSA_NAME) {
98172 + def_stmt = get_def_stmt(arg);
98173 + if (gimple_code(def_stmt) != GIMPLE_NOP) {
98174 + gsi = gsi_for_stmt(def_stmt);
98175 + gsi_insert_after(&gsi, newstmt, GSI_NEW_STMT);
98176 + return newstmt;
98177 + }
98178 + }
98179 +
98180 + bb = gimple_phi_arg_edge(oldstmt, i)->src;
98181 + if (bb->index == 0)
98182 + bb = create_a_first_bb();
98183 + gsi = gsi_after_labels(bb);
98184 + gsi_insert_before(&gsi, newstmt, GSI_NEW_STMT);
98185 + return newstmt;
98186 +}
98187 +
98188 +static gimple handle_new_phi_arg(tree arg, tree new_var, tree new_rhs)
98189 +{
98190 + gimple newstmt;
98191 + gimple_stmt_iterator gsi;
98192 + void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update);
98193 + gimple def_newstmt = get_def_stmt(new_rhs);
98194 +
98195 + gsi_insert = gsi_insert_after;
98196 + gsi = gsi_for_stmt(def_newstmt);
98197 +
98198 + switch (gimple_code(get_def_stmt(arg))) {
98199 + case GIMPLE_PHI:
98200 + newstmt = gimple_build_assign(new_var, new_rhs);
98201 + gsi = gsi_after_labels(gimple_bb(def_newstmt));
98202 + gsi_insert = gsi_insert_before;
98203 + break;
98204 + case GIMPLE_ASM:
98205 + case GIMPLE_CALL:
98206 + newstmt = gimple_build_assign(new_var, new_rhs);
98207 + break;
98208 + case GIMPLE_ASSIGN:
98209 + newstmt = gimple_build_assign(new_var, gimple_get_lhs(def_newstmt));
98210 + break;
98211 + default:
98212 + /* unknown gimple_code (handle_build_new_phi_arg) */
98213 + gcc_unreachable();
98214 + }
98215 +
98216 + gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
98217 + gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
98218 + update_stmt(newstmt);
98219 + return newstmt;
98220 +}
98221 +
98222 +static tree build_new_phi_arg(struct pointer_set_t *visited, bool *potentionally_overflowed, tree arg, tree new_var)
98223 +{
98224 + gimple newstmt;
98225 + tree new_rhs;
98226 +
98227 + new_rhs = expand(visited, potentionally_overflowed, arg);
98228 +
98229 + if (new_rhs == NULL_TREE)
98230 + return NULL_TREE;
98231 +
98232 + newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
98233 + return gimple_get_lhs(newstmt);
98234 +}
98235 +
98236 +static tree build_new_phi(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt)
98237 +{
98238 + gimple phi;
98239 + tree new_var = create_new_var(signed_size_overflow_type);
98240 + unsigned int i, n = gimple_phi_num_args(oldstmt);
98241 +
98242 + pointer_set_insert(visited, oldstmt);
98243 + phi = overflow_create_phi_node(oldstmt, new_var);
98244 + for (i = 0; i < n; i++) {
98245 + tree arg, lhs;
98246 +
98247 + arg = gimple_phi_arg_def(oldstmt, i);
98248 + if (is_gimple_constant(arg))
98249 + arg = signed_cast_constant(arg);
98250 + lhs = build_new_phi_arg(visited, potentionally_overflowed, arg, new_var);
98251 + if (lhs == NULL_TREE)
98252 + lhs = gimple_get_lhs(cast_old_phi_arg(oldstmt, arg, new_var, i));
98253 + add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
98254 + }
98255 +
98256 + update_stmt(phi);
98257 + return gimple_phi_result(phi);
98258 +}
98259 +
98260 +static tree handle_unary_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
98261 +{
98262 + gimple def_stmt = get_def_stmt(var);
98263 + tree new_rhs1, rhs1 = gimple_assign_rhs1(def_stmt);
98264 +
98265 + *potentionally_overflowed = true;
98266 + new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
98267 + if (new_rhs1 == NULL_TREE) {
98268 + if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
98269 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
98270 + else
98271 + return create_assign(visited, potentionally_overflowed, def_stmt, rhs1, AFTER_STMT);
98272 + }
98273 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, NULL_TREE, NULL_TREE);
98274 +}
98275 +
98276 +static tree handle_unary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
98277 +{
98278 + gimple def_stmt = get_def_stmt(var);
98279 + tree rhs1 = gimple_assign_rhs1(def_stmt);
98280 +
98281 + if (is_gimple_constant(rhs1))
98282 + return dup_assign(visited, potentionally_overflowed, def_stmt, signed_cast_constant(rhs1), NULL_TREE, NULL_TREE);
98283 +
98284 + gcc_assert(TREE_CODE(rhs1) != COND_EXPR);
98285 + switch (TREE_CODE(rhs1)) {
98286 + case SSA_NAME:
98287 + return handle_unary_rhs(visited, potentionally_overflowed, var);
98288 +
98289 + case ARRAY_REF:
98290 + case BIT_FIELD_REF:
98291 + case ADDR_EXPR:
98292 + case COMPONENT_REF:
98293 + case INDIRECT_REF:
98294 +#if BUILDING_GCC_VERSION >= 4006
98295 + case MEM_REF:
98296 +#endif
98297 + case PARM_DECL:
98298 + case TARGET_MEM_REF:
98299 + case VAR_DECL:
98300 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
98301 +
98302 + default:
98303 + debug_gimple_stmt(def_stmt);
98304 + debug_tree(rhs1);
98305 + gcc_unreachable();
98306 + }
98307 +}
98308 +
98309 +static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
98310 +{
98311 + gimple cond_stmt;
98312 + gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
98313 +
98314 + cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
98315 + gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
98316 + update_stmt(cond_stmt);
98317 +}
98318 +
98319 +static tree create_string_param(tree string)
98320 +{
98321 + tree i_type, a_type;
98322 + int length = TREE_STRING_LENGTH(string);
98323 +
98324 + gcc_assert(length > 0);
98325 +
98326 + i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
98327 + a_type = build_array_type(char_type_node, i_type);
98328 +
98329 + TREE_TYPE(string) = a_type;
98330 + TREE_CONSTANT(string) = 1;
98331 + TREE_READONLY(string) = 1;
98332 +
98333 + return build1(ADDR_EXPR, ptr_type_node, string);
98334 +}
98335 +
98336 +static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg)
98337 +{
98338 + gimple func_stmt, def_stmt;
98339 + tree current_func, loc_file, loc_line;
98340 + expanded_location xloc;
98341 + gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
98342 +
98343 + def_stmt = get_def_stmt(arg);
98344 + xloc = expand_location(gimple_location(def_stmt));
98345 +
98346 + if (!gimple_has_location(def_stmt)) {
98347 + xloc = expand_location(gimple_location(stmt));
98348 + if (!gimple_has_location(stmt))
98349 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
98350 + }
98351 +
98352 + loc_line = build_int_cstu(unsigned_type_node, xloc.line);
98353 +
98354 + loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
98355 + loc_file = create_string_param(loc_file);
98356 +
98357 + current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
98358 + current_func = create_string_param(current_func);
98359 +
98360 + // void report_size_overflow(const char *file, unsigned int line, const char *func)
98361 + func_stmt = gimple_build_call(report_size_overflow_decl, 3, loc_file, loc_line, current_func);
98362 +
98363 + gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
98364 +}
98365 +
98366 +static void __unused print_the_code_insertions(gimple stmt)
98367 +{
98368 + location_t loc = gimple_location(stmt);
98369 +
98370 + inform(loc, "Integer size_overflow check applied here.");
98371 +}
98372 +
98373 +static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value)
98374 +{
98375 + basic_block cond_bb, join_bb, bb_true;
98376 + edge e;
98377 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
98378 +
98379 + cond_bb = gimple_bb(stmt);
98380 + gsi_prev(&gsi);
98381 + if (gsi_end_p(gsi))
98382 + e = split_block_after_labels(cond_bb);
98383 + else
98384 + e = split_block(cond_bb, gsi_stmt(gsi));
98385 + cond_bb = e->src;
98386 + join_bb = e->dest;
98387 + e->flags = EDGE_FALSE_VALUE;
98388 + e->probability = REG_BR_PROB_BASE;
98389 +
98390 + bb_true = create_empty_bb(cond_bb);
98391 + make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
98392 + make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
98393 + make_edge(bb_true, join_bb, EDGE_FALLTHRU);
98394 +
98395 + if (dom_info_available_p(CDI_DOMINATORS)) {
98396 + set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
98397 + set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
98398 + }
98399 +
98400 + if (current_loops != NULL) {
98401 + gcc_assert(cond_bb->loop_father == join_bb->loop_father);
98402 + add_bb_to_loop(bb_true, cond_bb->loop_father);
98403 + }
98404 +
98405 + insert_cond(cond_bb, arg, cond_code, type_value);
98406 + insert_cond_result(bb_true, stmt, arg);
98407 +
98408 +// print_the_code_insertions(stmt);
98409 +}
98410 +
98411 +static tree get_type_for_check(tree rhs)
98412 +{
98413 + tree def_rhs;
98414 + gimple def_stmt = get_def_stmt(rhs);
98415 +
98416 + if (!gimple_assign_cast_p(def_stmt))
98417 + return TREE_TYPE(rhs);
98418 + def_rhs = gimple_assign_rhs1(def_stmt);
98419 + if (TREE_CODE(TREE_TYPE(def_rhs)) == INTEGER_TYPE)
98420 + return TREE_TYPE(def_rhs);
98421 + return TREE_TYPE(rhs);
98422 +}
98423 +
98424 +static gimple cast_to_unsigned_size_overflow_type(gimple stmt, tree cast_rhs)
98425 +{
98426 + gimple ucast_stmt;
98427 + gimple_stmt_iterator gsi;
98428 + location_t loc = gimple_location(stmt);
98429 +
98430 + ucast_stmt = build_cast_stmt(unsigned_size_overflow_type, cast_rhs, CREATE_NEW_VAR, loc);
98431 + gsi = gsi_for_stmt(stmt);
98432 + gsi_insert_before(&gsi, ucast_stmt, GSI_SAME_STMT);
98433 + return ucast_stmt;
98434 +}
98435 +
98436 +static void check_size_overflow(gimple stmt, tree cast_rhs, tree rhs, bool *potentionally_overflowed)
98437 +{
98438 + tree type_max, type_min, rhs_type;
98439 + gimple ucast_stmt;
98440 +
98441 + if (!*potentionally_overflowed)
98442 + return;
98443 +
98444 + rhs_type = get_type_for_check(rhs);
98445 +
98446 + if (TYPE_UNSIGNED(rhs_type)) {
98447 + ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, cast_rhs);
98448 + type_max = cast_a_tree(unsigned_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
98449 + insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
98450 + } else {
98451 + type_max = cast_a_tree(signed_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
98452 + insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max);
98453 +
98454 + type_min = cast_a_tree(signed_size_overflow_type, TYPE_MIN_VALUE(rhs_type));
98455 + insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min);
98456 + }
98457 +}
98458 +
98459 +static tree change_assign_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple stmt, tree orig_rhs)
98460 +{
98461 + gimple assign;
98462 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
98463 + tree new_rhs, origtype = TREE_TYPE(orig_rhs);
98464 +
98465 + gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
98466 +
98467 + new_rhs = expand(visited, potentionally_overflowed, orig_rhs);
98468 + if (new_rhs == NULL_TREE)
98469 + return NULL_TREE;
98470 +
98471 + assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, gimple_location(stmt));
98472 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
98473 + update_stmt(assign);
98474 + return gimple_get_lhs(assign);
98475 +}
98476 +
98477 +static tree handle_const_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple def_stmt, tree var, tree rhs, tree new_rhs1, tree new_rhs2, void (*gimple_assign_set_rhs)(gimple, tree))
98478 +{
98479 + tree new_rhs, cast_rhs;
98480 +
98481 + if (gimple_assign_rhs_code(def_stmt) == MIN_EXPR)
98482 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
98483 +
98484 + new_rhs = change_assign_rhs(visited, potentionally_overflowed, def_stmt, rhs);
98485 + if (new_rhs != NULL_TREE) {
98486 + gimple_assign_set_rhs(def_stmt, new_rhs);
98487 + update_stmt(def_stmt);
98488 +
98489 + cast_rhs = gimple_assign_rhs1(get_def_stmt(new_rhs));
98490 +
98491 + check_size_overflow(def_stmt, cast_rhs, rhs, potentionally_overflowed);
98492 + }
98493 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
98494 +}
98495 +
98496 +static tree handle_binary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
98497 +{
98498 + tree rhs1, rhs2;
98499 + gimple def_stmt = get_def_stmt(var);
98500 + tree new_rhs1 = NULL_TREE;
98501 + tree new_rhs2 = NULL_TREE;
98502 +
98503 + rhs1 = gimple_assign_rhs1(def_stmt);
98504 + rhs2 = gimple_assign_rhs2(def_stmt);
98505 +
98506 + /* no DImode/TImode division in the 32/64 bit kernel */
98507 + switch (gimple_assign_rhs_code(def_stmt)) {
98508 + case RDIV_EXPR:
98509 + case TRUNC_DIV_EXPR:
98510 + case CEIL_DIV_EXPR:
98511 + case FLOOR_DIV_EXPR:
98512 + case ROUND_DIV_EXPR:
98513 + case TRUNC_MOD_EXPR:
98514 + case CEIL_MOD_EXPR:
98515 + case FLOOR_MOD_EXPR:
98516 + case ROUND_MOD_EXPR:
98517 + case EXACT_DIV_EXPR:
98518 + case POINTER_PLUS_EXPR:
98519 + case BIT_AND_EXPR:
98520 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
98521 + default:
98522 + break;
98523 + }
98524 +
98525 + *potentionally_overflowed = true;
98526 +
98527 + if (TREE_CODE(rhs1) == SSA_NAME)
98528 + new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
98529 + if (TREE_CODE(rhs2) == SSA_NAME)
98530 + new_rhs2 = expand(visited, potentionally_overflowed, rhs2);
98531 +
98532 + if (is_gimple_constant(rhs2))
98533 + return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs1, new_rhs1, signed_cast_constant(rhs2), &gimple_assign_set_rhs1);
98534 +
98535 + if (is_gimple_constant(rhs1))
98536 + return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs2, signed_cast_constant(rhs1), new_rhs2, &gimple_assign_set_rhs2);
98537 +
98538 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
98539 +}
98540 +
98541 +#if BUILDING_GCC_VERSION >= 4007
98542 +static tree get_new_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree rhs)
98543 +{
98544 + if (is_gimple_constant(rhs))
98545 + return signed_cast_constant(rhs);
98546 + if (TREE_CODE(rhs) != SSA_NAME)
98547 + return NULL_TREE;
98548 + return expand(visited, potentionally_overflowed, rhs);
98549 +}
98550 +
98551 +static tree handle_ternary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
98552 +{
98553 + tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3;
98554 + gimple def_stmt = get_def_stmt(var);
98555 +
98556 + *potentionally_overflowed = true;
98557 +
98558 + rhs1 = gimple_assign_rhs1(def_stmt);
98559 + rhs2 = gimple_assign_rhs2(def_stmt);
98560 + rhs3 = gimple_assign_rhs3(def_stmt);
98561 + new_rhs1 = get_new_rhs(visited, potentionally_overflowed, rhs1);
98562 + new_rhs2 = get_new_rhs(visited, potentionally_overflowed, rhs2);
98563 + new_rhs3 = get_new_rhs(visited, potentionally_overflowed, rhs3);
98564 +
98565 + if (new_rhs1 == NULL_TREE && new_rhs2 != NULL_TREE && new_rhs3 != NULL_TREE)
98566 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, new_rhs3);
98567 + error("handle_ternary_ops: unknown rhs");
98568 + gcc_unreachable();
98569 +}
98570 +#endif
98571 +
98572 +static void set_size_overflow_type(tree node)
98573 +{
98574 + switch (TYPE_MODE(TREE_TYPE(node))) {
98575 + case SImode:
98576 + signed_size_overflow_type = intDI_type_node;
98577 + unsigned_size_overflow_type = unsigned_intDI_type_node;
98578 + break;
98579 + case DImode:
98580 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) {
98581 + signed_size_overflow_type = intDI_type_node;
98582 + unsigned_size_overflow_type = unsigned_intDI_type_node;
98583 + } else {
98584 + signed_size_overflow_type = intTI_type_node;
98585 + unsigned_size_overflow_type = unsigned_intTI_type_node;
98586 + }
98587 + break;
98588 + default:
98589 + error("set_size_overflow_type: unsupported gcc configuration.");
98590 + gcc_unreachable();
98591 + }
98592 +}
98593 +
98594 +static tree expand_visited(gimple def_stmt)
98595 +{
98596 + gimple tmp;
98597 + gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
98598 +
98599 + gsi_next(&gsi);
98600 + tmp = gsi_stmt(gsi);
98601 + switch (gimple_code(tmp)) {
98602 + case GIMPLE_ASSIGN:
98603 + return gimple_get_lhs(tmp);
98604 + case GIMPLE_PHI:
98605 + return gimple_phi_result(tmp);
98606 + case GIMPLE_CALL:
98607 + return gimple_call_lhs(tmp);
98608 + default:
98609 + return NULL_TREE;
98610 + }
98611 +}
98612 +
98613 +static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
98614 +{
98615 + gimple def_stmt;
98616 + enum tree_code code = TREE_CODE(TREE_TYPE(var));
98617 +
98618 + if (is_gimple_constant(var))
98619 + return NULL_TREE;
98620 +
98621 + if (TREE_CODE(var) == ADDR_EXPR)
98622 + return NULL_TREE;
98623 +
98624 + gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE);
98625 + if (code != INTEGER_TYPE)
98626 + return NULL_TREE;
98627 +
98628 + if (SSA_NAME_IS_DEFAULT_DEF(var)) {
98629 + check_missing_attribute(var);
98630 + return NULL_TREE;
98631 + }
98632 +
98633 + def_stmt = get_def_stmt(var);
98634 +
98635 + if (!def_stmt)
98636 + return NULL_TREE;
98637 +
98638 + if (pointer_set_contains(visited, def_stmt))
98639 + return expand_visited(def_stmt);
98640 +
98641 + switch (gimple_code(def_stmt)) {
98642 + case GIMPLE_NOP:
98643 + check_missing_attribute(var);
98644 + return NULL_TREE;
98645 + case GIMPLE_PHI:
98646 + return build_new_phi(visited, potentionally_overflowed, def_stmt);
98647 + case GIMPLE_CALL:
98648 + case GIMPLE_ASM:
98649 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
98650 + case GIMPLE_ASSIGN:
98651 + switch (gimple_num_ops(def_stmt)) {
98652 + case 2:
98653 + return handle_unary_ops(visited, potentionally_overflowed, var);
98654 + case 3:
98655 + return handle_binary_ops(visited, potentionally_overflowed, var);
98656 +#if BUILDING_GCC_VERSION >= 4007
98657 + case 4:
98658 + return handle_ternary_ops(visited, potentionally_overflowed, var);
98659 +#endif
98660 + }
98661 + default:
98662 + debug_gimple_stmt(def_stmt);
98663 + error("expand: unknown gimple code");
98664 + gcc_unreachable();
98665 + }
98666 +}
98667 +
98668 +static void change_function_arg(gimple stmt, tree origarg, unsigned int argnum, tree newarg)
98669 +{
98670 + gimple assign;
98671 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
98672 + tree origtype = TREE_TYPE(origarg);
98673 +
98674 + gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
98675 +
98676 + assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, gimple_location(stmt));
98677 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
98678 + update_stmt(assign);
98679 +
98680 + gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
98681 + update_stmt(stmt);
98682 +}
98683 +
98684 +static tree get_function_arg(unsigned int argnum, gimple stmt, tree fndecl)
98685 +{
98686 + const char *origid;
98687 + tree arg, origarg;
98688 +
98689 + if (!DECL_ABSTRACT_ORIGIN(fndecl)) {
98690 + gcc_assert(gimple_call_num_args(stmt) > argnum);
98691 + return gimple_call_arg(stmt, argnum);
98692 + }
98693 +
98694 + origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl));
98695 + while (origarg && argnum) {
98696 + argnum--;
98697 + origarg = TREE_CHAIN(origarg);
98698 + }
98699 +
98700 + gcc_assert(argnum == 0);
98701 +
98702 + gcc_assert(origarg != NULL_TREE);
98703 + origid = NAME(origarg);
98704 + for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) {
98705 + if (!strcmp(origid, NAME(arg)))
98706 + return arg;
98707 + }
98708 + return NULL_TREE;
98709 +}
98710 +
98711 +static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum)
98712 +{
98713 + struct pointer_set_t *visited;
98714 + tree arg, newarg;
98715 + bool potentionally_overflowed;
98716 +
98717 + arg = get_function_arg(argnum, stmt, fndecl);
98718 + if (arg == NULL_TREE)
98719 + return;
98720 +
98721 + if (is_gimple_constant(arg))
98722 + return;
98723 + if (TREE_CODE(arg) != SSA_NAME)
98724 + return;
98725 +
98726 + check_arg_type(arg);
98727 +
98728 + set_size_overflow_type(arg);
98729 +
98730 + visited = pointer_set_create();
98731 + potentionally_overflowed = false;
98732 + newarg = expand(visited, &potentionally_overflowed, arg);
98733 + pointer_set_destroy(visited);
98734 +
98735 + if (newarg == NULL_TREE || !potentionally_overflowed)
98736 + return;
98737 +
98738 + change_function_arg(stmt, arg, argnum, newarg);
98739 +
98740 + check_size_overflow(stmt, newarg, arg, &potentionally_overflowed);
98741 +}
98742 +
98743 +static void handle_function_by_attribute(gimple stmt, tree attr, tree fndecl)
98744 +{
98745 + tree p = TREE_VALUE(attr);
98746 + do {
98747 + handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1);
98748 + p = TREE_CHAIN(p);
98749 + } while (p);
98750 +}
98751 +
98752 +static void handle_function_by_hash(gimple stmt, tree fndecl)
98753 +{
98754 + tree orig_fndecl;
98755 + unsigned int num;
98756 + struct size_overflow_hash *hash;
98757 +
98758 + orig_fndecl = get_original_function_decl(fndecl);
98759 + hash = get_function_hash(orig_fndecl);
98760 + if (!hash)
98761 + return;
98762 +
98763 + for (num = 1; num <= MAX_PARAM; num++)
98764 + if (hash->param & (1U << num))
98765 + handle_function_arg(stmt, fndecl, num - 1);
98766 +}
98767 +
98768 +static unsigned int handle_function(void)
98769 +{
98770 + basic_block bb = ENTRY_BLOCK_PTR->next_bb;
98771 + int saved_last_basic_block = last_basic_block;
98772 +
98773 + do {
98774 + gimple_stmt_iterator gsi;
98775 + basic_block next = bb->next_bb;
98776 +
98777 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
98778 + tree fndecl, attr;
98779 + gimple stmt = gsi_stmt(gsi);
98780 +
98781 + if (!(is_gimple_call(stmt)))
98782 + continue;
98783 + fndecl = gimple_call_fndecl(stmt);
98784 + if (fndecl == NULL_TREE)
98785 + continue;
98786 + if (gimple_call_num_args(stmt) == 0)
98787 + continue;
98788 + attr = lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(fndecl)));
98789 + if (!attr || !TREE_VALUE(attr))
98790 + handle_function_by_hash(stmt, fndecl);
98791 + else
98792 + handle_function_by_attribute(stmt, attr, fndecl);
98793 + gsi = gsi_for_stmt(stmt);
98794 + }
98795 + bb = next;
98796 + } while (bb && bb->index <= saved_last_basic_block);
98797 + return 0;
98798 +}
98799 +
98800 +static struct gimple_opt_pass size_overflow_pass = {
98801 + .pass = {
98802 + .type = GIMPLE_PASS,
98803 + .name = "size_overflow",
98804 + .gate = NULL,
98805 + .execute = handle_function,
98806 + .sub = NULL,
98807 + .next = NULL,
98808 + .static_pass_number = 0,
98809 + .tv_id = TV_NONE,
98810 + .properties_required = PROP_cfg | PROP_referenced_vars,
98811 + .properties_provided = 0,
98812 + .properties_destroyed = 0,
98813 + .todo_flags_start = 0,
98814 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
98815 + }
98816 +};
98817 +
98818 +static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
98819 +{
98820 + tree fntype;
98821 +
98822 + const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
98823 +
98824 + // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func)
98825 + fntype = build_function_type_list(void_type_node,
98826 + const_char_ptr_type_node,
98827 + unsigned_type_node,
98828 + const_char_ptr_type_node,
98829 + NULL_TREE);
98830 + report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
98831 +
98832 + DECL_ASSEMBLER_NAME(report_size_overflow_decl);
98833 + TREE_PUBLIC(report_size_overflow_decl) = 1;
98834 + DECL_EXTERNAL(report_size_overflow_decl) = 1;
98835 + DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
98836 +}
98837 +
98838 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
98839 +{
98840 + int i;
98841 + const char * const plugin_name = plugin_info->base_name;
98842 + const int argc = plugin_info->argc;
98843 + const struct plugin_argument * const argv = plugin_info->argv;
98844 + bool enable = true;
98845 +
98846 + struct register_pass_info size_overflow_pass_info = {
98847 + .pass = &size_overflow_pass.pass,
98848 + .reference_pass_name = "ssa",
98849 + .ref_pass_instance_number = 1,
98850 + .pos_op = PASS_POS_INSERT_AFTER
98851 + };
98852 +
98853 + if (!plugin_default_version_check(version, &gcc_version)) {
98854 + error(G_("incompatible gcc/plugin versions"));
98855 + return 1;
98856 + }
98857 +
98858 + for (i = 0; i < argc; ++i) {
98859 + if (!strcmp(argv[i].key, "no-size-overflow")) {
98860 + enable = false;
98861 + continue;
98862 + }
98863 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
98864 + }
98865 +
98866 + register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
98867 + if (enable) {
98868 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
98869 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
98870 + }
98871 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
98872 +
98873 + return 0;
98874 +}
98875 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
98876 new file mode 100644
98877 index 0000000..b87ec9d
98878 --- /dev/null
98879 +++ b/tools/gcc/stackleak_plugin.c
98880 @@ -0,0 +1,313 @@
98881 +/*
98882 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
98883 + * Licensed under the GPL v2
98884 + *
98885 + * Note: the choice of the license means that the compilation process is
98886 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
98887 + * but for the kernel it doesn't matter since it doesn't link against
98888 + * any of the gcc libraries
98889 + *
98890 + * gcc plugin to help implement various PaX features
98891 + *
98892 + * - track lowest stack pointer
98893 + *
98894 + * TODO:
98895 + * - initialize all local variables
98896 + *
98897 + * BUGS:
98898 + * - none known
98899 + */
98900 +#include "gcc-plugin.h"
98901 +#include "config.h"
98902 +#include "system.h"
98903 +#include "coretypes.h"
98904 +#include "tree.h"
98905 +#include "tree-pass.h"
98906 +#include "flags.h"
98907 +#include "intl.h"
98908 +#include "toplev.h"
98909 +#include "plugin.h"
98910 +//#include "expr.h" where are you...
98911 +#include "diagnostic.h"
98912 +#include "plugin-version.h"
98913 +#include "tm.h"
98914 +#include "function.h"
98915 +#include "basic-block.h"
98916 +#include "gimple.h"
98917 +#include "rtl.h"
98918 +#include "emit-rtl.h"
98919 +
98920 +extern void print_gimple_stmt(FILE *, gimple, int, int);
98921 +
98922 +int plugin_is_GPL_compatible;
98923 +
98924 +static int track_frame_size = -1;
98925 +static const char track_function[] = "pax_track_stack";
98926 +static const char check_function[] = "pax_check_alloca";
98927 +static bool init_locals;
98928 +
98929 +static struct plugin_info stackleak_plugin_info = {
98930 + .version = "201203140940",
98931 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
98932 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
98933 +};
98934 +
98935 +static bool gate_stackleak_track_stack(void);
98936 +static unsigned int execute_stackleak_tree_instrument(void);
98937 +static unsigned int execute_stackleak_final(void);
98938 +
98939 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
98940 + .pass = {
98941 + .type = GIMPLE_PASS,
98942 + .name = "stackleak_tree_instrument",
98943 + .gate = gate_stackleak_track_stack,
98944 + .execute = execute_stackleak_tree_instrument,
98945 + .sub = NULL,
98946 + .next = NULL,
98947 + .static_pass_number = 0,
98948 + .tv_id = TV_NONE,
98949 + .properties_required = PROP_gimple_leh | PROP_cfg,
98950 + .properties_provided = 0,
98951 + .properties_destroyed = 0,
98952 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
98953 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
98954 + }
98955 +};
98956 +
98957 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
98958 + .pass = {
98959 + .type = RTL_PASS,
98960 + .name = "stackleak_final",
98961 + .gate = gate_stackleak_track_stack,
98962 + .execute = execute_stackleak_final,
98963 + .sub = NULL,
98964 + .next = NULL,
98965 + .static_pass_number = 0,
98966 + .tv_id = TV_NONE,
98967 + .properties_required = 0,
98968 + .properties_provided = 0,
98969 + .properties_destroyed = 0,
98970 + .todo_flags_start = 0,
98971 + .todo_flags_finish = TODO_dump_func
98972 + }
98973 +};
98974 +
98975 +static bool gate_stackleak_track_stack(void)
98976 +{
98977 + return track_frame_size >= 0;
98978 +}
98979 +
98980 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
98981 +{
98982 + gimple check_alloca;
98983 + tree fntype, fndecl, alloca_size;
98984 +
98985 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
98986 + fndecl = build_fn_decl(check_function, fntype);
98987 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
98988 +
98989 + // insert call to void pax_check_alloca(unsigned long size)
98990 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
98991 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
98992 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
98993 +}
98994 +
98995 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
98996 +{
98997 + gimple track_stack;
98998 + tree fntype, fndecl;
98999 +
99000 + fntype = build_function_type_list(void_type_node, NULL_TREE);
99001 + fndecl = build_fn_decl(track_function, fntype);
99002 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
99003 +
99004 + // insert call to void pax_track_stack(void)
99005 + track_stack = gimple_build_call(fndecl, 0);
99006 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
99007 +}
99008 +
99009 +#if BUILDING_GCC_VERSION == 4005
99010 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
99011 +{
99012 + tree fndecl;
99013 +
99014 + if (!is_gimple_call(stmt))
99015 + return false;
99016 + fndecl = gimple_call_fndecl(stmt);
99017 + if (!fndecl)
99018 + return false;
99019 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
99020 + return false;
99021 +// print_node(stderr, "pax", fndecl, 4);
99022 + return DECL_FUNCTION_CODE(fndecl) == code;
99023 +}
99024 +#endif
99025 +
99026 +static bool is_alloca(gimple stmt)
99027 +{
99028 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
99029 + return true;
99030 +
99031 +#if BUILDING_GCC_VERSION >= 4007
99032 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
99033 + return true;
99034 +#endif
99035 +
99036 + return false;
99037 +}
99038 +
99039 +static unsigned int execute_stackleak_tree_instrument(void)
99040 +{
99041 + basic_block bb, entry_bb;
99042 + bool prologue_instrumented = false, is_leaf = true;
99043 +
99044 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
99045 +
99046 + // 1. loop through BBs and GIMPLE statements
99047 + FOR_EACH_BB(bb) {
99048 + gimple_stmt_iterator gsi;
99049 +
99050 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
99051 + gimple stmt;
99052 +
99053 + stmt = gsi_stmt(gsi);
99054 +
99055 + if (is_gimple_call(stmt))
99056 + is_leaf = false;
99057 +
99058 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
99059 + if (!is_alloca(stmt))
99060 + continue;
99061 +
99062 + // 2. insert stack overflow check before each __builtin_alloca call
99063 + stackleak_check_alloca(&gsi);
99064 +
99065 + // 3. insert track call after each __builtin_alloca call
99066 + stackleak_add_instrumentation(&gsi);
99067 + if (bb == entry_bb)
99068 + prologue_instrumented = true;
99069 + }
99070 + }
99071 +
99072 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
99073 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
99074 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
99075 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
99076 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
99077 + return 0;
99078 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
99079 + return 0;
99080 +
99081 + // 4. insert track call at the beginning
99082 + if (!prologue_instrumented) {
99083 + gimple_stmt_iterator gsi;
99084 +
99085 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
99086 + if (dom_info_available_p(CDI_DOMINATORS))
99087 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
99088 + gsi = gsi_start_bb(bb);
99089 + stackleak_add_instrumentation(&gsi);
99090 + }
99091 +
99092 + return 0;
99093 +}
99094 +
99095 +static unsigned int execute_stackleak_final(void)
99096 +{
99097 + rtx insn;
99098 +
99099 + if (cfun->calls_alloca)
99100 + return 0;
99101 +
99102 + // keep calls only if function frame is big enough
99103 + if (get_frame_size() >= track_frame_size)
99104 + return 0;
99105 +
99106 + // 1. find pax_track_stack calls
99107 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
99108 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
99109 + rtx body;
99110 +
99111 + if (!CALL_P(insn))
99112 + continue;
99113 + body = PATTERN(insn);
99114 + if (GET_CODE(body) != CALL)
99115 + continue;
99116 + body = XEXP(body, 0);
99117 + if (GET_CODE(body) != MEM)
99118 + continue;
99119 + body = XEXP(body, 0);
99120 + if (GET_CODE(body) != SYMBOL_REF)
99121 + continue;
99122 + if (strcmp(XSTR(body, 0), track_function))
99123 + continue;
99124 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
99125 + // 2. delete call
99126 + insn = delete_insn_and_edges(insn);
99127 +#if BUILDING_GCC_VERSION >= 4007
99128 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
99129 + insn = delete_insn_and_edges(insn);
99130 +#endif
99131 + }
99132 +
99133 +// print_simple_rtl(stderr, get_insns());
99134 +// print_rtl(stderr, get_insns());
99135 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
99136 +
99137 + return 0;
99138 +}
99139 +
99140 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
99141 +{
99142 + const char * const plugin_name = plugin_info->base_name;
99143 + const int argc = plugin_info->argc;
99144 + const struct plugin_argument * const argv = plugin_info->argv;
99145 + int i;
99146 + struct register_pass_info stackleak_tree_instrument_pass_info = {
99147 + .pass = &stackleak_tree_instrument_pass.pass,
99148 +// .reference_pass_name = "tree_profile",
99149 + .reference_pass_name = "optimized",
99150 + .ref_pass_instance_number = 0,
99151 + .pos_op = PASS_POS_INSERT_BEFORE
99152 + };
99153 + struct register_pass_info stackleak_final_pass_info = {
99154 + .pass = &stackleak_final_rtl_opt_pass.pass,
99155 + .reference_pass_name = "final",
99156 + .ref_pass_instance_number = 0,
99157 + .pos_op = PASS_POS_INSERT_BEFORE
99158 + };
99159 +
99160 + if (!plugin_default_version_check(version, &gcc_version)) {
99161 + error(G_("incompatible gcc/plugin versions"));
99162 + return 1;
99163 + }
99164 +
99165 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
99166 +
99167 + for (i = 0; i < argc; ++i) {
99168 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
99169 + if (!argv[i].value) {
99170 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
99171 + continue;
99172 + }
99173 + track_frame_size = atoi(argv[i].value);
99174 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
99175 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
99176 + continue;
99177 + }
99178 + if (!strcmp(argv[i].key, "initialize-locals")) {
99179 + if (argv[i].value) {
99180 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
99181 + continue;
99182 + }
99183 + init_locals = true;
99184 + continue;
99185 + }
99186 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
99187 + }
99188 +
99189 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
99190 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
99191 +
99192 + return 0;
99193 +}
99194 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
99195 index 6789d78..4afd019 100644
99196 --- a/tools/perf/util/include/asm/alternative-asm.h
99197 +++ b/tools/perf/util/include/asm/alternative-asm.h
99198 @@ -5,4 +5,7 @@
99199
99200 #define altinstruction_entry #
99201
99202 + .macro pax_force_retaddr rip=0, reload=0
99203 + .endm
99204 +
99205 #endif
99206 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
99207 index af0f22f..9a7d479 100644
99208 --- a/usr/gen_init_cpio.c
99209 +++ b/usr/gen_init_cpio.c
99210 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
99211 int retval;
99212 int rc = -1;
99213 int namesize;
99214 - int i;
99215 + unsigned int i;
99216
99217 mode |= S_IFREG;
99218
99219 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
99220 *env_var = *expanded = '\0';
99221 strncat(env_var, start + 2, end - start - 2);
99222 strncat(expanded, new_location, start - new_location);
99223 - strncat(expanded, getenv(env_var), PATH_MAX);
99224 - strncat(expanded, end + 1, PATH_MAX);
99225 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
99226 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
99227 strncpy(new_location, expanded, PATH_MAX);
99228 + new_location[PATH_MAX] = 0;
99229 } else
99230 break;
99231 }
99232 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
99233 index 9739b53..6d457e3 100644
99234 --- a/virt/kvm/kvm_main.c
99235 +++ b/virt/kvm/kvm_main.c
99236 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
99237
99238 static cpumask_var_t cpus_hardware_enabled;
99239 static int kvm_usage_count = 0;
99240 -static atomic_t hardware_enable_failed;
99241 +static atomic_unchecked_t hardware_enable_failed;
99242
99243 struct kmem_cache *kvm_vcpu_cache;
99244 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
99245 @@ -2247,7 +2247,7 @@ static void hardware_enable_nolock(void *junk)
99246
99247 if (r) {
99248 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
99249 - atomic_inc(&hardware_enable_failed);
99250 + atomic_inc_unchecked(&hardware_enable_failed);
99251 printk(KERN_INFO "kvm: enabling virtualization on "
99252 "CPU%d failed\n", cpu);
99253 }
99254 @@ -2301,10 +2301,10 @@ static int hardware_enable_all(void)
99255
99256 kvm_usage_count++;
99257 if (kvm_usage_count == 1) {
99258 - atomic_set(&hardware_enable_failed, 0);
99259 + atomic_set_unchecked(&hardware_enable_failed, 0);
99260 on_each_cpu(hardware_enable_nolock, NULL, 1);
99261
99262 - if (atomic_read(&hardware_enable_failed)) {
99263 + if (atomic_read_unchecked(&hardware_enable_failed)) {
99264 hardware_disable_all_nolock();
99265 r = -EBUSY;
99266 }
99267 @@ -2667,7 +2667,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
99268 kvm_arch_vcpu_put(vcpu);
99269 }
99270
99271 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
99272 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
99273 struct module *module)
99274 {
99275 int r;
99276 @@ -2730,7 +2730,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
99277 if (!vcpu_align)
99278 vcpu_align = __alignof__(struct kvm_vcpu);
99279 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
99280 - 0, NULL);
99281 + SLAB_USERCOPY, NULL);
99282 if (!kvm_vcpu_cache) {
99283 r = -ENOMEM;
99284 goto out_free_3;
99285 @@ -2740,9 +2740,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
99286 if (r)
99287 goto out_free;
99288
99289 - kvm_chardev_ops.owner = module;
99290 - kvm_vm_fops.owner = module;
99291 - kvm_vcpu_fops.owner = module;
99292 + pax_open_kernel();
99293 + *(void **)&kvm_chardev_ops.owner = module;
99294 + *(void **)&kvm_vm_fops.owner = module;
99295 + *(void **)&kvm_vcpu_fops.owner = module;
99296 + pax_close_kernel();
99297
99298 r = misc_register(&kvm_dev);
99299 if (r) {