1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index b4a898f..6c0106a 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9 +*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13 +*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 @@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -48,9 +51,11 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *.xz
32 *_MODULES
33 +*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 @@ -69,6 +74,7 @@ Image
38 Module.markers
39 Module.symvers
40 PENDING
41 +PERF*
42 SCCS
43 System.map*
44 TAGS
45 @@ -92,19 +98,24 @@ bounds.h
46 bsetup
47 btfixupprep
48 build
49 +builtin-policy.h
50 bvmlinux
51 bzImage*
52 capability_names.h
53 capflags.c
54 classlist.h*
55 +clut_vga16.c
56 +common-cmds.h
57 comp*.log
58 compile.h*
59 conf
60 config
61 config-*
62 config_data.h*
63 +config.c
64 config.mak
65 config.mak.autogen
66 +config.tmp
67 conmakehash
68 consolemap_deftbl.c*
69 cpustr.h
70 @@ -115,9 +126,11 @@ devlist.h*
71 dnotify_test
72 docproc
73 dslm
74 +dtc-lexer.lex.c
75 elf2ecoff
76 elfconfig.h*
77 evergreen_reg_safe.h
78 +exception_policy.conf
79 fixdep
80 flask.h
81 fore200e_mkfirm
82 @@ -125,12 +138,15 @@ fore200e_pca_fw.c*
83 gconf
84 gconf.glade.h
85 gen-devlist
86 +gen-kdb_cmds.c
87 gen_crc32table
88 gen_init_cpio
89 generated
90 genheaders
91 genksyms
92 *_gray256.c
93 +hash
94 +hid-example
95 hpet_example
96 hugepage-mmap
97 hugepage-shm
98 @@ -145,7 +161,7 @@ int32.c
99 int4.c
100 int8.c
101 kallsyms
102 -kconfig
103 +kern_constants.h
104 keywords.c
105 ksym.c*
106 ksym.h*
107 @@ -153,7 +169,7 @@ kxgettext
108 lkc_defs.h
109 lex.c
110 lex.*.c
111 -linux
112 +lib1funcs.S
113 logo_*.c
114 logo_*_clut224.c
115 logo_*_mono.c
116 @@ -164,14 +180,15 @@ machtypes.h
117 map
118 map_hugetlb
119 maui_boot.h
120 -media
121 mconf
122 +mdp
123 miboot*
124 mk_elfconfig
125 mkboot
126 mkbugboot
127 mkcpustr
128 mkdep
129 +mkpiggy
130 mkprep
131 mkregtable
132 mktables
133 @@ -188,6 +205,7 @@ oui.c*
134 page-types
135 parse.c
136 parse.h
137 +parse-events*
138 patches*
139 pca200e.bin
140 pca200e_ecd.bin2
141 @@ -197,6 +215,7 @@ perf-archive
142 piggyback
143 piggy.gzip
144 piggy.S
145 +pmu-*
146 pnmtologo
147 ppc_defs.h*
148 pss_boot.h
149 @@ -207,6 +226,7 @@ r300_reg_safe.h
150 r420_reg_safe.h
151 r600_reg_safe.h
152 recordmcount
153 +regdb.c
154 relocs
155 rlim_names.h
156 rn50_reg_safe.h
157 @@ -217,6 +237,7 @@ setup
158 setup.bin
159 setup.elf
160 sImage
161 +slabinfo
162 sm_tbl*
163 split-include
164 syscalltab.h
165 @@ -227,6 +248,7 @@ tftpboot.img
166 timeconst.h
167 times.h*
168 trix_boot.h
169 +user_constants.h
170 utsrelease.h*
171 vdso-syms.lds
172 vdso.lds
173 @@ -238,13 +260,17 @@ vdso32.lds
174 vdso32.so.dbg
175 vdso64.lds
176 vdso64.so.dbg
177 +vdsox32.lds
178 +vdsox32-syms.lds
179 version.h*
180 vmImage
181 vmlinux
182 vmlinux-*
183 vmlinux.aout
184 vmlinux.bin.all
185 +vmlinux.bin.bz2
186 vmlinux.lds
187 +vmlinux.relocs
188 vmlinuz
189 voffset.h
190 vsyscall.lds
191 @@ -252,9 +278,11 @@ vsyscall_32.lds
192 wanxlfw.inc
193 uImage
194 unifdef
195 +utsrelease.h
196 wakeup.bin
197 wakeup.elf
198 wakeup.lds
199 zImage*
200 zconf.hash.c
201 +zconf.lex.c
202 zoffset.h
203 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
204 index c1601e5..08557ce 100644
205 --- a/Documentation/kernel-parameters.txt
206 +++ b/Documentation/kernel-parameters.txt
207 @@ -2021,6 +2021,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
208 the specified number of seconds. This is to be used if
209 your oopses keep scrolling off the screen.
210
211 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
212 + virtualization environments that don't cope well with the
213 + expand down segment used by UDEREF on X86-32 or the frequent
214 + page table updates on X86-64.
215 +
216 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
217 +
218 pcbit= [HW,ISDN]
219
220 pcd. [PARIDE]
221 diff --git a/Makefile b/Makefile
222 index 901a955..8277cb4 100644
223 --- a/Makefile
224 +++ b/Makefile
225 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
226
227 HOSTCC = gcc
228 HOSTCXX = g++
229 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
230 -HOSTCXXFLAGS = -O2
231 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
232 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
233 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
234
235 # Decide whether to build built-in, modular, or both.
236 # Normally, just do built-in.
237 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
238 # Rules shared between *config targets and build targets
239
240 # Basic helpers built in scripts/
241 -PHONY += scripts_basic
242 -scripts_basic:
243 +PHONY += scripts_basic gcc-plugins
244 +scripts_basic: gcc-plugins
245 $(Q)$(MAKE) $(build)=scripts/basic
246 $(Q)rm -f .tmp_quiet_recordmcount
247
248 @@ -564,6 +565,55 @@ else
249 KBUILD_CFLAGS += -O2
250 endif
251
252 +ifndef DISABLE_PAX_PLUGINS
253 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
254 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
255 +ifndef CONFIG_UML
256 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
257 +endif
258 +endif
259 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
260 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
261 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
262 +endif
263 +ifdef CONFIG_KALLOCSTAT_PLUGIN
264 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
265 +endif
266 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
267 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
268 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
269 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
270 +endif
271 +ifdef CONFIG_CHECKER_PLUGIN
272 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
273 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
274 +endif
275 +endif
276 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
277 +ifdef CONFIG_PAX_SIZE_OVERFLOW
278 +SIZE_OVERFLOW_PLUGIN := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
279 +endif
280 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
281 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN)
282 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
283 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
284 +ifeq ($(KBUILD_EXTMOD),)
285 +gcc-plugins:
286 + $(Q)$(MAKE) $(build)=tools/gcc
287 +else
288 +gcc-plugins: ;
289 +endif
290 +else
291 +gcc-plugins:
292 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
293 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
294 +else
295 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
296 +endif
297 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
298 +endif
299 +endif
300 +
301 include $(srctree)/arch/$(SRCARCH)/Makefile
302
303 ifneq ($(CONFIG_FRAME_WARN),0)
304 @@ -708,7 +758,7 @@ export mod_strip_cmd
305
306
307 ifeq ($(KBUILD_EXTMOD),)
308 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
309 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
310
311 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
312 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
313 @@ -932,6 +982,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
314
315 # The actual objects are generated when descending,
316 # make sure no implicit rule kicks in
317 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
318 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
319 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
320
321 # Handle descending into subdirectories listed in $(vmlinux-dirs)
322 @@ -941,7 +993,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
323 # Error messages still appears in the original language
324
325 PHONY += $(vmlinux-dirs)
326 -$(vmlinux-dirs): prepare scripts
327 +$(vmlinux-dirs): gcc-plugins prepare scripts
328 $(Q)$(MAKE) $(build)=$@
329
330 # Store (new) KERNELRELASE string in include/config/kernel.release
331 @@ -985,6 +1037,7 @@ prepare0: archprepare FORCE
332 $(Q)$(MAKE) $(build)=.
333
334 # All the preparing..
335 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
336 prepare: prepare0
337
338 # Generate some files
339 @@ -1092,6 +1145,8 @@ all: modules
340 # using awk while concatenating to the final file.
341
342 PHONY += modules
343 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
344 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
345 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
346 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
347 @$(kecho) ' Building modules, stage 2.';
348 @@ -1107,7 +1162,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
349
350 # Target to prepare building external modules
351 PHONY += modules_prepare
352 -modules_prepare: prepare scripts
353 +modules_prepare: gcc-plugins prepare scripts
354
355 # Target to install modules
356 PHONY += modules_install
357 @@ -1204,6 +1259,7 @@ distclean: mrproper
358 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
359 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
360 -o -name '.*.rej' \
361 + -o -name '.*.rej' -o -name '*.so' \
362 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
363 -type f -print | xargs rm -f
364
365 @@ -1364,6 +1420,8 @@ PHONY += $(module-dirs) modules
366 $(module-dirs): crmodverdir $(objtree)/Module.symvers
367 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
368
369 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
370 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
371 modules: $(module-dirs)
372 @$(kecho) ' Building modules, stage 2.';
373 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
374 @@ -1490,17 +1548,21 @@ else
375 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
376 endif
377
378 -%.s: %.c prepare scripts FORCE
379 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
380 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
381 +%.s: %.c gcc-plugins prepare scripts FORCE
382 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
383 %.i: %.c prepare scripts FORCE
384 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
385 -%.o: %.c prepare scripts FORCE
386 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
387 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
388 +%.o: %.c gcc-plugins prepare scripts FORCE
389 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
390 %.lst: %.c prepare scripts FORCE
391 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
392 -%.s: %.S prepare scripts FORCE
393 +%.s: %.S gcc-plugins prepare scripts FORCE
394 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
395 -%.o: %.S prepare scripts FORCE
396 +%.o: %.S gcc-plugins prepare scripts FORCE
397 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
398 %.symtypes: %.c prepare scripts FORCE
399 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
400 @@ -1510,11 +1572,15 @@ endif
401 $(cmd_crmodverdir)
402 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
403 $(build)=$(build-dir)
404 -%/: prepare scripts FORCE
405 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
406 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
407 +%/: gcc-plugins prepare scripts FORCE
408 $(cmd_crmodverdir)
409 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
410 $(build)=$(build-dir)
411 -%.ko: prepare scripts FORCE
412 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
413 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
414 +%.ko: gcc-plugins prepare scripts FORCE
415 $(cmd_crmodverdir)
416 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
417 $(build)=$(build-dir) $(@:.ko=.o)
418 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
419 index 3bb7ffe..347a54c 100644
420 --- a/arch/alpha/include/asm/atomic.h
421 +++ b/arch/alpha/include/asm/atomic.h
422 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
423 #define atomic_dec(v) atomic_sub(1,(v))
424 #define atomic64_dec(v) atomic64_sub(1,(v))
425
426 +#define atomic64_read_unchecked(v) atomic64_read(v)
427 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
428 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
429 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
430 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
431 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
432 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
433 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
434 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
435 +
436 #define smp_mb__before_atomic_dec() smp_mb()
437 #define smp_mb__after_atomic_dec() smp_mb()
438 #define smp_mb__before_atomic_inc() smp_mb()
439 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
440 index ad368a9..fbe0f25 100644
441 --- a/arch/alpha/include/asm/cache.h
442 +++ b/arch/alpha/include/asm/cache.h
443 @@ -4,19 +4,19 @@
444 #ifndef __ARCH_ALPHA_CACHE_H
445 #define __ARCH_ALPHA_CACHE_H
446
447 +#include <linux/const.h>
448
449 /* Bytes per L1 (data) cache line. */
450 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
451 -# define L1_CACHE_BYTES 64
452 # define L1_CACHE_SHIFT 6
453 #else
454 /* Both EV4 and EV5 are write-through, read-allocate,
455 direct-mapped, physical.
456 */
457 -# define L1_CACHE_BYTES 32
458 # define L1_CACHE_SHIFT 5
459 #endif
460
461 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
462 #define SMP_CACHE_BYTES L1_CACHE_BYTES
463
464 #endif
465 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
466 index 968d999..d36b2df 100644
467 --- a/arch/alpha/include/asm/elf.h
468 +++ b/arch/alpha/include/asm/elf.h
469 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
470
471 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
472
473 +#ifdef CONFIG_PAX_ASLR
474 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
475 +
476 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
477 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
478 +#endif
479 +
480 /* $0 is set by ld.so to a pointer to a function which might be
481 registered using atexit. This provides a mean for the dynamic
482 linker to call DT_FINI functions for shared libraries that have
483 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
484 index bc2a0da..8ad11ee 100644
485 --- a/arch/alpha/include/asm/pgalloc.h
486 +++ b/arch/alpha/include/asm/pgalloc.h
487 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
488 pgd_set(pgd, pmd);
489 }
490
491 +static inline void
492 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
493 +{
494 + pgd_populate(mm, pgd, pmd);
495 +}
496 +
497 extern pgd_t *pgd_alloc(struct mm_struct *mm);
498
499 static inline void
500 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
501 index 81a4342..348b927 100644
502 --- a/arch/alpha/include/asm/pgtable.h
503 +++ b/arch/alpha/include/asm/pgtable.h
504 @@ -102,6 +102,17 @@ struct vm_area_struct;
505 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
506 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
507 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
508 +
509 +#ifdef CONFIG_PAX_PAGEEXEC
510 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
511 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
512 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
513 +#else
514 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
515 +# define PAGE_COPY_NOEXEC PAGE_COPY
516 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
517 +#endif
518 +
519 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
520
521 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
522 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
523 index 2fd00b7..cfd5069 100644
524 --- a/arch/alpha/kernel/module.c
525 +++ b/arch/alpha/kernel/module.c
526 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
527
528 /* The small sections were sorted to the end of the segment.
529 The following should definitely cover them. */
530 - gp = (u64)me->module_core + me->core_size - 0x8000;
531 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
532 got = sechdrs[me->arch.gotsecindex].sh_addr;
533
534 for (i = 0; i < n; i++) {
535 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
536 index 49ee319..9ee7d14 100644
537 --- a/arch/alpha/kernel/osf_sys.c
538 +++ b/arch/alpha/kernel/osf_sys.c
539 @@ -1146,7 +1146,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
540 /* At this point: (!vma || addr < vma->vm_end). */
541 if (limit - len < addr)
542 return -ENOMEM;
543 - if (!vma || addr + len <= vma->vm_start)
544 + if (check_heap_stack_gap(vma, addr, len))
545 return addr;
546 addr = vma->vm_end;
547 vma = vma->vm_next;
548 @@ -1182,6 +1182,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
549 merely specific addresses, but regions of memory -- perhaps
550 this feature should be incorporated into all ports? */
551
552 +#ifdef CONFIG_PAX_RANDMMAP
553 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
554 +#endif
555 +
556 if (addr) {
557 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
558 if (addr != (unsigned long) -ENOMEM)
559 @@ -1189,8 +1193,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
560 }
561
562 /* Next, try allocating at TASK_UNMAPPED_BASE. */
563 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
564 - len, limit);
565 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
566 +
567 if (addr != (unsigned long) -ENOMEM)
568 return addr;
569
570 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
571 index 5eecab1..609abc0 100644
572 --- a/arch/alpha/mm/fault.c
573 +++ b/arch/alpha/mm/fault.c
574 @@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
575 __reload_thread(pcb);
576 }
577
578 +#ifdef CONFIG_PAX_PAGEEXEC
579 +/*
580 + * PaX: decide what to do with offenders (regs->pc = fault address)
581 + *
582 + * returns 1 when task should be killed
583 + * 2 when patched PLT trampoline was detected
584 + * 3 when unpatched PLT trampoline was detected
585 + */
586 +static int pax_handle_fetch_fault(struct pt_regs *regs)
587 +{
588 +
589 +#ifdef CONFIG_PAX_EMUPLT
590 + int err;
591 +
592 + do { /* PaX: patched PLT emulation #1 */
593 + unsigned int ldah, ldq, jmp;
594 +
595 + err = get_user(ldah, (unsigned int *)regs->pc);
596 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
597 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
598 +
599 + if (err)
600 + break;
601 +
602 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
603 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
604 + jmp == 0x6BFB0000U)
605 + {
606 + unsigned long r27, addr;
607 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
608 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
609 +
610 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
611 + err = get_user(r27, (unsigned long *)addr);
612 + if (err)
613 + break;
614 +
615 + regs->r27 = r27;
616 + regs->pc = r27;
617 + return 2;
618 + }
619 + } while (0);
620 +
621 + do { /* PaX: patched PLT emulation #2 */
622 + unsigned int ldah, lda, br;
623 +
624 + err = get_user(ldah, (unsigned int *)regs->pc);
625 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
626 + err |= get_user(br, (unsigned int *)(regs->pc+8));
627 +
628 + if (err)
629 + break;
630 +
631 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
632 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
633 + (br & 0xFFE00000U) == 0xC3E00000U)
634 + {
635 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
636 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
637 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
638 +
639 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
640 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
641 + return 2;
642 + }
643 + } while (0);
644 +
645 + do { /* PaX: unpatched PLT emulation */
646 + unsigned int br;
647 +
648 + err = get_user(br, (unsigned int *)regs->pc);
649 +
650 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
651 + unsigned int br2, ldq, nop, jmp;
652 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
653 +
654 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
655 + err = get_user(br2, (unsigned int *)addr);
656 + err |= get_user(ldq, (unsigned int *)(addr+4));
657 + err |= get_user(nop, (unsigned int *)(addr+8));
658 + err |= get_user(jmp, (unsigned int *)(addr+12));
659 + err |= get_user(resolver, (unsigned long *)(addr+16));
660 +
661 + if (err)
662 + break;
663 +
664 + if (br2 == 0xC3600000U &&
665 + ldq == 0xA77B000CU &&
666 + nop == 0x47FF041FU &&
667 + jmp == 0x6B7B0000U)
668 + {
669 + regs->r28 = regs->pc+4;
670 + regs->r27 = addr+16;
671 + regs->pc = resolver;
672 + return 3;
673 + }
674 + }
675 + } while (0);
676 +#endif
677 +
678 + return 1;
679 +}
680 +
681 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
682 +{
683 + unsigned long i;
684 +
685 + printk(KERN_ERR "PAX: bytes at PC: ");
686 + for (i = 0; i < 5; i++) {
687 + unsigned int c;
688 + if (get_user(c, (unsigned int *)pc+i))
689 + printk(KERN_CONT "???????? ");
690 + else
691 + printk(KERN_CONT "%08x ", c);
692 + }
693 + printk("\n");
694 +}
695 +#endif
696
697 /*
698 * This routine handles page faults. It determines the address,
699 @@ -130,8 +248,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
700 good_area:
701 si_code = SEGV_ACCERR;
702 if (cause < 0) {
703 - if (!(vma->vm_flags & VM_EXEC))
704 + if (!(vma->vm_flags & VM_EXEC)) {
705 +
706 +#ifdef CONFIG_PAX_PAGEEXEC
707 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
708 + goto bad_area;
709 +
710 + up_read(&mm->mmap_sem);
711 + switch (pax_handle_fetch_fault(regs)) {
712 +
713 +#ifdef CONFIG_PAX_EMUPLT
714 + case 2:
715 + case 3:
716 + return;
717 +#endif
718 +
719 + }
720 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
721 + do_group_exit(SIGKILL);
722 +#else
723 goto bad_area;
724 +#endif
725 +
726 + }
727 } else if (!cause) {
728 /* Allow reads even for write-only mappings */
729 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
730 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
731 index 68374ba..15c980c 100644
732 --- a/arch/arm/include/asm/atomic.h
733 +++ b/arch/arm/include/asm/atomic.h
734 @@ -17,17 +17,35 @@
735 #include <asm/barrier.h>
736 #include <asm/cmpxchg.h>
737
738 +#ifdef CONFIG_GENERIC_ATOMIC64
739 +#include <asm-generic/atomic64.h>
740 +#endif
741 +
742 #define ATOMIC_INIT(i) { (i) }
743
744 #ifdef __KERNEL__
745
746 +#define _ASM_EXTABLE(from, to) \
747 +" .pushsection __ex_table,\"a\"\n"\
748 +" .align 3\n" \
749 +" .long " #from ", " #to"\n" \
750 +" .popsection"
751 +
752 /*
753 * On ARM, ordinary assignment (str instruction) doesn't clear the local
754 * strex/ldrex monitor on some implementations. The reason we can use it for
755 * atomic_set() is the clrex or dummy strex done on every exception return.
756 */
757 #define atomic_read(v) (*(volatile int *)&(v)->counter)
758 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
759 +{
760 + return v->counter;
761 +}
762 #define atomic_set(v,i) (((v)->counter) = (i))
763 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
764 +{
765 + v->counter = i;
766 +}
767
768 #if __LINUX_ARM_ARCH__ >= 6
769
770 @@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
771 int result;
772
773 __asm__ __volatile__("@ atomic_add\n"
774 +"1: ldrex %1, [%3]\n"
775 +" adds %0, %1, %4\n"
776 +
777 +#ifdef CONFIG_PAX_REFCOUNT
778 +" bvc 3f\n"
779 +"2: bkpt 0xf103\n"
780 +"3:\n"
781 +#endif
782 +
783 +" strex %1, %0, [%3]\n"
784 +" teq %1, #0\n"
785 +" bne 1b"
786 +
787 +#ifdef CONFIG_PAX_REFCOUNT
788 +"\n4:\n"
789 + _ASM_EXTABLE(2b, 4b)
790 +#endif
791 +
792 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
793 + : "r" (&v->counter), "Ir" (i)
794 + : "cc");
795 +}
796 +
797 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
798 +{
799 + unsigned long tmp;
800 + int result;
801 +
802 + __asm__ __volatile__("@ atomic_add_unchecked\n"
803 "1: ldrex %0, [%3]\n"
804 " add %0, %0, %4\n"
805 " strex %1, %0, [%3]\n"
806 @@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
807 smp_mb();
808
809 __asm__ __volatile__("@ atomic_add_return\n"
810 +"1: ldrex %1, [%3]\n"
811 +" adds %0, %1, %4\n"
812 +
813 +#ifdef CONFIG_PAX_REFCOUNT
814 +" bvc 3f\n"
815 +" mov %0, %1\n"
816 +"2: bkpt 0xf103\n"
817 +"3:\n"
818 +#endif
819 +
820 +" strex %1, %0, [%3]\n"
821 +" teq %1, #0\n"
822 +" bne 1b"
823 +
824 +#ifdef CONFIG_PAX_REFCOUNT
825 +"\n4:\n"
826 + _ASM_EXTABLE(2b, 4b)
827 +#endif
828 +
829 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
830 + : "r" (&v->counter), "Ir" (i)
831 + : "cc");
832 +
833 + smp_mb();
834 +
835 + return result;
836 +}
837 +
838 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
839 +{
840 + unsigned long tmp;
841 + int result;
842 +
843 + smp_mb();
844 +
845 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
846 "1: ldrex %0, [%3]\n"
847 " add %0, %0, %4\n"
848 " strex %1, %0, [%3]\n"
849 @@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
850 int result;
851
852 __asm__ __volatile__("@ atomic_sub\n"
853 +"1: ldrex %1, [%3]\n"
854 +" subs %0, %1, %4\n"
855 +
856 +#ifdef CONFIG_PAX_REFCOUNT
857 +" bvc 3f\n"
858 +"2: bkpt 0xf103\n"
859 +"3:\n"
860 +#endif
861 +
862 +" strex %1, %0, [%3]\n"
863 +" teq %1, #0\n"
864 +" bne 1b"
865 +
866 +#ifdef CONFIG_PAX_REFCOUNT
867 +"\n4:\n"
868 + _ASM_EXTABLE(2b, 4b)
869 +#endif
870 +
871 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
872 + : "r" (&v->counter), "Ir" (i)
873 + : "cc");
874 +}
875 +
876 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
877 +{
878 + unsigned long tmp;
879 + int result;
880 +
881 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
882 "1: ldrex %0, [%3]\n"
883 " sub %0, %0, %4\n"
884 " strex %1, %0, [%3]\n"
885 @@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
886 smp_mb();
887
888 __asm__ __volatile__("@ atomic_sub_return\n"
889 -"1: ldrex %0, [%3]\n"
890 -" sub %0, %0, %4\n"
891 +"1: ldrex %1, [%3]\n"
892 +" sub %0, %1, %4\n"
893 +
894 +#ifdef CONFIG_PAX_REFCOUNT
895 +" bvc 3f\n"
896 +" mov %0, %1\n"
897 +"2: bkpt 0xf103\n"
898 +"3:\n"
899 +#endif
900 +
901 " strex %1, %0, [%3]\n"
902 " teq %1, #0\n"
903 " bne 1b"
904 +
905 +#ifdef CONFIG_PAX_REFCOUNT
906 +"\n4:\n"
907 + _ASM_EXTABLE(2b, 4b)
908 +#endif
909 +
910 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
911 : "r" (&v->counter), "Ir" (i)
912 : "cc");
913 @@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
914 return oldval;
915 }
916
917 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
918 +{
919 + unsigned long oldval, res;
920 +
921 + smp_mb();
922 +
923 + do {
924 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
925 + "ldrex %1, [%3]\n"
926 + "mov %0, #0\n"
927 + "teq %1, %4\n"
928 + "strexeq %0, %5, [%3]\n"
929 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
930 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
931 + : "cc");
932 + } while (res);
933 +
934 + smp_mb();
935 +
936 + return oldval;
937 +}
938 +
939 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
940 {
941 unsigned long tmp, tmp2;
942 @@ -167,7 +315,9 @@ static inline int atomic_add_return(int i, atomic_t *v)
943
944 return val;
945 }
946 +#define atomic_add_return_unchecked(i, v) atomic_add_return(i, v)
947 #define atomic_add(i, v) (void) atomic_add_return(i, v)
948 +#define atomic_add_unchecked(i, v) (void) atomic_add_return_unchecked(i, v)
949
950 static inline int atomic_sub_return(int i, atomic_t *v)
951 {
952 @@ -181,7 +331,9 @@ static inline int atomic_sub_return(int i, atomic_t *v)
953
954 return val;
955 }
956 +#define atomic_sub_return_unchecked(i, v) atomic_sub_return(i, v)
957 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
958 +#define atomic_sub_unchecked(i, v) (void) atomic_sub_return_unchecked(i, v)
959
960 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
961 {
962 @@ -196,6 +348,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
963
964 return ret;
965 }
966 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg(v, o, n)
967
968 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
969 {
970 @@ -209,6 +362,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
971 #endif /* __LINUX_ARM_ARCH__ */
972
973 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
974 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
975 +{
976 + return xchg(&v->counter, new);
977 +}
978
979 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
980 {
981 @@ -221,11 +378,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
982 }
983
984 #define atomic_inc(v) atomic_add(1, v)
985 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
986 +{
987 + atomic_add_unchecked(1, v);
988 +}
989 #define atomic_dec(v) atomic_sub(1, v)
990 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
991 +{
992 + atomic_sub_unchecked(1, v);
993 +}
994
995 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
996 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
997 +{
998 + return atomic_add_return_unchecked(1, v) == 0;
999 +}
1000 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1001 #define atomic_inc_return(v) (atomic_add_return(1, v))
1002 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1003 +{
1004 + return atomic_add_return_unchecked(1, v);
1005 +}
1006 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1007 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1008
1009 @@ -241,6 +414,14 @@ typedef struct {
1010 u64 __aligned(8) counter;
1011 } atomic64_t;
1012
1013 +#ifdef CONFIG_PAX_REFCOUNT
1014 +typedef struct {
1015 + u64 __aligned(8) counter;
1016 +} atomic64_unchecked_t;
1017 +#else
1018 +typedef atomic64_t atomic64_unchecked_t;
1019 +#endif
1020 +
1021 #define ATOMIC64_INIT(i) { (i) }
1022
1023 static inline u64 atomic64_read(atomic64_t *v)
1024 @@ -256,6 +437,19 @@ static inline u64 atomic64_read(atomic64_t *v)
1025 return result;
1026 }
1027
1028 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1029 +{
1030 + u64 result;
1031 +
1032 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1033 +" ldrexd %0, %H0, [%1]"
1034 + : "=&r" (result)
1035 + : "r" (&v->counter), "Qo" (v->counter)
1036 + );
1037 +
1038 + return result;
1039 +}
1040 +
1041 static inline void atomic64_set(atomic64_t *v, u64 i)
1042 {
1043 u64 tmp;
1044 @@ -270,6 +464,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1045 : "cc");
1046 }
1047
1048 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1049 +{
1050 + u64 tmp;
1051 +
1052 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1053 +"1: ldrexd %0, %H0, [%2]\n"
1054 +" strexd %0, %3, %H3, [%2]\n"
1055 +" teq %0, #0\n"
1056 +" bne 1b"
1057 + : "=&r" (tmp), "=Qo" (v->counter)
1058 + : "r" (&v->counter), "r" (i)
1059 + : "cc");
1060 +}
1061 +
1062 static inline void atomic64_add(u64 i, atomic64_t *v)
1063 {
1064 u64 result;
1065 @@ -278,6 +486,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1066 __asm__ __volatile__("@ atomic64_add\n"
1067 "1: ldrexd %0, %H0, [%3]\n"
1068 " adds %0, %0, %4\n"
1069 +" adcs %H0, %H0, %H4\n"
1070 +
1071 +#ifdef CONFIG_PAX_REFCOUNT
1072 +" bvc 3f\n"
1073 +"2: bkpt 0xf103\n"
1074 +"3:\n"
1075 +#endif
1076 +
1077 +" strexd %1, %0, %H0, [%3]\n"
1078 +" teq %1, #0\n"
1079 +" bne 1b"
1080 +
1081 +#ifdef CONFIG_PAX_REFCOUNT
1082 +"\n4:\n"
1083 + _ASM_EXTABLE(2b, 4b)
1084 +#endif
1085 +
1086 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1087 + : "r" (&v->counter), "r" (i)
1088 + : "cc");
1089 +}
1090 +
1091 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1092 +{
1093 + u64 result;
1094 + unsigned long tmp;
1095 +
1096 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1097 +"1: ldrexd %0, %H0, [%3]\n"
1098 +" adds %0, %0, %4\n"
1099 " adc %H0, %H0, %H4\n"
1100 " strexd %1, %0, %H0, [%3]\n"
1101 " teq %1, #0\n"
1102 @@ -289,12 +527,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1103
1104 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1105 {
1106 - u64 result;
1107 - unsigned long tmp;
1108 + u64 result, tmp;
1109
1110 smp_mb();
1111
1112 __asm__ __volatile__("@ atomic64_add_return\n"
1113 +"1: ldrexd %1, %H1, [%3]\n"
1114 +" adds %0, %1, %4\n"
1115 +" adcs %H0, %H1, %H4\n"
1116 +
1117 +#ifdef CONFIG_PAX_REFCOUNT
1118 +" bvc 3f\n"
1119 +" mov %0, %1\n"
1120 +" mov %H0, %H1\n"
1121 +"2: bkpt 0xf103\n"
1122 +"3:\n"
1123 +#endif
1124 +
1125 +" strexd %1, %0, %H0, [%3]\n"
1126 +" teq %1, #0\n"
1127 +" bne 1b"
1128 +
1129 +#ifdef CONFIG_PAX_REFCOUNT
1130 +"\n4:\n"
1131 + _ASM_EXTABLE(2b, 4b)
1132 +#endif
1133 +
1134 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1135 + : "r" (&v->counter), "r" (i)
1136 + : "cc");
1137 +
1138 + smp_mb();
1139 +
1140 + return result;
1141 +}
1142 +
1143 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1144 +{
1145 + u64 result;
1146 + unsigned long tmp;
1147 +
1148 + smp_mb();
1149 +
1150 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1151 "1: ldrexd %0, %H0, [%3]\n"
1152 " adds %0, %0, %4\n"
1153 " adc %H0, %H0, %H4\n"
1154 @@ -318,6 +593,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1155 __asm__ __volatile__("@ atomic64_sub\n"
1156 "1: ldrexd %0, %H0, [%3]\n"
1157 " subs %0, %0, %4\n"
1158 +" sbcs %H0, %H0, %H4\n"
1159 +
1160 +#ifdef CONFIG_PAX_REFCOUNT
1161 +" bvc 3f\n"
1162 +"2: bkpt 0xf103\n"
1163 +"3:\n"
1164 +#endif
1165 +
1166 +" strexd %1, %0, %H0, [%3]\n"
1167 +" teq %1, #0\n"
1168 +" bne 1b"
1169 +
1170 +#ifdef CONFIG_PAX_REFCOUNT
1171 +"\n4:\n"
1172 + _ASM_EXTABLE(2b, 4b)
1173 +#endif
1174 +
1175 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1176 + : "r" (&v->counter), "r" (i)
1177 + : "cc");
1178 +}
1179 +
1180 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1181 +{
1182 + u64 result;
1183 + unsigned long tmp;
1184 +
1185 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1186 +"1: ldrexd %0, %H0, [%3]\n"
1187 +" subs %0, %0, %4\n"
1188 " sbc %H0, %H0, %H4\n"
1189 " strexd %1, %0, %H0, [%3]\n"
1190 " teq %1, #0\n"
1191 @@ -329,18 +634,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1192
1193 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1194 {
1195 - u64 result;
1196 - unsigned long tmp;
1197 + u64 result, tmp;
1198
1199 smp_mb();
1200
1201 __asm__ __volatile__("@ atomic64_sub_return\n"
1202 -"1: ldrexd %0, %H0, [%3]\n"
1203 -" subs %0, %0, %4\n"
1204 -" sbc %H0, %H0, %H4\n"
1205 +"1: ldrexd %1, %H1, [%3]\n"
1206 +" subs %0, %1, %4\n"
1207 +" sbc %H0, %H1, %H4\n"
1208 +
1209 +#ifdef CONFIG_PAX_REFCOUNT
1210 +" bvc 3f\n"
1211 +" mov %0, %1\n"
1212 +" mov %H0, %H1\n"
1213 +"2: bkpt 0xf103\n"
1214 +"3:\n"
1215 +#endif
1216 +
1217 " strexd %1, %0, %H0, [%3]\n"
1218 " teq %1, #0\n"
1219 " bne 1b"
1220 +
1221 +#ifdef CONFIG_PAX_REFCOUNT
1222 +"\n4:\n"
1223 + _ASM_EXTABLE(2b, 4b)
1224 +#endif
1225 +
1226 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1227 : "r" (&v->counter), "r" (i)
1228 : "cc");
1229 @@ -374,6 +693,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1230 return oldval;
1231 }
1232
1233 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1234 +{
1235 + u64 oldval;
1236 + unsigned long res;
1237 +
1238 + smp_mb();
1239 +
1240 + do {
1241 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1242 + "ldrexd %1, %H1, [%3]\n"
1243 + "mov %0, #0\n"
1244 + "teq %1, %4\n"
1245 + "teqeq %H1, %H4\n"
1246 + "strexdeq %0, %5, %H5, [%3]"
1247 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1248 + : "r" (&ptr->counter), "r" (old), "r" (new)
1249 + : "cc");
1250 + } while (res);
1251 +
1252 + smp_mb();
1253 +
1254 + return oldval;
1255 +}
1256 +
1257 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1258 {
1259 u64 result;
1260 @@ -397,21 +740,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1261
1262 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1263 {
1264 - u64 result;
1265 - unsigned long tmp;
1266 + u64 result, tmp;
1267
1268 smp_mb();
1269
1270 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1271 -"1: ldrexd %0, %H0, [%3]\n"
1272 -" subs %0, %0, #1\n"
1273 -" sbc %H0, %H0, #0\n"
1274 +"1: ldrexd %1, %H1, [%3]\n"
1275 +" subs %0, %1, #1\n"
1276 +" sbc %H0, %H1, #0\n"
1277 +
1278 +#ifdef CONFIG_PAX_REFCOUNT
1279 +" bvc 3f\n"
1280 +" mov %0, %1\n"
1281 +" mov %H0, %H1\n"
1282 +"2: bkpt 0xf103\n"
1283 +"3:\n"
1284 +#endif
1285 +
1286 " teq %H0, #0\n"
1287 -" bmi 2f\n"
1288 +" bmi 4f\n"
1289 " strexd %1, %0, %H0, [%3]\n"
1290 " teq %1, #0\n"
1291 " bne 1b\n"
1292 -"2:"
1293 +"4:\n"
1294 +
1295 +#ifdef CONFIG_PAX_REFCOUNT
1296 + _ASM_EXTABLE(2b, 4b)
1297 +#endif
1298 +
1299 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1300 : "r" (&v->counter)
1301 : "cc");
1302 @@ -434,13 +790,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1303 " teq %0, %5\n"
1304 " teqeq %H0, %H5\n"
1305 " moveq %1, #0\n"
1306 -" beq 2f\n"
1307 +" beq 4f\n"
1308 " adds %0, %0, %6\n"
1309 " adc %H0, %H0, %H6\n"
1310 +
1311 +#ifdef CONFIG_PAX_REFCOUNT
1312 +" bvc 3f\n"
1313 +"2: bkpt 0xf103\n"
1314 +"3:\n"
1315 +#endif
1316 +
1317 " strexd %2, %0, %H0, [%4]\n"
1318 " teq %2, #0\n"
1319 " bne 1b\n"
1320 -"2:"
1321 +"4:\n"
1322 +
1323 +#ifdef CONFIG_PAX_REFCOUNT
1324 + _ASM_EXTABLE(2b, 4b)
1325 +#endif
1326 +
1327 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1328 : "r" (&v->counter), "r" (u), "r" (a)
1329 : "cc");
1330 @@ -453,10 +821,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1331
1332 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1333 #define atomic64_inc(v) atomic64_add(1LL, (v))
1334 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1335 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1336 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1337 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1338 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1339 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1340 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1341 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1342 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1343 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1344 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1345 index 75fe66b..2255c86 100644
1346 --- a/arch/arm/include/asm/cache.h
1347 +++ b/arch/arm/include/asm/cache.h
1348 @@ -4,8 +4,10 @@
1349 #ifndef __ASMARM_CACHE_H
1350 #define __ASMARM_CACHE_H
1351
1352 +#include <linux/const.h>
1353 +
1354 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1355 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1356 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1357
1358 /*
1359 * Memory returned by kmalloc() may be used for DMA, so we must make
1360 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1361 index 1252a26..9dc17b5 100644
1362 --- a/arch/arm/include/asm/cacheflush.h
1363 +++ b/arch/arm/include/asm/cacheflush.h
1364 @@ -108,7 +108,7 @@ struct cpu_cache_fns {
1365 void (*dma_unmap_area)(const void *, size_t, int);
1366
1367 void (*dma_flush_range)(const void *, const void *);
1368 -};
1369 +} __no_const;
1370
1371 /*
1372 * Select the calling method
1373 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1374 index d41d7cb..9bea5e0 100644
1375 --- a/arch/arm/include/asm/cmpxchg.h
1376 +++ b/arch/arm/include/asm/cmpxchg.h
1377 @@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1378
1379 #define xchg(ptr,x) \
1380 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1381 +#define xchg_unchecked(ptr,x) \
1382 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1383
1384 #include <asm-generic/cmpxchg-local.h>
1385
1386 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1387 index 38050b1..9d90e8b 100644
1388 --- a/arch/arm/include/asm/elf.h
1389 +++ b/arch/arm/include/asm/elf.h
1390 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1391 the loader. We need to make sure that it is out of the way of the program
1392 that it will "exec", and that there is sufficient room for the brk. */
1393
1394 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1395 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1396 +
1397 +#ifdef CONFIG_PAX_ASLR
1398 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1399 +
1400 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1401 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1402 +#endif
1403
1404 /* When the program starts, a1 contains a pointer to a function to be
1405 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1406 @@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1407 extern void elf_set_personality(const struct elf32_hdr *);
1408 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1409
1410 -struct mm_struct;
1411 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1412 -#define arch_randomize_brk arch_randomize_brk
1413 -
1414 #endif
1415 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1416 index e51b1e8..32a3113 100644
1417 --- a/arch/arm/include/asm/kmap_types.h
1418 +++ b/arch/arm/include/asm/kmap_types.h
1419 @@ -21,6 +21,7 @@ enum km_type {
1420 KM_L1_CACHE,
1421 KM_L2_CACHE,
1422 KM_KDB,
1423 + KM_CLEARPAGE,
1424 KM_TYPE_NR
1425 };
1426
1427 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1428 index 53426c6..c7baff3 100644
1429 --- a/arch/arm/include/asm/outercache.h
1430 +++ b/arch/arm/include/asm/outercache.h
1431 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1432 #endif
1433 void (*set_debug)(unsigned long);
1434 void (*resume)(void);
1435 -};
1436 +} __no_const;
1437
1438 #ifdef CONFIG_OUTER_CACHE
1439
1440 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1441 index 5838361..da6e813 100644
1442 --- a/arch/arm/include/asm/page.h
1443 +++ b/arch/arm/include/asm/page.h
1444 @@ -123,7 +123,7 @@ struct cpu_user_fns {
1445 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1446 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1447 unsigned long vaddr, struct vm_area_struct *vma);
1448 -};
1449 +} __no_const;
1450
1451 #ifdef MULTI_USER
1452 extern struct cpu_user_fns cpu_user;
1453 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1454 index 943504f..bf8d667 100644
1455 --- a/arch/arm/include/asm/pgalloc.h
1456 +++ b/arch/arm/include/asm/pgalloc.h
1457 @@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1458 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1459 }
1460
1461 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1462 +{
1463 + pud_populate(mm, pud, pmd);
1464 +}
1465 +
1466 #else /* !CONFIG_ARM_LPAE */
1467
1468 /*
1469 @@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1470 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1471 #define pmd_free(mm, pmd) do { } while (0)
1472 #define pud_populate(mm,pmd,pte) BUG()
1473 +#define pud_populate_kernel(mm,pmd,pte) BUG()
1474
1475 #endif /* CONFIG_ARM_LPAE */
1476
1477 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1478 index 0f04d84..2be5648 100644
1479 --- a/arch/arm/include/asm/thread_info.h
1480 +++ b/arch/arm/include/asm/thread_info.h
1481 @@ -148,6 +148,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1482 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
1483 #define TIF_SYSCALL_TRACE 8
1484 #define TIF_SYSCALL_AUDIT 9
1485 +
1486 +/* within 8 bits of TIF_SYSCALL_TRACE
1487 + to meet flexible second operand requirements
1488 +*/
1489 +#define TIF_GRSEC_SETXID 10
1490 +
1491 #define TIF_POLLING_NRFLAG 16
1492 #define TIF_USING_IWMMXT 17
1493 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1494 @@ -163,9 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1495 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1496 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
1497 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1498 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1499
1500 /* Checks for any syscall work in entry-common.S */
1501 -#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
1502 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1503 + _TIF_GRSEC_SETXID)
1504
1505 /*
1506 * Change these and you break ASM code in entry-common.S
1507 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1508 index 71f6536..602f279 100644
1509 --- a/arch/arm/include/asm/uaccess.h
1510 +++ b/arch/arm/include/asm/uaccess.h
1511 @@ -22,6 +22,8 @@
1512 #define VERIFY_READ 0
1513 #define VERIFY_WRITE 1
1514
1515 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1516 +
1517 /*
1518 * The exception table consists of pairs of addresses: the first is the
1519 * address of an instruction that is allowed to fault, and the second is
1520 @@ -387,8 +389,23 @@ do { \
1521
1522
1523 #ifdef CONFIG_MMU
1524 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1525 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1526 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1527 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1528 +
1529 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1530 +{
1531 + if (!__builtin_constant_p(n))
1532 + check_object_size(to, n, false);
1533 + return ___copy_from_user(to, from, n);
1534 +}
1535 +
1536 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1537 +{
1538 + if (!__builtin_constant_p(n))
1539 + check_object_size(from, n, true);
1540 + return ___copy_to_user(to, from, n);
1541 +}
1542 +
1543 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1544 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1545 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1546 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1547
1548 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1549 {
1550 + if ((long)n < 0)
1551 + return n;
1552 +
1553 if (access_ok(VERIFY_READ, from, n))
1554 n = __copy_from_user(to, from, n);
1555 else /* security hole - plug it */
1556 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1557
1558 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1559 {
1560 + if ((long)n < 0)
1561 + return n;
1562 +
1563 if (access_ok(VERIFY_WRITE, to, n))
1564 n = __copy_to_user(to, from, n);
1565 return n;
1566 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1567 index b57c75e..ed2d6b2 100644
1568 --- a/arch/arm/kernel/armksyms.c
1569 +++ b/arch/arm/kernel/armksyms.c
1570 @@ -94,8 +94,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1571 #ifdef CONFIG_MMU
1572 EXPORT_SYMBOL(copy_page);
1573
1574 -EXPORT_SYMBOL(__copy_from_user);
1575 -EXPORT_SYMBOL(__copy_to_user);
1576 +EXPORT_SYMBOL(___copy_from_user);
1577 +EXPORT_SYMBOL(___copy_to_user);
1578 EXPORT_SYMBOL(__clear_user);
1579
1580 EXPORT_SYMBOL(__get_user_1);
1581 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1582 index 2b7b017..c380fa2 100644
1583 --- a/arch/arm/kernel/process.c
1584 +++ b/arch/arm/kernel/process.c
1585 @@ -28,7 +28,6 @@
1586 #include <linux/tick.h>
1587 #include <linux/utsname.h>
1588 #include <linux/uaccess.h>
1589 -#include <linux/random.h>
1590 #include <linux/hw_breakpoint.h>
1591 #include <linux/cpuidle.h>
1592
1593 @@ -275,9 +274,10 @@ void machine_power_off(void)
1594 machine_shutdown();
1595 if (pm_power_off)
1596 pm_power_off();
1597 + BUG();
1598 }
1599
1600 -void machine_restart(char *cmd)
1601 +__noreturn void machine_restart(char *cmd)
1602 {
1603 machine_shutdown();
1604
1605 @@ -519,12 +519,6 @@ unsigned long get_wchan(struct task_struct *p)
1606 return 0;
1607 }
1608
1609 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1610 -{
1611 - unsigned long range_end = mm->brk + 0x02000000;
1612 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1613 -}
1614 -
1615 #ifdef CONFIG_MMU
1616 /*
1617 * The vectors page is always readable from user space for the
1618 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
1619 index 9650c14..ae30cdd 100644
1620 --- a/arch/arm/kernel/ptrace.c
1621 +++ b/arch/arm/kernel/ptrace.c
1622 @@ -906,10 +906,19 @@ long arch_ptrace(struct task_struct *child, long request,
1623 return ret;
1624 }
1625
1626 +#ifdef CONFIG_GRKERNSEC_SETXID
1627 +extern void gr_delayed_cred_worker(void);
1628 +#endif
1629 +
1630 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
1631 {
1632 unsigned long ip;
1633
1634 +#ifdef CONFIG_GRKERNSEC_SETXID
1635 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
1636 + gr_delayed_cred_worker();
1637 +#endif
1638 +
1639 if (why)
1640 audit_syscall_exit(regs);
1641 else
1642 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1643 index ebfac78..cbea9c0 100644
1644 --- a/arch/arm/kernel/setup.c
1645 +++ b/arch/arm/kernel/setup.c
1646 @@ -111,13 +111,13 @@ struct processor processor __read_mostly;
1647 struct cpu_tlb_fns cpu_tlb __read_mostly;
1648 #endif
1649 #ifdef MULTI_USER
1650 -struct cpu_user_fns cpu_user __read_mostly;
1651 +struct cpu_user_fns cpu_user __read_only;
1652 #endif
1653 #ifdef MULTI_CACHE
1654 -struct cpu_cache_fns cpu_cache __read_mostly;
1655 +struct cpu_cache_fns cpu_cache __read_only;
1656 #endif
1657 #ifdef CONFIG_OUTER_CACHE
1658 -struct outer_cache_fns outer_cache __read_mostly;
1659 +struct outer_cache_fns outer_cache __read_only;
1660 EXPORT_SYMBOL(outer_cache);
1661 #endif
1662
1663 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1664 index 63d402f..db1d714 100644
1665 --- a/arch/arm/kernel/traps.c
1666 +++ b/arch/arm/kernel/traps.c
1667 @@ -264,6 +264,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1668
1669 static DEFINE_RAW_SPINLOCK(die_lock);
1670
1671 +extern void gr_handle_kernel_exploit(void);
1672 +
1673 /*
1674 * This function is protected against re-entrancy.
1675 */
1676 @@ -296,6 +298,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1677 panic("Fatal exception in interrupt");
1678 if (panic_on_oops)
1679 panic("Fatal exception");
1680 +
1681 + gr_handle_kernel_exploit();
1682 +
1683 if (ret != NOTIFY_STOP)
1684 do_exit(SIGSEGV);
1685 }
1686 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1687 index 66a477a..bee61d3 100644
1688 --- a/arch/arm/lib/copy_from_user.S
1689 +++ b/arch/arm/lib/copy_from_user.S
1690 @@ -16,7 +16,7 @@
1691 /*
1692 * Prototype:
1693 *
1694 - * size_t __copy_from_user(void *to, const void *from, size_t n)
1695 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
1696 *
1697 * Purpose:
1698 *
1699 @@ -84,11 +84,11 @@
1700
1701 .text
1702
1703 -ENTRY(__copy_from_user)
1704 +ENTRY(___copy_from_user)
1705
1706 #include "copy_template.S"
1707
1708 -ENDPROC(__copy_from_user)
1709 +ENDPROC(___copy_from_user)
1710
1711 .pushsection .fixup,"ax"
1712 .align 0
1713 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1714 index 6ee2f67..d1cce76 100644
1715 --- a/arch/arm/lib/copy_page.S
1716 +++ b/arch/arm/lib/copy_page.S
1717 @@ -10,6 +10,7 @@
1718 * ASM optimised string functions
1719 */
1720 #include <linux/linkage.h>
1721 +#include <linux/const.h>
1722 #include <asm/assembler.h>
1723 #include <asm/asm-offsets.h>
1724 #include <asm/cache.h>
1725 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1726 index d066df6..df28194 100644
1727 --- a/arch/arm/lib/copy_to_user.S
1728 +++ b/arch/arm/lib/copy_to_user.S
1729 @@ -16,7 +16,7 @@
1730 /*
1731 * Prototype:
1732 *
1733 - * size_t __copy_to_user(void *to, const void *from, size_t n)
1734 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
1735 *
1736 * Purpose:
1737 *
1738 @@ -88,11 +88,11 @@
1739 .text
1740
1741 ENTRY(__copy_to_user_std)
1742 -WEAK(__copy_to_user)
1743 +WEAK(___copy_to_user)
1744
1745 #include "copy_template.S"
1746
1747 -ENDPROC(__copy_to_user)
1748 +ENDPROC(___copy_to_user)
1749 ENDPROC(__copy_to_user_std)
1750
1751 .pushsection .fixup,"ax"
1752 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1753 index 5c908b1..e712687 100644
1754 --- a/arch/arm/lib/uaccess.S
1755 +++ b/arch/arm/lib/uaccess.S
1756 @@ -20,7 +20,7 @@
1757
1758 #define PAGE_SHIFT 12
1759
1760 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1761 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1762 * Purpose : copy a block to user memory from kernel memory
1763 * Params : to - user memory
1764 * : from - kernel memory
1765 @@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1766 sub r2, r2, ip
1767 b .Lc2u_dest_aligned
1768
1769 -ENTRY(__copy_to_user)
1770 +ENTRY(___copy_to_user)
1771 stmfd sp!, {r2, r4 - r7, lr}
1772 cmp r2, #4
1773 blt .Lc2u_not_enough
1774 @@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1775 ldrgtb r3, [r1], #0
1776 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1777 b .Lc2u_finished
1778 -ENDPROC(__copy_to_user)
1779 +ENDPROC(___copy_to_user)
1780
1781 .pushsection .fixup,"ax"
1782 .align 0
1783 9001: ldmfd sp!, {r0, r4 - r7, pc}
1784 .popsection
1785
1786 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1787 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1788 * Purpose : copy a block from user memory to kernel memory
1789 * Params : to - kernel memory
1790 * : from - user memory
1791 @@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1792 sub r2, r2, ip
1793 b .Lcfu_dest_aligned
1794
1795 -ENTRY(__copy_from_user)
1796 +ENTRY(___copy_from_user)
1797 stmfd sp!, {r0, r2, r4 - r7, lr}
1798 cmp r2, #4
1799 blt .Lcfu_not_enough
1800 @@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1801 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1802 strgtb r3, [r0], #1
1803 b .Lcfu_finished
1804 -ENDPROC(__copy_from_user)
1805 +ENDPROC(___copy_from_user)
1806
1807 .pushsection .fixup,"ax"
1808 .align 0
1809 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1810 index 025f742..8432b08 100644
1811 --- a/arch/arm/lib/uaccess_with_memcpy.c
1812 +++ b/arch/arm/lib/uaccess_with_memcpy.c
1813 @@ -104,7 +104,7 @@ out:
1814 }
1815
1816 unsigned long
1817 -__copy_to_user(void __user *to, const void *from, unsigned long n)
1818 +___copy_to_user(void __user *to, const void *from, unsigned long n)
1819 {
1820 /*
1821 * This test is stubbed out of the main function above to keep
1822 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1823 index 518091c..eae9a76 100644
1824 --- a/arch/arm/mach-omap2/board-n8x0.c
1825 +++ b/arch/arm/mach-omap2/board-n8x0.c
1826 @@ -596,7 +596,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1827 }
1828 #endif
1829
1830 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1831 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1832 .late_init = n8x0_menelaus_late_init,
1833 };
1834
1835 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1836 index 5bb4835..4760f68 100644
1837 --- a/arch/arm/mm/fault.c
1838 +++ b/arch/arm/mm/fault.c
1839 @@ -174,6 +174,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1840 }
1841 #endif
1842
1843 +#ifdef CONFIG_PAX_PAGEEXEC
1844 + if (fsr & FSR_LNX_PF) {
1845 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1846 + do_group_exit(SIGKILL);
1847 + }
1848 +#endif
1849 +
1850 tsk->thread.address = addr;
1851 tsk->thread.error_code = fsr;
1852 tsk->thread.trap_no = 14;
1853 @@ -397,6 +404,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1854 }
1855 #endif /* CONFIG_MMU */
1856
1857 +#ifdef CONFIG_PAX_PAGEEXEC
1858 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1859 +{
1860 + long i;
1861 +
1862 + printk(KERN_ERR "PAX: bytes at PC: ");
1863 + for (i = 0; i < 20; i++) {
1864 + unsigned char c;
1865 + if (get_user(c, (__force unsigned char __user *)pc+i))
1866 + printk(KERN_CONT "?? ");
1867 + else
1868 + printk(KERN_CONT "%02x ", c);
1869 + }
1870 + printk("\n");
1871 +
1872 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1873 + for (i = -1; i < 20; i++) {
1874 + unsigned long c;
1875 + if (get_user(c, (__force unsigned long __user *)sp+i))
1876 + printk(KERN_CONT "???????? ");
1877 + else
1878 + printk(KERN_CONT "%08lx ", c);
1879 + }
1880 + printk("\n");
1881 +}
1882 +#endif
1883 +
1884 /*
1885 * First Level Translation Fault Handler
1886 *
1887 @@ -577,6 +611,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1888 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1889 struct siginfo info;
1890
1891 +#ifdef CONFIG_PAX_REFCOUNT
1892 + if (fsr_fs(ifsr) == 2) {
1893 + unsigned int bkpt;
1894 +
1895 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1896 + current->thread.error_code = ifsr;
1897 + current->thread.trap_no = 0;
1898 + pax_report_refcount_overflow(regs);
1899 + fixup_exception(regs);
1900 + return;
1901 + }
1902 + }
1903 +#endif
1904 +
1905 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1906 return;
1907
1908 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1909 index ce8cb19..3ec539d 100644
1910 --- a/arch/arm/mm/mmap.c
1911 +++ b/arch/arm/mm/mmap.c
1912 @@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1913 if (len > TASK_SIZE)
1914 return -ENOMEM;
1915
1916 +#ifdef CONFIG_PAX_RANDMMAP
1917 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1918 +#endif
1919 +
1920 if (addr) {
1921 if (do_align)
1922 addr = COLOUR_ALIGN(addr, pgoff);
1923 @@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1924 addr = PAGE_ALIGN(addr);
1925
1926 vma = find_vma(mm, addr);
1927 - if (TASK_SIZE - len >= addr &&
1928 - (!vma || addr + len <= vma->vm_start))
1929 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1930 return addr;
1931 }
1932 if (len > mm->cached_hole_size) {
1933 - start_addr = addr = mm->free_area_cache;
1934 + start_addr = addr = mm->free_area_cache;
1935 } else {
1936 - start_addr = addr = mm->mmap_base;
1937 - mm->cached_hole_size = 0;
1938 + start_addr = addr = mm->mmap_base;
1939 + mm->cached_hole_size = 0;
1940 }
1941
1942 full_search:
1943 @@ -124,14 +127,14 @@ full_search:
1944 * Start a new search - just in case we missed
1945 * some holes.
1946 */
1947 - if (start_addr != TASK_UNMAPPED_BASE) {
1948 - start_addr = addr = TASK_UNMAPPED_BASE;
1949 + if (start_addr != mm->mmap_base) {
1950 + start_addr = addr = mm->mmap_base;
1951 mm->cached_hole_size = 0;
1952 goto full_search;
1953 }
1954 return -ENOMEM;
1955 }
1956 - if (!vma || addr + len <= vma->vm_start) {
1957 + if (check_heap_stack_gap(vma, addr, len)) {
1958 /*
1959 * Remember the place where we stopped the search:
1960 */
1961 @@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1962
1963 if (mmap_is_legacy()) {
1964 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
1965 +
1966 +#ifdef CONFIG_PAX_RANDMMAP
1967 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1968 + mm->mmap_base += mm->delta_mmap;
1969 +#endif
1970 +
1971 mm->get_unmapped_area = arch_get_unmapped_area;
1972 mm->unmap_area = arch_unmap_area;
1973 } else {
1974 mm->mmap_base = mmap_base(random_factor);
1975 +
1976 +#ifdef CONFIG_PAX_RANDMMAP
1977 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1978 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
1979 +#endif
1980 +
1981 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
1982 mm->unmap_area = arch_unmap_area_topdown;
1983 }
1984 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
1985 index 71a6827..e7fbc23 100644
1986 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
1987 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
1988 @@ -43,7 +43,7 @@ struct samsung_dma_ops {
1989 int (*started)(unsigned ch);
1990 int (*flush)(unsigned ch);
1991 int (*stop)(unsigned ch);
1992 -};
1993 +} __no_const;
1994
1995 extern void *samsung_dmadev_get_ops(void);
1996 extern void *s3c_dma_get_ops(void);
1997 diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
1998 index 5f28cae..3d23723 100644
1999 --- a/arch/arm/plat-samsung/include/plat/ehci.h
2000 +++ b/arch/arm/plat-samsung/include/plat/ehci.h
2001 @@ -14,7 +14,7 @@
2002 struct s5p_ehci_platdata {
2003 int (*phy_init)(struct platform_device *pdev, int type);
2004 int (*phy_exit)(struct platform_device *pdev, int type);
2005 -};
2006 +} __no_const;
2007
2008 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
2009
2010 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2011 index c3a58a1..78fbf54 100644
2012 --- a/arch/avr32/include/asm/cache.h
2013 +++ b/arch/avr32/include/asm/cache.h
2014 @@ -1,8 +1,10 @@
2015 #ifndef __ASM_AVR32_CACHE_H
2016 #define __ASM_AVR32_CACHE_H
2017
2018 +#include <linux/const.h>
2019 +
2020 #define L1_CACHE_SHIFT 5
2021 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2022 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2023
2024 /*
2025 * Memory returned by kmalloc() may be used for DMA, so we must make
2026 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2027 index 3b3159b..425ea94 100644
2028 --- a/arch/avr32/include/asm/elf.h
2029 +++ b/arch/avr32/include/asm/elf.h
2030 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2031 the loader. We need to make sure that it is out of the way of the program
2032 that it will "exec", and that there is sufficient room for the brk. */
2033
2034 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2035 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2036
2037 +#ifdef CONFIG_PAX_ASLR
2038 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2039 +
2040 +#define PAX_DELTA_MMAP_LEN 15
2041 +#define PAX_DELTA_STACK_LEN 15
2042 +#endif
2043
2044 /* This yields a mask that user programs can use to figure out what
2045 instruction set this CPU supports. This could be done in user space,
2046 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2047 index b7f5c68..556135c 100644
2048 --- a/arch/avr32/include/asm/kmap_types.h
2049 +++ b/arch/avr32/include/asm/kmap_types.h
2050 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
2051 D(11) KM_IRQ1,
2052 D(12) KM_SOFTIRQ0,
2053 D(13) KM_SOFTIRQ1,
2054 -D(14) KM_TYPE_NR
2055 +D(14) KM_CLEARPAGE,
2056 +D(15) KM_TYPE_NR
2057 };
2058
2059 #undef D
2060 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2061 index f7040a1..db9f300 100644
2062 --- a/arch/avr32/mm/fault.c
2063 +++ b/arch/avr32/mm/fault.c
2064 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2065
2066 int exception_trace = 1;
2067
2068 +#ifdef CONFIG_PAX_PAGEEXEC
2069 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2070 +{
2071 + unsigned long i;
2072 +
2073 + printk(KERN_ERR "PAX: bytes at PC: ");
2074 + for (i = 0; i < 20; i++) {
2075 + unsigned char c;
2076 + if (get_user(c, (unsigned char *)pc+i))
2077 + printk(KERN_CONT "???????? ");
2078 + else
2079 + printk(KERN_CONT "%02x ", c);
2080 + }
2081 + printk("\n");
2082 +}
2083 +#endif
2084 +
2085 /*
2086 * This routine handles page faults. It determines the address and the
2087 * problem, and then passes it off to one of the appropriate routines.
2088 @@ -156,6 +173,16 @@ bad_area:
2089 up_read(&mm->mmap_sem);
2090
2091 if (user_mode(regs)) {
2092 +
2093 +#ifdef CONFIG_PAX_PAGEEXEC
2094 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2095 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2096 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2097 + do_group_exit(SIGKILL);
2098 + }
2099 + }
2100 +#endif
2101 +
2102 if (exception_trace && printk_ratelimit())
2103 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2104 "sp %08lx ecr %lu\n",
2105 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2106 index 568885a..f8008df 100644
2107 --- a/arch/blackfin/include/asm/cache.h
2108 +++ b/arch/blackfin/include/asm/cache.h
2109 @@ -7,6 +7,7 @@
2110 #ifndef __ARCH_BLACKFIN_CACHE_H
2111 #define __ARCH_BLACKFIN_CACHE_H
2112
2113 +#include <linux/const.h>
2114 #include <linux/linkage.h> /* for asmlinkage */
2115
2116 /*
2117 @@ -14,7 +15,7 @@
2118 * Blackfin loads 32 bytes for cache
2119 */
2120 #define L1_CACHE_SHIFT 5
2121 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2122 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2123 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2124
2125 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2126 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2127 index aea2718..3639a60 100644
2128 --- a/arch/cris/include/arch-v10/arch/cache.h
2129 +++ b/arch/cris/include/arch-v10/arch/cache.h
2130 @@ -1,8 +1,9 @@
2131 #ifndef _ASM_ARCH_CACHE_H
2132 #define _ASM_ARCH_CACHE_H
2133
2134 +#include <linux/const.h>
2135 /* Etrax 100LX have 32-byte cache-lines. */
2136 -#define L1_CACHE_BYTES 32
2137 #define L1_CACHE_SHIFT 5
2138 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2139
2140 #endif /* _ASM_ARCH_CACHE_H */
2141 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2142 index 1de779f..336fad3 100644
2143 --- a/arch/cris/include/arch-v32/arch/cache.h
2144 +++ b/arch/cris/include/arch-v32/arch/cache.h
2145 @@ -1,11 +1,12 @@
2146 #ifndef _ASM_CRIS_ARCH_CACHE_H
2147 #define _ASM_CRIS_ARCH_CACHE_H
2148
2149 +#include <linux/const.h>
2150 #include <arch/hwregs/dma.h>
2151
2152 /* A cache-line is 32 bytes. */
2153 -#define L1_CACHE_BYTES 32
2154 #define L1_CACHE_SHIFT 5
2155 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2156
2157 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2158
2159 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2160 index b86329d..6709906 100644
2161 --- a/arch/frv/include/asm/atomic.h
2162 +++ b/arch/frv/include/asm/atomic.h
2163 @@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
2164 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2165 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2166
2167 +#define atomic64_read_unchecked(v) atomic64_read(v)
2168 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2169 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2170 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2171 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2172 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2173 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2174 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2175 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2176 +
2177 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2178 {
2179 int c, old;
2180 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2181 index 2797163..c2a401d 100644
2182 --- a/arch/frv/include/asm/cache.h
2183 +++ b/arch/frv/include/asm/cache.h
2184 @@ -12,10 +12,11 @@
2185 #ifndef __ASM_CACHE_H
2186 #define __ASM_CACHE_H
2187
2188 +#include <linux/const.h>
2189
2190 /* bytes per L1 cache line */
2191 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2192 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2193 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2194
2195 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2196 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2197 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2198 index f8e16b2..c73ff79 100644
2199 --- a/arch/frv/include/asm/kmap_types.h
2200 +++ b/arch/frv/include/asm/kmap_types.h
2201 @@ -23,6 +23,7 @@ enum km_type {
2202 KM_IRQ1,
2203 KM_SOFTIRQ0,
2204 KM_SOFTIRQ1,
2205 + KM_CLEARPAGE,
2206 KM_TYPE_NR
2207 };
2208
2209 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2210 index 385fd30..6c3d97e 100644
2211 --- a/arch/frv/mm/elf-fdpic.c
2212 +++ b/arch/frv/mm/elf-fdpic.c
2213 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2214 if (addr) {
2215 addr = PAGE_ALIGN(addr);
2216 vma = find_vma(current->mm, addr);
2217 - if (TASK_SIZE - len >= addr &&
2218 - (!vma || addr + len <= vma->vm_start))
2219 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2220 goto success;
2221 }
2222
2223 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2224 for (; vma; vma = vma->vm_next) {
2225 if (addr > limit)
2226 break;
2227 - if (addr + len <= vma->vm_start)
2228 + if (check_heap_stack_gap(vma, addr, len))
2229 goto success;
2230 addr = vma->vm_end;
2231 }
2232 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2233 for (; vma; vma = vma->vm_next) {
2234 if (addr > limit)
2235 break;
2236 - if (addr + len <= vma->vm_start)
2237 + if (check_heap_stack_gap(vma, addr, len))
2238 goto success;
2239 addr = vma->vm_end;
2240 }
2241 diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2242 index c635028..6d9445a 100644
2243 --- a/arch/h8300/include/asm/cache.h
2244 +++ b/arch/h8300/include/asm/cache.h
2245 @@ -1,8 +1,10 @@
2246 #ifndef __ARCH_H8300_CACHE_H
2247 #define __ARCH_H8300_CACHE_H
2248
2249 +#include <linux/const.h>
2250 +
2251 /* bytes per L1 cache line */
2252 -#define L1_CACHE_BYTES 4
2253 +#define L1_CACHE_BYTES _AC(4,UL)
2254
2255 /* m68k-elf-gcc 2.95.2 doesn't like these */
2256
2257 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2258 index 0f01de2..d37d309 100644
2259 --- a/arch/hexagon/include/asm/cache.h
2260 +++ b/arch/hexagon/include/asm/cache.h
2261 @@ -21,9 +21,11 @@
2262 #ifndef __ASM_CACHE_H
2263 #define __ASM_CACHE_H
2264
2265 +#include <linux/const.h>
2266 +
2267 /* Bytes per L1 cache line */
2268 -#define L1_CACHE_SHIFT (5)
2269 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2270 +#define L1_CACHE_SHIFT 5
2271 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2272
2273 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2274 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2275 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2276 index 7d91166..88ab87e 100644
2277 --- a/arch/ia64/include/asm/atomic.h
2278 +++ b/arch/ia64/include/asm/atomic.h
2279 @@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2280 #define atomic64_inc(v) atomic64_add(1, (v))
2281 #define atomic64_dec(v) atomic64_sub(1, (v))
2282
2283 +#define atomic64_read_unchecked(v) atomic64_read(v)
2284 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2285 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2286 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2287 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2288 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2289 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2290 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2291 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2292 +
2293 /* Atomic operations are already serializing */
2294 #define smp_mb__before_atomic_dec() barrier()
2295 #define smp_mb__after_atomic_dec() barrier()
2296 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2297 index 988254a..e1ee885 100644
2298 --- a/arch/ia64/include/asm/cache.h
2299 +++ b/arch/ia64/include/asm/cache.h
2300 @@ -1,6 +1,7 @@
2301 #ifndef _ASM_IA64_CACHE_H
2302 #define _ASM_IA64_CACHE_H
2303
2304 +#include <linux/const.h>
2305
2306 /*
2307 * Copyright (C) 1998-2000 Hewlett-Packard Co
2308 @@ -9,7 +10,7 @@
2309
2310 /* Bytes per L1 (data) cache line. */
2311 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2312 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2313 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2314
2315 #ifdef CONFIG_SMP
2316 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2317 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2318 index b5298eb..67c6e62 100644
2319 --- a/arch/ia64/include/asm/elf.h
2320 +++ b/arch/ia64/include/asm/elf.h
2321 @@ -42,6 +42,13 @@
2322 */
2323 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2324
2325 +#ifdef CONFIG_PAX_ASLR
2326 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2327 +
2328 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2329 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2330 +#endif
2331 +
2332 #define PT_IA_64_UNWIND 0x70000001
2333
2334 /* IA-64 relocations: */
2335 diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
2336 index 96a8d92..617a1cf 100644
2337 --- a/arch/ia64/include/asm/pgalloc.h
2338 +++ b/arch/ia64/include/asm/pgalloc.h
2339 @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2340 pgd_val(*pgd_entry) = __pa(pud);
2341 }
2342
2343 +static inline void
2344 +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2345 +{
2346 + pgd_populate(mm, pgd_entry, pud);
2347 +}
2348 +
2349 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
2350 {
2351 return quicklist_alloc(0, GFP_KERNEL, NULL);
2352 @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2353 pud_val(*pud_entry) = __pa(pmd);
2354 }
2355
2356 +static inline void
2357 +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2358 +{
2359 + pud_populate(mm, pud_entry, pmd);
2360 +}
2361 +
2362 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
2363 {
2364 return quicklist_alloc(0, GFP_KERNEL, NULL);
2365 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2366 index 815810c..d60bd4c 100644
2367 --- a/arch/ia64/include/asm/pgtable.h
2368 +++ b/arch/ia64/include/asm/pgtable.h
2369 @@ -12,7 +12,7 @@
2370 * David Mosberger-Tang <davidm@hpl.hp.com>
2371 */
2372
2373 -
2374 +#include <linux/const.h>
2375 #include <asm/mman.h>
2376 #include <asm/page.h>
2377 #include <asm/processor.h>
2378 @@ -142,6 +142,17 @@
2379 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2380 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2381 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2382 +
2383 +#ifdef CONFIG_PAX_PAGEEXEC
2384 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2385 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2386 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2387 +#else
2388 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2389 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2390 +# define PAGE_COPY_NOEXEC PAGE_COPY
2391 +#endif
2392 +
2393 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2394 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2395 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2396 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2397 index 54ff557..70c88b7 100644
2398 --- a/arch/ia64/include/asm/spinlock.h
2399 +++ b/arch/ia64/include/asm/spinlock.h
2400 @@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2401 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2402
2403 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2404 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2405 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2406 }
2407
2408 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2409 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2410 index 449c8c0..432a3d2 100644
2411 --- a/arch/ia64/include/asm/uaccess.h
2412 +++ b/arch/ia64/include/asm/uaccess.h
2413 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2414 const void *__cu_from = (from); \
2415 long __cu_len = (n); \
2416 \
2417 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
2418 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2419 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2420 __cu_len; \
2421 })
2422 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2423 long __cu_len = (n); \
2424 \
2425 __chk_user_ptr(__cu_from); \
2426 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
2427 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2428 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2429 __cu_len; \
2430 })
2431 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2432 index 24603be..948052d 100644
2433 --- a/arch/ia64/kernel/module.c
2434 +++ b/arch/ia64/kernel/module.c
2435 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2436 void
2437 module_free (struct module *mod, void *module_region)
2438 {
2439 - if (mod && mod->arch.init_unw_table &&
2440 - module_region == mod->module_init) {
2441 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2442 unw_remove_unwind_table(mod->arch.init_unw_table);
2443 mod->arch.init_unw_table = NULL;
2444 }
2445 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2446 }
2447
2448 static inline int
2449 +in_init_rx (const struct module *mod, uint64_t addr)
2450 +{
2451 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2452 +}
2453 +
2454 +static inline int
2455 +in_init_rw (const struct module *mod, uint64_t addr)
2456 +{
2457 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2458 +}
2459 +
2460 +static inline int
2461 in_init (const struct module *mod, uint64_t addr)
2462 {
2463 - return addr - (uint64_t) mod->module_init < mod->init_size;
2464 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2465 +}
2466 +
2467 +static inline int
2468 +in_core_rx (const struct module *mod, uint64_t addr)
2469 +{
2470 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2471 +}
2472 +
2473 +static inline int
2474 +in_core_rw (const struct module *mod, uint64_t addr)
2475 +{
2476 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2477 }
2478
2479 static inline int
2480 in_core (const struct module *mod, uint64_t addr)
2481 {
2482 - return addr - (uint64_t) mod->module_core < mod->core_size;
2483 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2484 }
2485
2486 static inline int
2487 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2488 break;
2489
2490 case RV_BDREL:
2491 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2492 + if (in_init_rx(mod, val))
2493 + val -= (uint64_t) mod->module_init_rx;
2494 + else if (in_init_rw(mod, val))
2495 + val -= (uint64_t) mod->module_init_rw;
2496 + else if (in_core_rx(mod, val))
2497 + val -= (uint64_t) mod->module_core_rx;
2498 + else if (in_core_rw(mod, val))
2499 + val -= (uint64_t) mod->module_core_rw;
2500 break;
2501
2502 case RV_LTV:
2503 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2504 * addresses have been selected...
2505 */
2506 uint64_t gp;
2507 - if (mod->core_size > MAX_LTOFF)
2508 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2509 /*
2510 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2511 * at the end of the module.
2512 */
2513 - gp = mod->core_size - MAX_LTOFF / 2;
2514 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2515 else
2516 - gp = mod->core_size / 2;
2517 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2518 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2519 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2520 mod->arch.gp = gp;
2521 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2522 }
2523 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2524 index 609d500..7dde2a8 100644
2525 --- a/arch/ia64/kernel/sys_ia64.c
2526 +++ b/arch/ia64/kernel/sys_ia64.c
2527 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2528 if (REGION_NUMBER(addr) == RGN_HPAGE)
2529 addr = 0;
2530 #endif
2531 +
2532 +#ifdef CONFIG_PAX_RANDMMAP
2533 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2534 + addr = mm->free_area_cache;
2535 + else
2536 +#endif
2537 +
2538 if (!addr)
2539 addr = mm->free_area_cache;
2540
2541 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2542 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2543 /* At this point: (!vma || addr < vma->vm_end). */
2544 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2545 - if (start_addr != TASK_UNMAPPED_BASE) {
2546 + if (start_addr != mm->mmap_base) {
2547 /* Start a new search --- just in case we missed some holes. */
2548 - addr = TASK_UNMAPPED_BASE;
2549 + addr = mm->mmap_base;
2550 goto full_search;
2551 }
2552 return -ENOMEM;
2553 }
2554 - if (!vma || addr + len <= vma->vm_start) {
2555 + if (check_heap_stack_gap(vma, addr, len)) {
2556 /* Remember the address where we stopped this search: */
2557 mm->free_area_cache = addr + len;
2558 return addr;
2559 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2560 index 0ccb28f..8992469 100644
2561 --- a/arch/ia64/kernel/vmlinux.lds.S
2562 +++ b/arch/ia64/kernel/vmlinux.lds.S
2563 @@ -198,7 +198,7 @@ SECTIONS {
2564 /* Per-cpu data: */
2565 . = ALIGN(PERCPU_PAGE_SIZE);
2566 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2567 - __phys_per_cpu_start = __per_cpu_load;
2568 + __phys_per_cpu_start = per_cpu_load;
2569 /*
2570 * ensure percpu data fits
2571 * into percpu page size
2572 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2573 index 02d29c2..ea893df 100644
2574 --- a/arch/ia64/mm/fault.c
2575 +++ b/arch/ia64/mm/fault.c
2576 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2577 return pte_present(pte);
2578 }
2579
2580 +#ifdef CONFIG_PAX_PAGEEXEC
2581 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2582 +{
2583 + unsigned long i;
2584 +
2585 + printk(KERN_ERR "PAX: bytes at PC: ");
2586 + for (i = 0; i < 8; i++) {
2587 + unsigned int c;
2588 + if (get_user(c, (unsigned int *)pc+i))
2589 + printk(KERN_CONT "???????? ");
2590 + else
2591 + printk(KERN_CONT "%08x ", c);
2592 + }
2593 + printk("\n");
2594 +}
2595 +#endif
2596 +
2597 void __kprobes
2598 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2599 {
2600 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2601 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2602 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2603
2604 - if ((vma->vm_flags & mask) != mask)
2605 + if ((vma->vm_flags & mask) != mask) {
2606 +
2607 +#ifdef CONFIG_PAX_PAGEEXEC
2608 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2609 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2610 + goto bad_area;
2611 +
2612 + up_read(&mm->mmap_sem);
2613 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2614 + do_group_exit(SIGKILL);
2615 + }
2616 +#endif
2617 +
2618 goto bad_area;
2619
2620 + }
2621 +
2622 /*
2623 * If for any reason at all we couldn't handle the fault, make
2624 * sure we exit gracefully rather than endlessly redo the
2625 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2626 index 5ca674b..e0e1b70 100644
2627 --- a/arch/ia64/mm/hugetlbpage.c
2628 +++ b/arch/ia64/mm/hugetlbpage.c
2629 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2630 /* At this point: (!vmm || addr < vmm->vm_end). */
2631 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2632 return -ENOMEM;
2633 - if (!vmm || (addr + len) <= vmm->vm_start)
2634 + if (check_heap_stack_gap(vmm, addr, len))
2635 return addr;
2636 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2637 }
2638 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2639 index 0eab454..bd794f2 100644
2640 --- a/arch/ia64/mm/init.c
2641 +++ b/arch/ia64/mm/init.c
2642 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
2643 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2644 vma->vm_end = vma->vm_start + PAGE_SIZE;
2645 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2646 +
2647 +#ifdef CONFIG_PAX_PAGEEXEC
2648 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2649 + vma->vm_flags &= ~VM_EXEC;
2650 +
2651 +#ifdef CONFIG_PAX_MPROTECT
2652 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
2653 + vma->vm_flags &= ~VM_MAYEXEC;
2654 +#endif
2655 +
2656 + }
2657 +#endif
2658 +
2659 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2660 down_write(&current->mm->mmap_sem);
2661 if (insert_vm_struct(current->mm, vma)) {
2662 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2663 index 40b3ee9..8c2c112 100644
2664 --- a/arch/m32r/include/asm/cache.h
2665 +++ b/arch/m32r/include/asm/cache.h
2666 @@ -1,8 +1,10 @@
2667 #ifndef _ASM_M32R_CACHE_H
2668 #define _ASM_M32R_CACHE_H
2669
2670 +#include <linux/const.h>
2671 +
2672 /* L1 cache line size */
2673 #define L1_CACHE_SHIFT 4
2674 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2675 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2676
2677 #endif /* _ASM_M32R_CACHE_H */
2678 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2679 index 82abd15..d95ae5d 100644
2680 --- a/arch/m32r/lib/usercopy.c
2681 +++ b/arch/m32r/lib/usercopy.c
2682 @@ -14,6 +14,9 @@
2683 unsigned long
2684 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2685 {
2686 + if ((long)n < 0)
2687 + return n;
2688 +
2689 prefetch(from);
2690 if (access_ok(VERIFY_WRITE, to, n))
2691 __copy_user(to,from,n);
2692 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2693 unsigned long
2694 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2695 {
2696 + if ((long)n < 0)
2697 + return n;
2698 +
2699 prefetchw(to);
2700 if (access_ok(VERIFY_READ, from, n))
2701 __copy_user_zeroing(to,from,n);
2702 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2703 index 0395c51..5f26031 100644
2704 --- a/arch/m68k/include/asm/cache.h
2705 +++ b/arch/m68k/include/asm/cache.h
2706 @@ -4,9 +4,11 @@
2707 #ifndef __ARCH_M68K_CACHE_H
2708 #define __ARCH_M68K_CACHE_H
2709
2710 +#include <linux/const.h>
2711 +
2712 /* bytes per L1 cache line */
2713 #define L1_CACHE_SHIFT 4
2714 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2715 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2716
2717 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2718
2719 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2720 index 4efe96a..60e8699 100644
2721 --- a/arch/microblaze/include/asm/cache.h
2722 +++ b/arch/microblaze/include/asm/cache.h
2723 @@ -13,11 +13,12 @@
2724 #ifndef _ASM_MICROBLAZE_CACHE_H
2725 #define _ASM_MICROBLAZE_CACHE_H
2726
2727 +#include <linux/const.h>
2728 #include <asm/registers.h>
2729
2730 #define L1_CACHE_SHIFT 5
2731 /* word-granular cache in microblaze */
2732 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2733 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2734
2735 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2736
2737 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2738 index 3f4c5cb..3439c6e 100644
2739 --- a/arch/mips/include/asm/atomic.h
2740 +++ b/arch/mips/include/asm/atomic.h
2741 @@ -21,6 +21,10 @@
2742 #include <asm/cmpxchg.h>
2743 #include <asm/war.h>
2744
2745 +#ifdef CONFIG_GENERIC_ATOMIC64
2746 +#include <asm-generic/atomic64.h>
2747 +#endif
2748 +
2749 #define ATOMIC_INIT(i) { (i) }
2750
2751 /*
2752 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2753 */
2754 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2755
2756 +#define atomic64_read_unchecked(v) atomic64_read(v)
2757 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2758 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2759 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2760 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2761 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2762 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2763 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2764 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2765 +
2766 #endif /* CONFIG_64BIT */
2767
2768 /*
2769 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2770 index b4db69f..8f3b093 100644
2771 --- a/arch/mips/include/asm/cache.h
2772 +++ b/arch/mips/include/asm/cache.h
2773 @@ -9,10 +9,11 @@
2774 #ifndef _ASM_CACHE_H
2775 #define _ASM_CACHE_H
2776
2777 +#include <linux/const.h>
2778 #include <kmalloc.h>
2779
2780 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2781 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2782 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2783
2784 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2785 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2786 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2787 index 455c0ac..ad65fbe 100644
2788 --- a/arch/mips/include/asm/elf.h
2789 +++ b/arch/mips/include/asm/elf.h
2790 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
2791 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2792 #endif
2793
2794 +#ifdef CONFIG_PAX_ASLR
2795 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2796 +
2797 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2798 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2799 +#endif
2800 +
2801 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2802 struct linux_binprm;
2803 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2804 int uses_interp);
2805
2806 -struct mm_struct;
2807 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2808 -#define arch_randomize_brk arch_randomize_brk
2809 -
2810 #endif /* _ASM_ELF_H */
2811 diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
2812 index c1f6afa..38cc6e9 100644
2813 --- a/arch/mips/include/asm/exec.h
2814 +++ b/arch/mips/include/asm/exec.h
2815 @@ -12,6 +12,6 @@
2816 #ifndef _ASM_EXEC_H
2817 #define _ASM_EXEC_H
2818
2819 -extern unsigned long arch_align_stack(unsigned long sp);
2820 +#define arch_align_stack(x) ((x) & ~0xfUL)
2821
2822 #endif /* _ASM_EXEC_H */
2823 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2824 index da9bd7d..91aa7ab 100644
2825 --- a/arch/mips/include/asm/page.h
2826 +++ b/arch/mips/include/asm/page.h
2827 @@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2828 #ifdef CONFIG_CPU_MIPS32
2829 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2830 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2831 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2832 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2833 #else
2834 typedef struct { unsigned long long pte; } pte_t;
2835 #define pte_val(x) ((x).pte)
2836 diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
2837 index 881d18b..cea38bc 100644
2838 --- a/arch/mips/include/asm/pgalloc.h
2839 +++ b/arch/mips/include/asm/pgalloc.h
2840 @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2841 {
2842 set_pud(pud, __pud((unsigned long)pmd));
2843 }
2844 +
2845 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2846 +{
2847 + pud_populate(mm, pud, pmd);
2848 +}
2849 #endif
2850
2851 /*
2852 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
2853 index 0d85d8e..ec71487 100644
2854 --- a/arch/mips/include/asm/thread_info.h
2855 +++ b/arch/mips/include/asm/thread_info.h
2856 @@ -123,6 +123,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
2857 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
2858 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
2859 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
2860 +/* li takes a 32bit immediate */
2861 +#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
2862 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
2863
2864 #ifdef CONFIG_MIPS32_O32
2865 @@ -146,15 +148,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
2866 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
2867 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
2868 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
2869 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
2870 +
2871 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2872
2873 /* work to do in syscall_trace_leave() */
2874 -#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
2875 +#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2876
2877 /* work to do on interrupt/exception return */
2878 #define _TIF_WORK_MASK (0x0000ffef & \
2879 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
2880 /* work to do on any return to u-space */
2881 -#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
2882 +#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
2883
2884 #endif /* __KERNEL__ */
2885
2886 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2887 index 9fdd8bc..4bd7f1a 100644
2888 --- a/arch/mips/kernel/binfmt_elfn32.c
2889 +++ b/arch/mips/kernel/binfmt_elfn32.c
2890 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2891 #undef ELF_ET_DYN_BASE
2892 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2893
2894 +#ifdef CONFIG_PAX_ASLR
2895 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2896 +
2897 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2898 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2899 +#endif
2900 +
2901 #include <asm/processor.h>
2902 #include <linux/module.h>
2903 #include <linux/elfcore.h>
2904 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2905 index ff44823..97f8906 100644
2906 --- a/arch/mips/kernel/binfmt_elfo32.c
2907 +++ b/arch/mips/kernel/binfmt_elfo32.c
2908 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2909 #undef ELF_ET_DYN_BASE
2910 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2911
2912 +#ifdef CONFIG_PAX_ASLR
2913 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2914 +
2915 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2916 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2917 +#endif
2918 +
2919 #include <asm/processor.h>
2920
2921 /*
2922 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2923 index e9a5fd7..378809a 100644
2924 --- a/arch/mips/kernel/process.c
2925 +++ b/arch/mips/kernel/process.c
2926 @@ -480,15 +480,3 @@ unsigned long get_wchan(struct task_struct *task)
2927 out:
2928 return pc;
2929 }
2930 -
2931 -/*
2932 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2933 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2934 - */
2935 -unsigned long arch_align_stack(unsigned long sp)
2936 -{
2937 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2938 - sp -= get_random_int() & ~PAGE_MASK;
2939 -
2940 - return sp & ALMASK;
2941 -}
2942 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
2943 index 7c24c29..e2f1981 100644
2944 --- a/arch/mips/kernel/ptrace.c
2945 +++ b/arch/mips/kernel/ptrace.c
2946 @@ -528,6 +528,10 @@ static inline int audit_arch(void)
2947 return arch;
2948 }
2949
2950 +#ifdef CONFIG_GRKERNSEC_SETXID
2951 +extern void gr_delayed_cred_worker(void);
2952 +#endif
2953 +
2954 /*
2955 * Notification of system call entry/exit
2956 * - triggered by current->work.syscall_trace
2957 @@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
2958 /* do the secure computing check first */
2959 secure_computing(regs->regs[2]);
2960
2961 +#ifdef CONFIG_GRKERNSEC_SETXID
2962 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2963 + gr_delayed_cred_worker();
2964 +#endif
2965 +
2966 if (!(current->ptrace & PT_PTRACED))
2967 goto out;
2968
2969 diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
2970 index a632bc1..0b77c7c 100644
2971 --- a/arch/mips/kernel/scall32-o32.S
2972 +++ b/arch/mips/kernel/scall32-o32.S
2973 @@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
2974
2975 stack_done:
2976 lw t0, TI_FLAGS($28) # syscall tracing enabled?
2977 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
2978 + li t1, _TIF_SYSCALL_WORK
2979 and t0, t1
2980 bnez t0, syscall_trace_entry # -> yes
2981
2982 diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
2983 index 3b5a5e9..e1ee86d 100644
2984 --- a/arch/mips/kernel/scall64-64.S
2985 +++ b/arch/mips/kernel/scall64-64.S
2986 @@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
2987
2988 sd a3, PT_R26(sp) # save a3 for syscall restarting
2989
2990 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
2991 + li t1, _TIF_SYSCALL_WORK
2992 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
2993 and t0, t1, t0
2994 bnez t0, syscall_trace_entry
2995 diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
2996 index 6be6f70..1859577 100644
2997 --- a/arch/mips/kernel/scall64-n32.S
2998 +++ b/arch/mips/kernel/scall64-n32.S
2999 @@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
3000
3001 sd a3, PT_R26(sp) # save a3 for syscall restarting
3002
3003 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3004 + li t1, _TIF_SYSCALL_WORK
3005 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3006 and t0, t1, t0
3007 bnez t0, n32_syscall_trace_entry
3008 diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3009 index 5422855..74e63a3 100644
3010 --- a/arch/mips/kernel/scall64-o32.S
3011 +++ b/arch/mips/kernel/scall64-o32.S
3012 @@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3013 PTR 4b, bad_stack
3014 .previous
3015
3016 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3017 + li t1, _TIF_SYSCALL_WORK
3018 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3019 and t0, t1, t0
3020 bnez t0, trace_a_syscall
3021 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3022 index c14f6df..537e729 100644
3023 --- a/arch/mips/mm/fault.c
3024 +++ b/arch/mips/mm/fault.c
3025 @@ -27,6 +27,23 @@
3026 #include <asm/highmem.h> /* For VMALLOC_END */
3027 #include <linux/kdebug.h>
3028
3029 +#ifdef CONFIG_PAX_PAGEEXEC
3030 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3031 +{
3032 + unsigned long i;
3033 +
3034 + printk(KERN_ERR "PAX: bytes at PC: ");
3035 + for (i = 0; i < 5; i++) {
3036 + unsigned int c;
3037 + if (get_user(c, (unsigned int *)pc+i))
3038 + printk(KERN_CONT "???????? ");
3039 + else
3040 + printk(KERN_CONT "%08x ", c);
3041 + }
3042 + printk("\n");
3043 +}
3044 +#endif
3045 +
3046 /*
3047 * This routine handles page faults. It determines the address,
3048 * and the problem, and then passes it off to one of the appropriate
3049 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3050 index 302d779..7d35bf8 100644
3051 --- a/arch/mips/mm/mmap.c
3052 +++ b/arch/mips/mm/mmap.c
3053 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3054 do_color_align = 1;
3055
3056 /* requesting a specific address */
3057 +
3058 +#ifdef CONFIG_PAX_RANDMMAP
3059 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3060 +#endif
3061 +
3062 if (addr) {
3063 if (do_color_align)
3064 addr = COLOUR_ALIGN(addr, pgoff);
3065 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3066 addr = PAGE_ALIGN(addr);
3067
3068 vma = find_vma(mm, addr);
3069 - if (TASK_SIZE - len >= addr &&
3070 - (!vma || addr + len <= vma->vm_start))
3071 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
3072 return addr;
3073 }
3074
3075 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3076 /* At this point: (!vma || addr < vma->vm_end). */
3077 if (TASK_SIZE - len < addr)
3078 return -ENOMEM;
3079 - if (!vma || addr + len <= vma->vm_start)
3080 + if (check_heap_stack_gap(vmm, addr, len))
3081 return addr;
3082 addr = vma->vm_end;
3083 if (do_color_align)
3084 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3085 /* make sure it can fit in the remaining address space */
3086 if (likely(addr > len)) {
3087 vma = find_vma(mm, addr - len);
3088 - if (!vma || addr <= vma->vm_start) {
3089 + if (check_heap_stack_gap(vmm, addr - len, len))
3090 /* cache the address as a hint for next time */
3091 return mm->free_area_cache = addr - len;
3092 }
3093 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3094 * return with success:
3095 */
3096 vma = find_vma(mm, addr);
3097 - if (likely(!vma || addr + len <= vma->vm_start)) {
3098 + if (check_heap_stack_gap(vmm, addr, len)) {
3099 /* cache the address as a hint for next time */
3100 return mm->free_area_cache = addr;
3101 }
3102 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3103 mm->unmap_area = arch_unmap_area_topdown;
3104 }
3105 }
3106 -
3107 -static inline unsigned long brk_rnd(void)
3108 -{
3109 - unsigned long rnd = get_random_int();
3110 -
3111 - rnd = rnd << PAGE_SHIFT;
3112 - /* 8MB for 32bit, 256MB for 64bit */
3113 - if (TASK_IS_32BIT_ADDR)
3114 - rnd = rnd & 0x7ffffful;
3115 - else
3116 - rnd = rnd & 0xffffffful;
3117 -
3118 - return rnd;
3119 -}
3120 -
3121 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3122 -{
3123 - unsigned long base = mm->brk;
3124 - unsigned long ret;
3125 -
3126 - ret = PAGE_ALIGN(base + brk_rnd());
3127 -
3128 - if (ret < mm->brk)
3129 - return mm->brk;
3130 -
3131 - return ret;
3132 -}
3133 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3134 index 967d144..db12197 100644
3135 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
3136 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3137 @@ -11,12 +11,14 @@
3138 #ifndef _ASM_PROC_CACHE_H
3139 #define _ASM_PROC_CACHE_H
3140
3141 +#include <linux/const.h>
3142 +
3143 /* L1 cache */
3144
3145 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3146 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
3147 -#define L1_CACHE_BYTES 16 /* bytes per entry */
3148 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3149 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3150 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3151
3152 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3153 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3154 index bcb5df2..84fabd2 100644
3155 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3156 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3157 @@ -16,13 +16,15 @@
3158 #ifndef _ASM_PROC_CACHE_H
3159 #define _ASM_PROC_CACHE_H
3160
3161 +#include <linux/const.h>
3162 +
3163 /*
3164 * L1 cache
3165 */
3166 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3167 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
3168 -#define L1_CACHE_BYTES 32 /* bytes per entry */
3169 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
3170 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3171 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
3172
3173 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3174 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
3175 index 4ce7a01..449202a 100644
3176 --- a/arch/openrisc/include/asm/cache.h
3177 +++ b/arch/openrisc/include/asm/cache.h
3178 @@ -19,11 +19,13 @@
3179 #ifndef __ASM_OPENRISC_CACHE_H
3180 #define __ASM_OPENRISC_CACHE_H
3181
3182 +#include <linux/const.h>
3183 +
3184 /* FIXME: How can we replace these with values from the CPU...
3185 * they shouldn't be hard-coded!
3186 */
3187
3188 -#define L1_CACHE_BYTES 16
3189 #define L1_CACHE_SHIFT 4
3190 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3191
3192 #endif /* __ASM_OPENRISC_CACHE_H */
3193 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3194 index 6c6defc..d30653d 100644
3195 --- a/arch/parisc/include/asm/atomic.h
3196 +++ b/arch/parisc/include/asm/atomic.h
3197 @@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3198
3199 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3200
3201 +#define atomic64_read_unchecked(v) atomic64_read(v)
3202 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3203 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3204 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3205 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3206 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3207 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3208 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3209 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3210 +
3211 #endif /* !CONFIG_64BIT */
3212
3213
3214 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3215 index 47f11c7..3420df2 100644
3216 --- a/arch/parisc/include/asm/cache.h
3217 +++ b/arch/parisc/include/asm/cache.h
3218 @@ -5,6 +5,7 @@
3219 #ifndef __ARCH_PARISC_CACHE_H
3220 #define __ARCH_PARISC_CACHE_H
3221
3222 +#include <linux/const.h>
3223
3224 /*
3225 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3226 @@ -15,13 +16,13 @@
3227 * just ruin performance.
3228 */
3229 #ifdef CONFIG_PA20
3230 -#define L1_CACHE_BYTES 64
3231 #define L1_CACHE_SHIFT 6
3232 #else
3233 -#define L1_CACHE_BYTES 32
3234 #define L1_CACHE_SHIFT 5
3235 #endif
3236
3237 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3238 +
3239 #ifndef __ASSEMBLY__
3240
3241 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3242 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3243 index 19f6cb1..6c78cf2 100644
3244 --- a/arch/parisc/include/asm/elf.h
3245 +++ b/arch/parisc/include/asm/elf.h
3246 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
3247
3248 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3249
3250 +#ifdef CONFIG_PAX_ASLR
3251 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3252 +
3253 +#define PAX_DELTA_MMAP_LEN 16
3254 +#define PAX_DELTA_STACK_LEN 16
3255 +#endif
3256 +
3257 /* This yields a mask that user programs can use to figure out what
3258 instruction set this CPU supports. This could be done in user space,
3259 but it's not easy, and we've already done it here. */
3260 diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
3261 index fc987a1..6e068ef 100644
3262 --- a/arch/parisc/include/asm/pgalloc.h
3263 +++ b/arch/parisc/include/asm/pgalloc.h
3264 @@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3265 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
3266 }
3267
3268 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3269 +{
3270 + pgd_populate(mm, pgd, pmd);
3271 +}
3272 +
3273 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
3274 {
3275 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
3276 @@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
3277 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
3278 #define pmd_free(mm, x) do { } while (0)
3279 #define pgd_populate(mm, pmd, pte) BUG()
3280 +#define pgd_populate_kernel(mm, pmd, pte) BUG()
3281
3282 #endif
3283
3284 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3285 index ee99f23..802b0a1 100644
3286 --- a/arch/parisc/include/asm/pgtable.h
3287 +++ b/arch/parisc/include/asm/pgtable.h
3288 @@ -212,6 +212,17 @@ struct vm_area_struct;
3289 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3290 #define PAGE_COPY PAGE_EXECREAD
3291 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3292 +
3293 +#ifdef CONFIG_PAX_PAGEEXEC
3294 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3295 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3296 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3297 +#else
3298 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3299 +# define PAGE_COPY_NOEXEC PAGE_COPY
3300 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3301 +#endif
3302 +
3303 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3304 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3305 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3306 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3307 index 5e34ccf..672bc9c 100644
3308 --- a/arch/parisc/kernel/module.c
3309 +++ b/arch/parisc/kernel/module.c
3310 @@ -98,16 +98,38 @@
3311
3312 /* three functions to determine where in the module core
3313 * or init pieces the location is */
3314 +static inline int in_init_rx(struct module *me, void *loc)
3315 +{
3316 + return (loc >= me->module_init_rx &&
3317 + loc < (me->module_init_rx + me->init_size_rx));
3318 +}
3319 +
3320 +static inline int in_init_rw(struct module *me, void *loc)
3321 +{
3322 + return (loc >= me->module_init_rw &&
3323 + loc < (me->module_init_rw + me->init_size_rw));
3324 +}
3325 +
3326 static inline int in_init(struct module *me, void *loc)
3327 {
3328 - return (loc >= me->module_init &&
3329 - loc <= (me->module_init + me->init_size));
3330 + return in_init_rx(me, loc) || in_init_rw(me, loc);
3331 +}
3332 +
3333 +static inline int in_core_rx(struct module *me, void *loc)
3334 +{
3335 + return (loc >= me->module_core_rx &&
3336 + loc < (me->module_core_rx + me->core_size_rx));
3337 +}
3338 +
3339 +static inline int in_core_rw(struct module *me, void *loc)
3340 +{
3341 + return (loc >= me->module_core_rw &&
3342 + loc < (me->module_core_rw + me->core_size_rw));
3343 }
3344
3345 static inline int in_core(struct module *me, void *loc)
3346 {
3347 - return (loc >= me->module_core &&
3348 - loc <= (me->module_core + me->core_size));
3349 + return in_core_rx(me, loc) || in_core_rw(me, loc);
3350 }
3351
3352 static inline int in_local(struct module *me, void *loc)
3353 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3354 }
3355
3356 /* align things a bit */
3357 - me->core_size = ALIGN(me->core_size, 16);
3358 - me->arch.got_offset = me->core_size;
3359 - me->core_size += gots * sizeof(struct got_entry);
3360 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3361 + me->arch.got_offset = me->core_size_rw;
3362 + me->core_size_rw += gots * sizeof(struct got_entry);
3363
3364 - me->core_size = ALIGN(me->core_size, 16);
3365 - me->arch.fdesc_offset = me->core_size;
3366 - me->core_size += fdescs * sizeof(Elf_Fdesc);
3367 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3368 + me->arch.fdesc_offset = me->core_size_rw;
3369 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3370
3371 me->arch.got_max = gots;
3372 me->arch.fdesc_max = fdescs;
3373 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3374
3375 BUG_ON(value == 0);
3376
3377 - got = me->module_core + me->arch.got_offset;
3378 + got = me->module_core_rw + me->arch.got_offset;
3379 for (i = 0; got[i].addr; i++)
3380 if (got[i].addr == value)
3381 goto out;
3382 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3383 #ifdef CONFIG_64BIT
3384 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3385 {
3386 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3387 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3388
3389 if (!value) {
3390 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3391 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3392
3393 /* Create new one */
3394 fdesc->addr = value;
3395 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3396 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3397 return (Elf_Addr)fdesc;
3398 }
3399 #endif /* CONFIG_64BIT */
3400 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3401
3402 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3403 end = table + sechdrs[me->arch.unwind_section].sh_size;
3404 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3405 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3406
3407 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3408 me->arch.unwind_section, table, end, gp);
3409 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3410 index c9b9322..02d8940 100644
3411 --- a/arch/parisc/kernel/sys_parisc.c
3412 +++ b/arch/parisc/kernel/sys_parisc.c
3413 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3414 /* At this point: (!vma || addr < vma->vm_end). */
3415 if (TASK_SIZE - len < addr)
3416 return -ENOMEM;
3417 - if (!vma || addr + len <= vma->vm_start)
3418 + if (check_heap_stack_gap(vma, addr, len))
3419 return addr;
3420 addr = vma->vm_end;
3421 }
3422 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3423 /* At this point: (!vma || addr < vma->vm_end). */
3424 if (TASK_SIZE - len < addr)
3425 return -ENOMEM;
3426 - if (!vma || addr + len <= vma->vm_start)
3427 + if (check_heap_stack_gap(vma, addr, len))
3428 return addr;
3429 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3430 if (addr < vma->vm_end) /* handle wraparound */
3431 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3432 if (flags & MAP_FIXED)
3433 return addr;
3434 if (!addr)
3435 - addr = TASK_UNMAPPED_BASE;
3436 + addr = current->mm->mmap_base;
3437
3438 if (filp) {
3439 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
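This file is the first of several in this excerpt (parisc sys_parisc.c here, powerpc slice.c and sh mm/mmap.c below) where the open-coded test `!vma || addr + len <= vma->vm_start` is swapped for check_heap_stack_gap(vma, addr, len). The helper's real definition is not part of this excerpt; the sketch below is purely hypothetical and only illustrates the shape of such a predicate -- the original fit test plus an extra guard band kept below stack-style mappings -- and every identifier and constant in it is an assumption, not the patch's code.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical sketch, NOT the patch's implementation. */
struct vma_sketch {
	unsigned long vm_start;
	unsigned long vm_flags;
};

#define SKETCH_VM_GROWSDOWN  0x1UL
#define SKETCH_GUARD_GAP     (64UL * 1024)   /* made-up gap size */

static bool check_heap_stack_gap_sketch(const struct vma_sketch *vma,
					unsigned long addr, unsigned long len)
{
	if (!vma)
		return true;                    /* nothing above the request */
	if (addr + len > vma->vm_start)
		return false;                   /* would overlap the next vma */
	if (vma->vm_flags & SKETCH_VM_GROWSDOWN)
		/* keep a gap below a downward-growing (stack-like) mapping */
		return addr + len + SKETCH_GUARD_GAP <= vma->vm_start;
	return true;
}

int main(void)
{
	struct vma_sketch stack = { 0x7f000000UL, SKETCH_VM_GROWSDOWN };

	/* a request well below the stack vs. one right underneath it */
	printf("%d %d\n",
	       check_heap_stack_gap_sketch(&stack, 0x10000000UL, 0x1000),
	       check_heap_stack_gap_sketch(&stack, 0x7efff000UL, 0x1000));
	return 0;    /* prints "1 0" */
}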
3440 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3441 index 45ba99f..8e22c33 100644
3442 --- a/arch/parisc/kernel/traps.c
3443 +++ b/arch/parisc/kernel/traps.c
3444 @@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3445
3446 down_read(&current->mm->mmap_sem);
3447 vma = find_vma(current->mm,regs->iaoq[0]);
3448 - if (vma && (regs->iaoq[0] >= vma->vm_start)
3449 - && (vma->vm_flags & VM_EXEC)) {
3450 -
3451 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3452 fault_address = regs->iaoq[0];
3453 fault_space = regs->iasq[0];
3454
3455 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3456 index 18162ce..94de376 100644
3457 --- a/arch/parisc/mm/fault.c
3458 +++ b/arch/parisc/mm/fault.c
3459 @@ -15,6 +15,7 @@
3460 #include <linux/sched.h>
3461 #include <linux/interrupt.h>
3462 #include <linux/module.h>
3463 +#include <linux/unistd.h>
3464
3465 #include <asm/uaccess.h>
3466 #include <asm/traps.h>
3467 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3468 static unsigned long
3469 parisc_acctyp(unsigned long code, unsigned int inst)
3470 {
3471 - if (code == 6 || code == 16)
3472 + if (code == 6 || code == 7 || code == 16)
3473 return VM_EXEC;
3474
3475 switch (inst & 0xf0000000) {
3476 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3477 }
3478 #endif
3479
3480 +#ifdef CONFIG_PAX_PAGEEXEC
3481 +/*
3482 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3483 + *
3484 + * returns 1 when task should be killed
3485 + * 2 when rt_sigreturn trampoline was detected
3486 + * 3 when unpatched PLT trampoline was detected
3487 + */
3488 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3489 +{
3490 +
3491 +#ifdef CONFIG_PAX_EMUPLT
3492 + int err;
3493 +
3494 + do { /* PaX: unpatched PLT emulation */
3495 + unsigned int bl, depwi;
3496 +
3497 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3498 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3499 +
3500 + if (err)
3501 + break;
3502 +
3503 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3504 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3505 +
3506 + err = get_user(ldw, (unsigned int *)addr);
3507 + err |= get_user(bv, (unsigned int *)(addr+4));
3508 + err |= get_user(ldw2, (unsigned int *)(addr+8));
3509 +
3510 + if (err)
3511 + break;
3512 +
3513 + if (ldw == 0x0E801096U &&
3514 + bv == 0xEAC0C000U &&
3515 + ldw2 == 0x0E881095U)
3516 + {
3517 + unsigned int resolver, map;
3518 +
3519 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3520 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3521 + if (err)
3522 + break;
3523 +
3524 + regs->gr[20] = instruction_pointer(regs)+8;
3525 + regs->gr[21] = map;
3526 + regs->gr[22] = resolver;
3527 + regs->iaoq[0] = resolver | 3UL;
3528 + regs->iaoq[1] = regs->iaoq[0] + 4;
3529 + return 3;
3530 + }
3531 + }
3532 + } while (0);
3533 +#endif
3534 +
3535 +#ifdef CONFIG_PAX_EMUTRAMP
3536 +
3537 +#ifndef CONFIG_PAX_EMUSIGRT
3538 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3539 + return 1;
3540 +#endif
3541 +
3542 + do { /* PaX: rt_sigreturn emulation */
3543 + unsigned int ldi1, ldi2, bel, nop;
3544 +
3545 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3546 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3547 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3548 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3549 +
3550 + if (err)
3551 + break;
3552 +
3553 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3554 + ldi2 == 0x3414015AU &&
3555 + bel == 0xE4008200U &&
3556 + nop == 0x08000240U)
3557 + {
3558 + regs->gr[25] = (ldi1 & 2) >> 1;
3559 + regs->gr[20] = __NR_rt_sigreturn;
3560 + regs->gr[31] = regs->iaoq[1] + 16;
3561 + regs->sr[0] = regs->iasq[1];
3562 + regs->iaoq[0] = 0x100UL;
3563 + regs->iaoq[1] = regs->iaoq[0] + 4;
3564 + regs->iasq[0] = regs->sr[2];
3565 + regs->iasq[1] = regs->sr[2];
3566 + return 2;
3567 + }
3568 + } while (0);
3569 +#endif
3570 +
3571 + return 1;
3572 +}
3573 +
3574 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3575 +{
3576 + unsigned long i;
3577 +
3578 + printk(KERN_ERR "PAX: bytes at PC: ");
3579 + for (i = 0; i < 5; i++) {
3580 + unsigned int c;
3581 + if (get_user(c, (unsigned int *)pc+i))
3582 + printk(KERN_CONT "???????? ");
3583 + else
3584 + printk(KERN_CONT "%08x ", c);
3585 + }
3586 + printk("\n");
3587 +}
3588 +#endif
3589 +
3590 int fixup_exception(struct pt_regs *regs)
3591 {
3592 const struct exception_table_entry *fix;
3593 @@ -192,8 +303,33 @@ good_area:
3594
3595 acc_type = parisc_acctyp(code,regs->iir);
3596
3597 - if ((vma->vm_flags & acc_type) != acc_type)
3598 + if ((vma->vm_flags & acc_type) != acc_type) {
3599 +
3600 +#ifdef CONFIG_PAX_PAGEEXEC
3601 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3602 + (address & ~3UL) == instruction_pointer(regs))
3603 + {
3604 + up_read(&mm->mmap_sem);
3605 + switch (pax_handle_fetch_fault(regs)) {
3606 +
3607 +#ifdef CONFIG_PAX_EMUPLT
3608 + case 3:
3609 + return;
3610 +#endif
3611 +
3612 +#ifdef CONFIG_PAX_EMUTRAMP
3613 + case 2:
3614 + return;
3615 +#endif
3616 +
3617 + }
3618 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3619 + do_group_exit(SIGKILL);
3620 + }
3621 +#endif
3622 +
3623 goto bad_area;
3624 + }
3625
3626 /*
3627 * If for any reason at all we couldn't handle the fault, make
3628 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3629 index da29032..f76c24c 100644
3630 --- a/arch/powerpc/include/asm/atomic.h
3631 +++ b/arch/powerpc/include/asm/atomic.h
3632 @@ -522,6 +522,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
3633 return t1;
3634 }
3635
3636 +#define atomic64_read_unchecked(v) atomic64_read(v)
3637 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3638 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3639 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3640 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3641 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3642 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3643 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3644 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3645 +
3646 #endif /* __powerpc64__ */
3647
3648 #endif /* __KERNEL__ */
3649 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3650 index 9e495c9..b6878e5 100644
3651 --- a/arch/powerpc/include/asm/cache.h
3652 +++ b/arch/powerpc/include/asm/cache.h
3653 @@ -3,6 +3,7 @@
3654
3655 #ifdef __KERNEL__
3656
3657 +#include <linux/const.h>
3658
3659 /* bytes per L1 cache line */
3660 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3661 @@ -22,7 +23,7 @@
3662 #define L1_CACHE_SHIFT 7
3663 #endif
3664
3665 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3666 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3667
3668 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3669
3670 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3671 index 3bf9cca..e7457d0 100644
3672 --- a/arch/powerpc/include/asm/elf.h
3673 +++ b/arch/powerpc/include/asm/elf.h
3674 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3675 the loader. We need to make sure that it is out of the way of the program
3676 that it will "exec", and that there is sufficient room for the brk. */
3677
3678 -extern unsigned long randomize_et_dyn(unsigned long base);
3679 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3680 +#define ELF_ET_DYN_BASE (0x20000000)
3681 +
3682 +#ifdef CONFIG_PAX_ASLR
3683 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3684 +
3685 +#ifdef __powerpc64__
3686 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3687 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3688 +#else
3689 +#define PAX_DELTA_MMAP_LEN 15
3690 +#define PAX_DELTA_STACK_LEN 15
3691 +#endif
3692 +#endif
3693
3694 /*
3695 * Our registers are always unsigned longs, whether we're a 32 bit
3696 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3697 (0x7ff >> (PAGE_SHIFT - 12)) : \
3698 (0x3ffff >> (PAGE_SHIFT - 12)))
3699
3700 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3701 -#define arch_randomize_brk arch_randomize_brk
3702 -
3703 #endif /* __KERNEL__ */
3704
3705 /*
3706 diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
3707 index 8196e9c..d83a9f3 100644
3708 --- a/arch/powerpc/include/asm/exec.h
3709 +++ b/arch/powerpc/include/asm/exec.h
3710 @@ -4,6 +4,6 @@
3711 #ifndef _ASM_POWERPC_EXEC_H
3712 #define _ASM_POWERPC_EXEC_H
3713
3714 -extern unsigned long arch_align_stack(unsigned long sp);
3715 +#define arch_align_stack(x) ((x) & ~0xfUL)
3716
3717 #endif /* _ASM_POWERPC_EXEC_H */
3718 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3719 index bca8fdc..61e9580 100644
3720 --- a/arch/powerpc/include/asm/kmap_types.h
3721 +++ b/arch/powerpc/include/asm/kmap_types.h
3722 @@ -27,6 +27,7 @@ enum km_type {
3723 KM_PPC_SYNC_PAGE,
3724 KM_PPC_SYNC_ICACHE,
3725 KM_KDB,
3726 + KM_CLEARPAGE,
3727 KM_TYPE_NR
3728 };
3729
3730 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3731 index d4a7f64..451de1c 100644
3732 --- a/arch/powerpc/include/asm/mman.h
3733 +++ b/arch/powerpc/include/asm/mman.h
3734 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3735 }
3736 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3737
3738 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3739 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3740 {
3741 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3742 }
3743 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3744 index f072e97..b436dee 100644
3745 --- a/arch/powerpc/include/asm/page.h
3746 +++ b/arch/powerpc/include/asm/page.h
3747 @@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3748 * and needs to be executable. This means the whole heap ends
3749 * up being executable.
3750 */
3751 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3752 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3753 +#define VM_DATA_DEFAULT_FLAGS32 \
3754 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3755 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3756
3757 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3758 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3759 @@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3760 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3761 #endif
3762
3763 +#define ktla_ktva(addr) (addr)
3764 +#define ktva_ktla(addr) (addr)
3765 +
3766 /*
3767 * Use the top bit of the higher-level page table entries to indicate whether
3768 * the entries we point to contain hugepages. This works because we know that
3769 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3770 index fed85e6..da5c71b 100644
3771 --- a/arch/powerpc/include/asm/page_64.h
3772 +++ b/arch/powerpc/include/asm/page_64.h
3773 @@ -146,15 +146,18 @@ do { \
3774 * stack by default, so in the absence of a PT_GNU_STACK program header
3775 * we turn execute permission off.
3776 */
3777 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3778 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3779 +#define VM_STACK_DEFAULT_FLAGS32 \
3780 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3781 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3782
3783 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3784 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3785
3786 +#ifndef CONFIG_PAX_PAGEEXEC
3787 #define VM_STACK_DEFAULT_FLAGS \
3788 (is_32bit_task() ? \
3789 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3790 +#endif
3791
3792 #include <asm-generic/getorder.h>
3793
3794 diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
3795 index 292725c..f87ae14 100644
3796 --- a/arch/powerpc/include/asm/pgalloc-64.h
3797 +++ b/arch/powerpc/include/asm/pgalloc-64.h
3798 @@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
3799 #ifndef CONFIG_PPC_64K_PAGES
3800
3801 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
3802 +#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
3803
3804 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3805 {
3806 @@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3807 pud_set(pud, (unsigned long)pmd);
3808 }
3809
3810 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3811 +{
3812 + pud_populate(mm, pud, pmd);
3813 +}
3814 +
3815 #define pmd_populate(mm, pmd, pte_page) \
3816 pmd_populate_kernel(mm, pmd, page_address(pte_page))
3817 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
3818 @@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3819 #else /* CONFIG_PPC_64K_PAGES */
3820
3821 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
3822 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
3823
3824 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
3825 pte_t *pte)
3826 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3827 index 2e0e411..7899c68 100644
3828 --- a/arch/powerpc/include/asm/pgtable.h
3829 +++ b/arch/powerpc/include/asm/pgtable.h
3830 @@ -2,6 +2,7 @@
3831 #define _ASM_POWERPC_PGTABLE_H
3832 #ifdef __KERNEL__
3833
3834 +#include <linux/const.h>
3835 #ifndef __ASSEMBLY__
3836 #include <asm/processor.h> /* For TASK_SIZE */
3837 #include <asm/mmu.h>
3838 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3839 index 4aad413..85d86bf 100644
3840 --- a/arch/powerpc/include/asm/pte-hash32.h
3841 +++ b/arch/powerpc/include/asm/pte-hash32.h
3842 @@ -21,6 +21,7 @@
3843 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3844 #define _PAGE_USER 0x004 /* usermode access allowed */
3845 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3846 +#define _PAGE_EXEC _PAGE_GUARDED
3847 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3848 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3849 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3850 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3851 index 9d7f0fb..a28fe69 100644
3852 --- a/arch/powerpc/include/asm/reg.h
3853 +++ b/arch/powerpc/include/asm/reg.h
3854 @@ -212,6 +212,7 @@
3855 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3856 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3857 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3858 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3859 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3860 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3861 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
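The hunk above introduces DSISR_GUARDED (0x10000000); a later hunk in arch/powerpc/mm/fault.c widens the mask applied to instruction-fault error codes from 0x48200000 to 0x58200000 and tests DSISR_GUARDED alongside DSISR_PROTFAULT. A one-line check, using only values visible in the diff, that the widened mask is exactly the old mask plus the new guarded-storage bit:

#include <stdio.h>

int main(void)
{
	unsigned int old_mask = 0x48200000;
	unsigned int guarded  = 0x10000000;   /* DSISR_GUARDED from the hunk */

	printf("%#x\n", old_mask | guarded);  /* prints 0x58200000 */
	return 0;
}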
3862 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
3863 index 4a741c7..c8162227b 100644
3864 --- a/arch/powerpc/include/asm/thread_info.h
3865 +++ b/arch/powerpc/include/asm/thread_info.h
3866 @@ -104,12 +104,14 @@ static inline struct thread_info *current_thread_info(void)
3867 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
3868 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
3869 #define TIF_SINGLESTEP 8 /* singlestepping active */
3870 -#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
3871 #define TIF_SECCOMP 10 /* secure computing */
3872 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
3873 #define TIF_NOERROR 12 /* Force successful syscall return */
3874 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
3875 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
3876 +#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
3877 +/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
3878 +#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
3879
3880 /* as above, but as bit values */
3881 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3882 @@ -127,8 +129,11 @@ static inline struct thread_info *current_thread_info(void)
3883 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
3884 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
3885 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
3886 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3887 +
3888 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
3889 - _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
3890 + _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
3891 + _TIF_GRSEC_SETXID)
3892
3893 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
3894 _TIF_NOTIFY_RESUME)
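The comment added in the hunk above says the syscall-work mask must be expressible in 16 bits to satisfy 'andi': the PowerPC andi. instruction takes only a 16-bit unsigned immediate. That is presumably why TIF_MEMDIE (not used in those immediate masks) is pushed up to bit 16 and TIF_GRSEC_SETXID takes the freed slot 9. A quick standalone check of which bit values still fit:

#include <stdio.h>

/* andi. on PowerPC takes a 16-bit unsigned immediate, so only TIF bits
 * below 16 can appear in masks AND-ed in with a single andi. */
int main(void)
{
	unsigned long setxid = 1UL << 9;    /* new TIF_GRSEC_SETXID slot */
	unsigned long memdie = 1UL << 16;   /* TIF_MEMDIE's new home     */

	printf("1<<9  = %#lx, fits in 16 bits: %d\n", setxid, setxid <= 0xffffUL);
	printf("1<<16 = %#lx, fits in 16 bits: %d\n", memdie, memdie <= 0xffffUL);
	return 0;
}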
3895 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3896 index bd0fb84..a42a14b 100644
3897 --- a/arch/powerpc/include/asm/uaccess.h
3898 +++ b/arch/powerpc/include/asm/uaccess.h
3899 @@ -13,6 +13,8 @@
3900 #define VERIFY_READ 0
3901 #define VERIFY_WRITE 1
3902
3903 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3904 +
3905 /*
3906 * The fs value determines whether argument validity checking should be
3907 * performed or not. If get_fs() == USER_DS, checking is performed, with
3908 @@ -327,52 +329,6 @@ do { \
3909 extern unsigned long __copy_tofrom_user(void __user *to,
3910 const void __user *from, unsigned long size);
3911
3912 -#ifndef __powerpc64__
3913 -
3914 -static inline unsigned long copy_from_user(void *to,
3915 - const void __user *from, unsigned long n)
3916 -{
3917 - unsigned long over;
3918 -
3919 - if (access_ok(VERIFY_READ, from, n))
3920 - return __copy_tofrom_user((__force void __user *)to, from, n);
3921 - if ((unsigned long)from < TASK_SIZE) {
3922 - over = (unsigned long)from + n - TASK_SIZE;
3923 - return __copy_tofrom_user((__force void __user *)to, from,
3924 - n - over) + over;
3925 - }
3926 - return n;
3927 -}
3928 -
3929 -static inline unsigned long copy_to_user(void __user *to,
3930 - const void *from, unsigned long n)
3931 -{
3932 - unsigned long over;
3933 -
3934 - if (access_ok(VERIFY_WRITE, to, n))
3935 - return __copy_tofrom_user(to, (__force void __user *)from, n);
3936 - if ((unsigned long)to < TASK_SIZE) {
3937 - over = (unsigned long)to + n - TASK_SIZE;
3938 - return __copy_tofrom_user(to, (__force void __user *)from,
3939 - n - over) + over;
3940 - }
3941 - return n;
3942 -}
3943 -
3944 -#else /* __powerpc64__ */
3945 -
3946 -#define __copy_in_user(to, from, size) \
3947 - __copy_tofrom_user((to), (from), (size))
3948 -
3949 -extern unsigned long copy_from_user(void *to, const void __user *from,
3950 - unsigned long n);
3951 -extern unsigned long copy_to_user(void __user *to, const void *from,
3952 - unsigned long n);
3953 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
3954 - unsigned long n);
3955 -
3956 -#endif /* __powerpc64__ */
3957 -
3958 static inline unsigned long __copy_from_user_inatomic(void *to,
3959 const void __user *from, unsigned long n)
3960 {
3961 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3962 if (ret == 0)
3963 return 0;
3964 }
3965 +
3966 + if (!__builtin_constant_p(n))
3967 + check_object_size(to, n, false);
3968 +
3969 return __copy_tofrom_user((__force void __user *)to, from, n);
3970 }
3971
3972 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3973 if (ret == 0)
3974 return 0;
3975 }
3976 +
3977 + if (!__builtin_constant_p(n))
3978 + check_object_size(from, n, true);
3979 +
3980 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3981 }
3982
3983 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3984 return __copy_to_user_inatomic(to, from, size);
3985 }
3986
3987 +#ifndef __powerpc64__
3988 +
3989 +static inline unsigned long __must_check copy_from_user(void *to,
3990 + const void __user *from, unsigned long n)
3991 +{
3992 + unsigned long over;
3993 +
3994 + if ((long)n < 0)
3995 + return n;
3996 +
3997 + if (access_ok(VERIFY_READ, from, n)) {
3998 + if (!__builtin_constant_p(n))
3999 + check_object_size(to, n, false);
4000 + return __copy_tofrom_user((__force void __user *)to, from, n);
4001 + }
4002 + if ((unsigned long)from < TASK_SIZE) {
4003 + over = (unsigned long)from + n - TASK_SIZE;
4004 + if (!__builtin_constant_p(n - over))
4005 + check_object_size(to, n - over, false);
4006 + return __copy_tofrom_user((__force void __user *)to, from,
4007 + n - over) + over;
4008 + }
4009 + return n;
4010 +}
4011 +
4012 +static inline unsigned long __must_check copy_to_user(void __user *to,
4013 + const void *from, unsigned long n)
4014 +{
4015 + unsigned long over;
4016 +
4017 + if ((long)n < 0)
4018 + return n;
4019 +
4020 + if (access_ok(VERIFY_WRITE, to, n)) {
4021 + if (!__builtin_constant_p(n))
4022 + check_object_size(from, n, true);
4023 + return __copy_tofrom_user(to, (__force void __user *)from, n);
4024 + }
4025 + if ((unsigned long)to < TASK_SIZE) {
4026 + over = (unsigned long)to + n - TASK_SIZE;
4027 + if (!__builtin_constant_p(n))
4028 + check_object_size(from, n - over, true);
4029 + return __copy_tofrom_user(to, (__force void __user *)from,
4030 + n - over) + over;
4031 + }
4032 + return n;
4033 +}
4034 +
4035 +#else /* __powerpc64__ */
4036 +
4037 +#define __copy_in_user(to, from, size) \
4038 + __copy_tofrom_user((to), (from), (size))
4039 +
4040 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
4041 +{
4042 + if ((long)n < 0 || n > INT_MAX)
4043 + return n;
4044 +
4045 + if (!__builtin_constant_p(n))
4046 + check_object_size(to, n, false);
4047 +
4048 + if (likely(access_ok(VERIFY_READ, from, n)))
4049 + n = __copy_from_user(to, from, n);
4050 + else
4051 + memset(to, 0, n);
4052 + return n;
4053 +}
4054 +
4055 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
4056 +{
4057 + if ((long)n < 0 || n > INT_MAX)
4058 + return n;
4059 +
4060 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
4061 + if (!__builtin_constant_p(n))
4062 + check_object_size(from, n, true);
4063 + n = __copy_to_user(to, from, n);
4064 + }
4065 + return n;
4066 +}
4067 +
4068 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
4069 + unsigned long n);
4070 +
4071 +#endif /* __powerpc64__ */
4072 +
4073 extern unsigned long __clear_user(void __user *addr, unsigned long size);
4074
4075 static inline unsigned long clear_user(void __user *addr, unsigned long size)
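Both rewritten copy_from_user()/copy_to_user() variants above begin with `if ((long)n < 0) return n;`, and the 64-bit ones additionally reject n > INT_MAX. The point is to catch lengths that underflowed in the caller: a subtraction that wraps produces a huge unsigned value, which viewed as a signed long is negative, so the copy is refused before access_ok() and the usercopy size check ever see it. A small standalone illustration of the pattern (the variable names are made up for the example):

#include <stdio.h>

int main(void)
{
	unsigned long payload = 16, header = 24;
	unsigned long n = payload - header;   /* wraps to a huge value */

	if ((long)n < 0) {
		/* same test the patched copy_*_user() helpers apply */
		printf("rejected: n = %lu looks like an underflow\n", n);
		return 1;
	}
	return 0;
}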
4076 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
4077 index 7215cc2..a9730c1 100644
4078 --- a/arch/powerpc/kernel/exceptions-64e.S
4079 +++ b/arch/powerpc/kernel/exceptions-64e.S
4080 @@ -661,6 +661,7 @@ storage_fault_common:
4081 std r14,_DAR(r1)
4082 std r15,_DSISR(r1)
4083 addi r3,r1,STACK_FRAME_OVERHEAD
4084 + bl .save_nvgprs
4085 mr r4,r14
4086 mr r5,r15
4087 ld r14,PACA_EXGEN+EX_R14(r13)
4088 @@ -669,8 +670,7 @@ storage_fault_common:
4089 cmpdi r3,0
4090 bne- 1f
4091 b .ret_from_except_lite
4092 -1: bl .save_nvgprs
4093 - mr r5,r3
4094 +1: mr r5,r3
4095 addi r3,r1,STACK_FRAME_OVERHEAD
4096 ld r4,_DAR(r1)
4097 bl .bad_page_fault
4098 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
4099 index 8f880bc..c5bd2f3 100644
4100 --- a/arch/powerpc/kernel/exceptions-64s.S
4101 +++ b/arch/powerpc/kernel/exceptions-64s.S
4102 @@ -890,10 +890,10 @@ handle_page_fault:
4103 11: ld r4,_DAR(r1)
4104 ld r5,_DSISR(r1)
4105 addi r3,r1,STACK_FRAME_OVERHEAD
4106 + bl .save_nvgprs
4107 bl .do_page_fault
4108 cmpdi r3,0
4109 beq+ 12f
4110 - bl .save_nvgprs
4111 mr r5,r3
4112 addi r3,r1,STACK_FRAME_OVERHEAD
4113 lwz r4,_DAR(r1)
4114 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4115 index 0b6d796..d760ddb 100644
4116 --- a/arch/powerpc/kernel/module_32.c
4117 +++ b/arch/powerpc/kernel/module_32.c
4118 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4119 me->arch.core_plt_section = i;
4120 }
4121 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4122 - printk("Module doesn't contain .plt or .init.plt sections.\n");
4123 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4124 return -ENOEXEC;
4125 }
4126
4127 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
4128
4129 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4130 /* Init, or core PLT? */
4131 - if (location >= mod->module_core
4132 - && location < mod->module_core + mod->core_size)
4133 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4134 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4135 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4136 - else
4137 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4138 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4139 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4140 + else {
4141 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4142 + return ~0UL;
4143 + }
4144
4145 /* Find this entry, or if that fails, the next avail. entry */
4146 while (entry->jump[0]) {
4147 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4148 index 4937c96..70714b7 100644
4149 --- a/arch/powerpc/kernel/process.c
4150 +++ b/arch/powerpc/kernel/process.c
4151 @@ -681,8 +681,8 @@ void show_regs(struct pt_regs * regs)
4152 * Lookup NIP late so we have the best change of getting the
4153 * above info out without failing
4154 */
4155 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4156 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4157 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4158 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4159 #endif
4160 show_stack(current, (unsigned long *) regs->gpr[1]);
4161 if (!user_mode(regs))
4162 @@ -1186,10 +1186,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4163 newsp = stack[0];
4164 ip = stack[STACK_FRAME_LR_SAVE];
4165 if (!firstframe || ip != lr) {
4166 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4167 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4168 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4169 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4170 - printk(" (%pS)",
4171 + printk(" (%pA)",
4172 (void *)current->ret_stack[curr_frame].ret);
4173 curr_frame--;
4174 }
4175 @@ -1209,7 +1209,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4176 struct pt_regs *regs = (struct pt_regs *)
4177 (sp + STACK_FRAME_OVERHEAD);
4178 lr = regs->link;
4179 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
4180 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
4181 regs->trap, (void *)regs->nip, (void *)lr);
4182 firstframe = 1;
4183 }
4184 @@ -1282,58 +1282,3 @@ void thread_info_cache_init(void)
4185 }
4186
4187 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4188 -
4189 -unsigned long arch_align_stack(unsigned long sp)
4190 -{
4191 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4192 - sp -= get_random_int() & ~PAGE_MASK;
4193 - return sp & ~0xf;
4194 -}
4195 -
4196 -static inline unsigned long brk_rnd(void)
4197 -{
4198 - unsigned long rnd = 0;
4199 -
4200 - /* 8MB for 32bit, 1GB for 64bit */
4201 - if (is_32bit_task())
4202 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4203 - else
4204 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4205 -
4206 - return rnd << PAGE_SHIFT;
4207 -}
4208 -
4209 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4210 -{
4211 - unsigned long base = mm->brk;
4212 - unsigned long ret;
4213 -
4214 -#ifdef CONFIG_PPC_STD_MMU_64
4215 - /*
4216 - * If we are using 1TB segments and we are allowed to randomise
4217 - * the heap, we can put it above 1TB so it is backed by a 1TB
4218 - * segment. Otherwise the heap will be in the bottom 1TB
4219 - * which always uses 256MB segments and this may result in a
4220 - * performance penalty.
4221 - */
4222 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4223 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4224 -#endif
4225 -
4226 - ret = PAGE_ALIGN(base + brk_rnd());
4227 -
4228 - if (ret < mm->brk)
4229 - return mm->brk;
4230 -
4231 - return ret;
4232 -}
4233 -
4234 -unsigned long randomize_et_dyn(unsigned long base)
4235 -{
4236 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4237 -
4238 - if (ret < base)
4239 - return base;
4240 -
4241 - return ret;
4242 -}
4243 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4244 index 8d8e028..c2aeb50 100644
4245 --- a/arch/powerpc/kernel/ptrace.c
4246 +++ b/arch/powerpc/kernel/ptrace.c
4247 @@ -1702,6 +1702,10 @@ long arch_ptrace(struct task_struct *child, long request,
4248 return ret;
4249 }
4250
4251 +#ifdef CONFIG_GRKERNSEC_SETXID
4252 +extern void gr_delayed_cred_worker(void);
4253 +#endif
4254 +
4255 /*
4256 * We must return the syscall number to actually look up in the table.
4257 * This can be -1L to skip running any syscall at all.
4258 @@ -1712,6 +1716,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
4259
4260 secure_computing(regs->gpr[0]);
4261
4262 +#ifdef CONFIG_GRKERNSEC_SETXID
4263 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4264 + gr_delayed_cred_worker();
4265 +#endif
4266 +
4267 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
4268 tracehook_report_syscall_entry(regs))
4269 /*
4270 @@ -1746,6 +1755,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
4271 {
4272 int step;
4273
4274 +#ifdef CONFIG_GRKERNSEC_SETXID
4275 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4276 + gr_delayed_cred_worker();
4277 +#endif
4278 +
4279 audit_syscall_exit(regs);
4280
4281 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
4282 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4283 index 45eb998..0cb36bc 100644
4284 --- a/arch/powerpc/kernel/signal_32.c
4285 +++ b/arch/powerpc/kernel/signal_32.c
4286 @@ -861,7 +861,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4287 /* Save user registers on the stack */
4288 frame = &rt_sf->uc.uc_mcontext;
4289 addr = frame;
4290 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4291 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4292 if (save_user_regs(regs, frame, 0, 1))
4293 goto badframe;
4294 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4295 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4296 index 2692efd..6673d2e 100644
4297 --- a/arch/powerpc/kernel/signal_64.c
4298 +++ b/arch/powerpc/kernel/signal_64.c
4299 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4300 current->thread.fpscr.val = 0;
4301
4302 /* Set up to return from userspace. */
4303 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4304 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4305 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4306 } else {
4307 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4308 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4309 index 1589723..cefe690 100644
4310 --- a/arch/powerpc/kernel/traps.c
4311 +++ b/arch/powerpc/kernel/traps.c
4312 @@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
4313 return flags;
4314 }
4315
4316 +extern void gr_handle_kernel_exploit(void);
4317 +
4318 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4319 int signr)
4320 {
4321 @@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4322 panic("Fatal exception in interrupt");
4323 if (panic_on_oops)
4324 panic("Fatal exception");
4325 +
4326 + gr_handle_kernel_exploit();
4327 +
4328 do_exit(signr);
4329 }
4330
4331 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4332 index 9eb5b9b..e45498a 100644
4333 --- a/arch/powerpc/kernel/vdso.c
4334 +++ b/arch/powerpc/kernel/vdso.c
4335 @@ -34,6 +34,7 @@
4336 #include <asm/firmware.h>
4337 #include <asm/vdso.h>
4338 #include <asm/vdso_datapage.h>
4339 +#include <asm/mman.h>
4340
4341 #include "setup.h"
4342
4343 @@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4344 vdso_base = VDSO32_MBASE;
4345 #endif
4346
4347 - current->mm->context.vdso_base = 0;
4348 + current->mm->context.vdso_base = ~0UL;
4349
4350 /* vDSO has a problem and was disabled, just don't "enable" it for the
4351 * process
4352 @@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4353 vdso_base = get_unmapped_area(NULL, vdso_base,
4354 (vdso_pages << PAGE_SHIFT) +
4355 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4356 - 0, 0);
4357 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
4358 if (IS_ERR_VALUE(vdso_base)) {
4359 rc = vdso_base;
4360 goto fail_mmapsem;
4361 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4362 index 5eea6f3..5d10396 100644
4363 --- a/arch/powerpc/lib/usercopy_64.c
4364 +++ b/arch/powerpc/lib/usercopy_64.c
4365 @@ -9,22 +9,6 @@
4366 #include <linux/module.h>
4367 #include <asm/uaccess.h>
4368
4369 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4370 -{
4371 - if (likely(access_ok(VERIFY_READ, from, n)))
4372 - n = __copy_from_user(to, from, n);
4373 - else
4374 - memset(to, 0, n);
4375 - return n;
4376 -}
4377 -
4378 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4379 -{
4380 - if (likely(access_ok(VERIFY_WRITE, to, n)))
4381 - n = __copy_to_user(to, from, n);
4382 - return n;
4383 -}
4384 -
4385 unsigned long copy_in_user(void __user *to, const void __user *from,
4386 unsigned long n)
4387 {
4388 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4389 return n;
4390 }
4391
4392 -EXPORT_SYMBOL(copy_from_user);
4393 -EXPORT_SYMBOL(copy_to_user);
4394 EXPORT_SYMBOL(copy_in_user);
4395
4396 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4397 index 08ffcf5..a0ab912 100644
4398 --- a/arch/powerpc/mm/fault.c
4399 +++ b/arch/powerpc/mm/fault.c
4400 @@ -32,6 +32,10 @@
4401 #include <linux/perf_event.h>
4402 #include <linux/magic.h>
4403 #include <linux/ratelimit.h>
4404 +#include <linux/slab.h>
4405 +#include <linux/pagemap.h>
4406 +#include <linux/compiler.h>
4407 +#include <linux/unistd.h>
4408
4409 #include <asm/firmware.h>
4410 #include <asm/page.h>
4411 @@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4412 }
4413 #endif
4414
4415 +#ifdef CONFIG_PAX_PAGEEXEC
4416 +/*
4417 + * PaX: decide what to do with offenders (regs->nip = fault address)
4418 + *
4419 + * returns 1 when task should be killed
4420 + */
4421 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4422 +{
4423 + return 1;
4424 +}
4425 +
4426 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4427 +{
4428 + unsigned long i;
4429 +
4430 + printk(KERN_ERR "PAX: bytes at PC: ");
4431 + for (i = 0; i < 5; i++) {
4432 + unsigned int c;
4433 + if (get_user(c, (unsigned int __user *)pc+i))
4434 + printk(KERN_CONT "???????? ");
4435 + else
4436 + printk(KERN_CONT "%08x ", c);
4437 + }
4438 + printk("\n");
4439 +}
4440 +#endif
4441 +
4442 /*
4443 * Check whether the instruction at regs->nip is a store using
4444 * an update addressing form which will update r1.
4445 @@ -215,7 +246,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4446 * indicate errors in DSISR but can validly be set in SRR1.
4447 */
4448 if (trap == 0x400)
4449 - error_code &= 0x48200000;
4450 + error_code &= 0x58200000;
4451 else
4452 is_write = error_code & DSISR_ISSTORE;
4453 #else
4454 @@ -366,7 +397,7 @@ good_area:
4455 * "undefined". Of those that can be set, this is the only
4456 * one which seems bad.
4457 */
4458 - if (error_code & 0x10000000)
4459 + if (error_code & DSISR_GUARDED)
4460 /* Guarded storage error. */
4461 goto bad_area;
4462 #endif /* CONFIG_8xx */
4463 @@ -381,7 +412,7 @@ good_area:
4464 * processors use the same I/D cache coherency mechanism
4465 * as embedded.
4466 */
4467 - if (error_code & DSISR_PROTFAULT)
4468 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4469 goto bad_area;
4470 #endif /* CONFIG_PPC_STD_MMU */
4471
4472 @@ -463,6 +494,23 @@ bad_area:
4473 bad_area_nosemaphore:
4474 /* User mode accesses cause a SIGSEGV */
4475 if (user_mode(regs)) {
4476 +
4477 +#ifdef CONFIG_PAX_PAGEEXEC
4478 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4479 +#ifdef CONFIG_PPC_STD_MMU
4480 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4481 +#else
4482 + if (is_exec && regs->nip == address) {
4483 +#endif
4484 + switch (pax_handle_fetch_fault(regs)) {
4485 + }
4486 +
4487 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4488 + do_group_exit(SIGKILL);
4489 + }
4490 + }
4491 +#endif
4492 +
4493 _exception(SIGSEGV, regs, code, address);
4494 return 0;
4495 }
4496 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4497 index 67a42ed..1c7210c 100644
4498 --- a/arch/powerpc/mm/mmap_64.c
4499 +++ b/arch/powerpc/mm/mmap_64.c
4500 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4501 */
4502 if (mmap_is_legacy()) {
4503 mm->mmap_base = TASK_UNMAPPED_BASE;
4504 +
4505 +#ifdef CONFIG_PAX_RANDMMAP
4506 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4507 + mm->mmap_base += mm->delta_mmap;
4508 +#endif
4509 +
4510 mm->get_unmapped_area = arch_get_unmapped_area;
4511 mm->unmap_area = arch_unmap_area;
4512 } else {
4513 mm->mmap_base = mmap_base();
4514 +
4515 +#ifdef CONFIG_PAX_RANDMMAP
4516 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4517 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4518 +#endif
4519 +
4520 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4521 mm->unmap_area = arch_unmap_area_topdown;
4522 }
4523 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4524 index 73709f7..6b90313 100644
4525 --- a/arch/powerpc/mm/slice.c
4526 +++ b/arch/powerpc/mm/slice.c
4527 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4528 if ((mm->task_size - len) < addr)
4529 return 0;
4530 vma = find_vma(mm, addr);
4531 - return (!vma || (addr + len) <= vma->vm_start);
4532 + return check_heap_stack_gap(vma, addr, len);
4533 }
4534
4535 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4536 @@ -256,7 +256,7 @@ full_search:
4537 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4538 continue;
4539 }
4540 - if (!vma || addr + len <= vma->vm_start) {
4541 + if (check_heap_stack_gap(vma, addr, len)) {
4542 /*
4543 * Remember the place where we stopped the search:
4544 */
4545 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4546 }
4547 }
4548
4549 - addr = mm->mmap_base;
4550 - while (addr > len) {
4551 + if (mm->mmap_base < len)
4552 + addr = -ENOMEM;
4553 + else
4554 + addr = mm->mmap_base - len;
4555 +
4556 + while (!IS_ERR_VALUE(addr)) {
4557 /* Go down by chunk size */
4558 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4559 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
4560
4561 /* Check for hit with different page size */
4562 mask = slice_range_to_mask(addr, len);
4563 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4564 * return with success:
4565 */
4566 vma = find_vma(mm, addr);
4567 - if (!vma || (addr + len) <= vma->vm_start) {
4568 + if (check_heap_stack_gap(vma, addr, len)) {
4569 /* remember the address as a hint for next time */
4570 if (use_cache)
4571 mm->free_area_cache = addr;
4572 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4573 mm->cached_hole_size = vma->vm_start - addr;
4574
4575 /* try just below the current vma->vm_start */
4576 - addr = vma->vm_start;
4577 + addr = skip_heap_stack_gap(vma, len);
4578 }
4579
4580 /*
4581 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4582 if (fixed && addr > (mm->task_size - len))
4583 return -EINVAL;
4584
4585 +#ifdef CONFIG_PAX_RANDMMAP
4586 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4587 + addr = 0;
4588 +#endif
4589 +
4590 /* If hint, make sure it matches our alignment restrictions */
4591 if (!fixed && addr) {
4592 addr = _ALIGN_UP(addr, 1ul << pshift);
4593 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4594 index 748347b..81bc6c7 100644
4595 --- a/arch/s390/include/asm/atomic.h
4596 +++ b/arch/s390/include/asm/atomic.h
4597 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4598 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4599 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4600
4601 +#define atomic64_read_unchecked(v) atomic64_read(v)
4602 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4603 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4604 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4605 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4606 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4607 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4608 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4609 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4610 +
4611 #define smp_mb__before_atomic_dec() smp_mb()
4612 #define smp_mb__after_atomic_dec() smp_mb()
4613 #define smp_mb__before_atomic_inc() smp_mb()
4614 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4615 index 2a30d5a..5e5586f 100644
4616 --- a/arch/s390/include/asm/cache.h
4617 +++ b/arch/s390/include/asm/cache.h
4618 @@ -11,8 +11,10 @@
4619 #ifndef __ARCH_S390_CACHE_H
4620 #define __ARCH_S390_CACHE_H
4621
4622 -#define L1_CACHE_BYTES 256
4623 +#include <linux/const.h>
4624 +
4625 #define L1_CACHE_SHIFT 8
4626 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4627 #define NET_SKB_PAD 32
4628
4629 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4630 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4631 index c4ee39f..352881b 100644
4632 --- a/arch/s390/include/asm/elf.h
4633 +++ b/arch/s390/include/asm/elf.h
4634 @@ -161,8 +161,14 @@ extern unsigned int vdso_enabled;
4635 the loader. We need to make sure that it is out of the way of the program
4636 that it will "exec", and that there is sufficient room for the brk. */
4637
4638 -extern unsigned long randomize_et_dyn(unsigned long base);
4639 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4640 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4641 +
4642 +#ifdef CONFIG_PAX_ASLR
4643 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4644 +
4645 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4646 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4647 +#endif
4648
4649 /* This yields a mask that user programs can use to figure out what
4650 instruction set this CPU supports. */
4651 @@ -210,7 +216,4 @@ struct linux_binprm;
4652 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4653 int arch_setup_additional_pages(struct linux_binprm *, int);
4654
4655 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4656 -#define arch_randomize_brk arch_randomize_brk
4657 -
4658 #endif
4659 diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
4660 index c4a93d6..4d2a9b4 100644
4661 --- a/arch/s390/include/asm/exec.h
4662 +++ b/arch/s390/include/asm/exec.h
4663 @@ -7,6 +7,6 @@
4664 #ifndef __ASM_EXEC_H
4665 #define __ASM_EXEC_H
4666
4667 -extern unsigned long arch_align_stack(unsigned long sp);
4668 +#define arch_align_stack(x) ((x) & ~0xfUL)
4669
4670 #endif /* __ASM_EXEC_H */
4671 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4672 index 8f2cada..1cddd55 100644
4673 --- a/arch/s390/include/asm/uaccess.h
4674 +++ b/arch/s390/include/asm/uaccess.h
4675 @@ -236,6 +236,10 @@ static inline unsigned long __must_check
4676 copy_to_user(void __user *to, const void *from, unsigned long n)
4677 {
4678 might_fault();
4679 +
4680 + if ((long)n < 0)
4681 + return n;
4682 +
4683 if (access_ok(VERIFY_WRITE, to, n))
4684 n = __copy_to_user(to, from, n);
4685 return n;
4686 @@ -261,6 +265,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4687 static inline unsigned long __must_check
4688 __copy_from_user(void *to, const void __user *from, unsigned long n)
4689 {
4690 + if ((long)n < 0)
4691 + return n;
4692 +
4693 if (__builtin_constant_p(n) && (n <= 256))
4694 return uaccess.copy_from_user_small(n, from, to);
4695 else
4696 @@ -295,6 +302,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
4697 unsigned int sz = __compiletime_object_size(to);
4698
4699 might_fault();
4700 +
4701 + if ((long)n < 0)
4702 + return n;
4703 +
4704 if (unlikely(sz != -1 && sz < n)) {
4705 copy_from_user_overflow();
4706 return n;
4707 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4708 index dfcb343..eda788a 100644
4709 --- a/arch/s390/kernel/module.c
4710 +++ b/arch/s390/kernel/module.c
4711 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4712
4713 /* Increase core size by size of got & plt and set start
4714 offsets for got and plt. */
4715 - me->core_size = ALIGN(me->core_size, 4);
4716 - me->arch.got_offset = me->core_size;
4717 - me->core_size += me->arch.got_size;
4718 - me->arch.plt_offset = me->core_size;
4719 - me->core_size += me->arch.plt_size;
4720 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4721 + me->arch.got_offset = me->core_size_rw;
4722 + me->core_size_rw += me->arch.got_size;
4723 + me->arch.plt_offset = me->core_size_rx;
4724 + me->core_size_rx += me->arch.plt_size;
4725 return 0;
4726 }
4727
4728 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4729 if (info->got_initialized == 0) {
4730 Elf_Addr *gotent;
4731
4732 - gotent = me->module_core + me->arch.got_offset +
4733 + gotent = me->module_core_rw + me->arch.got_offset +
4734 info->got_offset;
4735 *gotent = val;
4736 info->got_initialized = 1;
4737 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4738 else if (r_type == R_390_GOTENT ||
4739 r_type == R_390_GOTPLTENT)
4740 *(unsigned int *) loc =
4741 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4742 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4743 else if (r_type == R_390_GOT64 ||
4744 r_type == R_390_GOTPLT64)
4745 *(unsigned long *) loc = val;
4746 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4747 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4748 if (info->plt_initialized == 0) {
4749 unsigned int *ip;
4750 - ip = me->module_core + me->arch.plt_offset +
4751 + ip = me->module_core_rx + me->arch.plt_offset +
4752 info->plt_offset;
4753 #ifndef CONFIG_64BIT
4754 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4755 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4756 val - loc + 0xffffUL < 0x1ffffeUL) ||
4757 (r_type == R_390_PLT32DBL &&
4758 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4759 - val = (Elf_Addr) me->module_core +
4760 + val = (Elf_Addr) me->module_core_rx +
4761 me->arch.plt_offset +
4762 info->plt_offset;
4763 val += rela->r_addend - loc;
4764 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4765 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4766 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4767 val = val + rela->r_addend -
4768 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4769 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4770 if (r_type == R_390_GOTOFF16)
4771 *(unsigned short *) loc = val;
4772 else if (r_type == R_390_GOTOFF32)
4773 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4774 break;
4775 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4776 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4777 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4778 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4779 rela->r_addend - loc;
4780 if (r_type == R_390_GOTPC)
4781 *(unsigned int *) loc = val;
4782 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4783 index 60055ce..ee4b252 100644
4784 --- a/arch/s390/kernel/process.c
4785 +++ b/arch/s390/kernel/process.c
4786 @@ -316,39 +316,3 @@ unsigned long get_wchan(struct task_struct *p)
4787 }
4788 return 0;
4789 }
4790 -
4791 -unsigned long arch_align_stack(unsigned long sp)
4792 -{
4793 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4794 - sp -= get_random_int() & ~PAGE_MASK;
4795 - return sp & ~0xf;
4796 -}
4797 -
4798 -static inline unsigned long brk_rnd(void)
4799 -{
4800 - /* 8MB for 32bit, 1GB for 64bit */
4801 - if (is_32bit_task())
4802 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4803 - else
4804 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4805 -}
4806 -
4807 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4808 -{
4809 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4810 -
4811 - if (ret < mm->brk)
4812 - return mm->brk;
4813 - return ret;
4814 -}
4815 -
4816 -unsigned long randomize_et_dyn(unsigned long base)
4817 -{
4818 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4819 -
4820 - if (!(current->flags & PF_RANDOMIZE))
4821 - return base;
4822 - if (ret < base)
4823 - return base;
4824 - return ret;
4825 -}
4826 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4827 index 2857c48..d047481 100644
4828 --- a/arch/s390/mm/mmap.c
4829 +++ b/arch/s390/mm/mmap.c
4830 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4831 */
4832 if (mmap_is_legacy()) {
4833 mm->mmap_base = TASK_UNMAPPED_BASE;
4834 +
4835 +#ifdef CONFIG_PAX_RANDMMAP
4836 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4837 + mm->mmap_base += mm->delta_mmap;
4838 +#endif
4839 +
4840 mm->get_unmapped_area = arch_get_unmapped_area;
4841 mm->unmap_area = arch_unmap_area;
4842 } else {
4843 mm->mmap_base = mmap_base();
4844 +
4845 +#ifdef CONFIG_PAX_RANDMMAP
4846 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4847 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4848 +#endif
4849 +
4850 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4851 mm->unmap_area = arch_unmap_area_topdown;
4852 }
4853 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4854 */
4855 if (mmap_is_legacy()) {
4856 mm->mmap_base = TASK_UNMAPPED_BASE;
4857 +
4858 +#ifdef CONFIG_PAX_RANDMMAP
4859 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4860 + mm->mmap_base += mm->delta_mmap;
4861 +#endif
4862 +
4863 mm->get_unmapped_area = s390_get_unmapped_area;
4864 mm->unmap_area = arch_unmap_area;
4865 } else {
4866 mm->mmap_base = mmap_base();
4867 +
4868 +#ifdef CONFIG_PAX_RANDMMAP
4869 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4870 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4871 +#endif
4872 +
4873 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4874 mm->unmap_area = arch_unmap_area_topdown;
4875 }
4876 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4877 index ae3d59f..f65f075 100644
4878 --- a/arch/score/include/asm/cache.h
4879 +++ b/arch/score/include/asm/cache.h
4880 @@ -1,7 +1,9 @@
4881 #ifndef _ASM_SCORE_CACHE_H
4882 #define _ASM_SCORE_CACHE_H
4883
4884 +#include <linux/const.h>
4885 +
4886 #define L1_CACHE_SHIFT 4
4887 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4888 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4889
4890 #endif /* _ASM_SCORE_CACHE_H */
4891 diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
4892 index f9f3cd5..58ff438 100644
4893 --- a/arch/score/include/asm/exec.h
4894 +++ b/arch/score/include/asm/exec.h
4895 @@ -1,6 +1,6 @@
4896 #ifndef _ASM_SCORE_EXEC_H
4897 #define _ASM_SCORE_EXEC_H
4898
4899 -extern unsigned long arch_align_stack(unsigned long sp);
4900 +#define arch_align_stack(x) (x)
4901
4902 #endif /* _ASM_SCORE_EXEC_H */
4903 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4904 index 2707023..1c2a3b7 100644
4905 --- a/arch/score/kernel/process.c
4906 +++ b/arch/score/kernel/process.c
4907 @@ -159,8 +159,3 @@ unsigned long get_wchan(struct task_struct *task)
4908
4909 return task_pt_regs(task)->cp0_epc;
4910 }
4911 -
4912 -unsigned long arch_align_stack(unsigned long sp)
4913 -{
4914 - return sp;
4915 -}
4916 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4917 index ef9e555..331bd29 100644
4918 --- a/arch/sh/include/asm/cache.h
4919 +++ b/arch/sh/include/asm/cache.h
4920 @@ -9,10 +9,11 @@
4921 #define __ASM_SH_CACHE_H
4922 #ifdef __KERNEL__
4923
4924 +#include <linux/const.h>
4925 #include <linux/init.h>
4926 #include <cpu/cache.h>
4927
4928 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4929 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4930
4931 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4932
4933 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4934 index afeb710..d1d1289 100644
4935 --- a/arch/sh/mm/mmap.c
4936 +++ b/arch/sh/mm/mmap.c
4937 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4938 addr = PAGE_ALIGN(addr);
4939
4940 vma = find_vma(mm, addr);
4941 - if (TASK_SIZE - len >= addr &&
4942 - (!vma || addr + len <= vma->vm_start))
4943 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4944 return addr;
4945 }
4946
4947 @@ -106,7 +105,7 @@ full_search:
4948 }
4949 return -ENOMEM;
4950 }
4951 - if (likely(!vma || addr + len <= vma->vm_start)) {
4952 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4953 /*
4954 * Remember the place where we stopped the search:
4955 */
4956 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4957 addr = PAGE_ALIGN(addr);
4958
4959 vma = find_vma(mm, addr);
4960 - if (TASK_SIZE - len >= addr &&
4961 - (!vma || addr + len <= vma->vm_start))
4962 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4963 return addr;
4964 }
4965
4966 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4967 /* make sure it can fit in the remaining address space */
4968 if (likely(addr > len)) {
4969 vma = find_vma(mm, addr-len);
4970 - if (!vma || addr <= vma->vm_start) {
4971 + if (check_heap_stack_gap(vma, addr - len, len)) {
4972 /* remember the address as a hint for next time */
4973 return (mm->free_area_cache = addr-len);
4974 }
4975 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4976 if (unlikely(mm->mmap_base < len))
4977 goto bottomup;
4978
4979 - addr = mm->mmap_base-len;
4980 - if (do_colour_align)
4981 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4982 + addr = mm->mmap_base - len;
4983
4984 do {
4985 + if (do_colour_align)
4986 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4987 /*
4988 * Lookup failure means no vma is above this address,
4989 * else if new region fits below vma->vm_start,
4990 * return with success:
4991 */
4992 vma = find_vma(mm, addr);
4993 - if (likely(!vma || addr+len <= vma->vm_start)) {
4994 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4995 /* remember the address as a hint for next time */
4996 return (mm->free_area_cache = addr);
4997 }
4998 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4999 mm->cached_hole_size = vma->vm_start - addr;
5000
5001 /* try just below the current vma->vm_start */
5002 - addr = vma->vm_start-len;
5003 - if (do_colour_align)
5004 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5005 - } while (likely(len < vma->vm_start));
5006 + addr = skip_heap_stack_gap(vma, len);
5007 + } while (!IS_ERR_VALUE(addr));
5008
5009 bottomup:
5010 /*
5011 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
5012 index eddcfb3..b117d90 100644
5013 --- a/arch/sparc/Makefile
5014 +++ b/arch/sparc/Makefile
5015 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
5016 # Export what is needed by arch/sparc/boot/Makefile
5017 export VMLINUX_INIT VMLINUX_MAIN
5018 VMLINUX_INIT := $(head-y) $(init-y)
5019 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5020 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5021 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5022 VMLINUX_MAIN += $(drivers-y) $(net-y)
5023
5024 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5025 index ce35a1c..2e7b8f9 100644
5026 --- a/arch/sparc/include/asm/atomic_64.h
5027 +++ b/arch/sparc/include/asm/atomic_64.h
5028 @@ -14,18 +14,40 @@
5029 #define ATOMIC64_INIT(i) { (i) }
5030
5031 #define atomic_read(v) (*(volatile int *)&(v)->counter)
5032 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5033 +{
5034 + return v->counter;
5035 +}
5036 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
5037 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5038 +{
5039 + return v->counter;
5040 +}
5041
5042 #define atomic_set(v, i) (((v)->counter) = i)
5043 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5044 +{
5045 + v->counter = i;
5046 +}
5047 #define atomic64_set(v, i) (((v)->counter) = i)
5048 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5049 +{
5050 + v->counter = i;
5051 +}
5052
5053 extern void atomic_add(int, atomic_t *);
5054 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5055 extern void atomic64_add(long, atomic64_t *);
5056 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5057 extern void atomic_sub(int, atomic_t *);
5058 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5059 extern void atomic64_sub(long, atomic64_t *);
5060 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5061
5062 extern int atomic_add_ret(int, atomic_t *);
5063 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5064 extern long atomic64_add_ret(long, atomic64_t *);
5065 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5066 extern int atomic_sub_ret(int, atomic_t *);
5067 extern long atomic64_sub_ret(long, atomic64_t *);
5068
5069 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5070 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5071
5072 #define atomic_inc_return(v) atomic_add_ret(1, v)
5073 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5074 +{
5075 + return atomic_add_ret_unchecked(1, v);
5076 +}
5077 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5078 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5079 +{
5080 + return atomic64_add_ret_unchecked(1, v);
5081 +}
5082
5083 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5084 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5085
5086 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5087 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5088 +{
5089 + return atomic_add_ret_unchecked(i, v);
5090 +}
5091 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5092 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5093 +{
5094 + return atomic64_add_ret_unchecked(i, v);
5095 +}
5096
5097 /*
5098 * atomic_inc_and_test - increment and test
5099 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5100 * other cases.
5101 */
5102 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5103 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5104 +{
5105 + return atomic_inc_return_unchecked(v) == 0;
5106 +}
5107 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5108
5109 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5110 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5111 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5112
5113 #define atomic_inc(v) atomic_add(1, v)
5114 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5115 +{
5116 + atomic_add_unchecked(1, v);
5117 +}
5118 #define atomic64_inc(v) atomic64_add(1, v)
5119 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5120 +{
5121 + atomic64_add_unchecked(1, v);
5122 +}
5123
5124 #define atomic_dec(v) atomic_sub(1, v)
5125 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5126 +{
5127 + atomic_sub_unchecked(1, v);
5128 +}
5129 #define atomic64_dec(v) atomic64_sub(1, v)
5130 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5131 +{
5132 + atomic64_sub_unchecked(1, v);
5133 +}
5134
5135 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5136 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5137
5138 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5139 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5140 +{
5141 + return cmpxchg(&v->counter, old, new);
5142 +}
5143 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5144 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5145 +{
5146 + return xchg(&v->counter, new);
5147 +}
5148
5149 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5150 {
5151 - int c, old;
5152 + int c, old, new;
5153 c = atomic_read(v);
5154 for (;;) {
5155 - if (unlikely(c == (u)))
5156 + if (unlikely(c == u))
5157 break;
5158 - old = atomic_cmpxchg((v), c, c + (a));
5159 +
5160 + asm volatile("addcc %2, %0, %0\n"
5161 +
5162 +#ifdef CONFIG_PAX_REFCOUNT
5163 + "tvs %%icc, 6\n"
5164 +#endif
5165 +
5166 + : "=r" (new)
5167 + : "0" (c), "ir" (a)
5168 + : "cc");
5169 +
5170 + old = atomic_cmpxchg(v, c, new);
5171 if (likely(old == c))
5172 break;
5173 c = old;
5174 @@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5175 #define atomic64_cmpxchg(v, o, n) \
5176 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5177 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5178 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5179 +{
5180 + return xchg(&v->counter, new);
5181 +}
5182
5183 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5184 {
5185 - long c, old;
5186 + long c, old, new;
5187 c = atomic64_read(v);
5188 for (;;) {
5189 - if (unlikely(c == (u)))
5190 + if (unlikely(c == u))
5191 break;
5192 - old = atomic64_cmpxchg((v), c, c + (a));
5193 +
5194 + asm volatile("addcc %2, %0, %0\n"
5195 +
5196 +#ifdef CONFIG_PAX_REFCOUNT
5197 + "tvs %%xcc, 6\n"
5198 +#endif
5199 +
5200 + : "=r" (new)
5201 + : "0" (c), "ir" (a)
5202 + : "cc");
5203 +
5204 + old = atomic64_cmpxchg(v, c, new);
5205 if (likely(old == c))
5206 break;
5207 c = old;
5208 }
5209 - return c != (u);
5210 + return c != u;
5211 }
5212
5213 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5214 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5215 index 69358b5..9d0d492 100644
5216 --- a/arch/sparc/include/asm/cache.h
5217 +++ b/arch/sparc/include/asm/cache.h
5218 @@ -7,10 +7,12 @@
5219 #ifndef _SPARC_CACHE_H
5220 #define _SPARC_CACHE_H
5221
5222 +#include <linux/const.h>
5223 +
5224 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
5225
5226 #define L1_CACHE_SHIFT 5
5227 -#define L1_CACHE_BYTES 32
5228 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5229
5230 #ifdef CONFIG_SPARC32
5231 #define SMP_CACHE_BYTES_SHIFT 5
5232 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5233 index 4269ca6..e3da77f 100644
5234 --- a/arch/sparc/include/asm/elf_32.h
5235 +++ b/arch/sparc/include/asm/elf_32.h
5236 @@ -114,6 +114,13 @@ typedef struct {
5237
5238 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5239
5240 +#ifdef CONFIG_PAX_ASLR
5241 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
5242 +
5243 +#define PAX_DELTA_MMAP_LEN 16
5244 +#define PAX_DELTA_STACK_LEN 16
5245 +#endif
5246 +
5247 /* This yields a mask that user programs can use to figure out what
5248 instruction set this cpu supports. This can NOT be done in userspace
5249 on Sparc. */
5250 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5251 index 7df8b7f..4946269 100644
5252 --- a/arch/sparc/include/asm/elf_64.h
5253 +++ b/arch/sparc/include/asm/elf_64.h
5254 @@ -180,6 +180,13 @@ typedef struct {
5255 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5256 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5257
5258 +#ifdef CONFIG_PAX_ASLR
5259 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5260 +
5261 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5262 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5263 +#endif
5264 +
5265 extern unsigned long sparc64_elf_hwcap;
5266 #define ELF_HWCAP sparc64_elf_hwcap
5267
5268 diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
5269 index ca2b344..c6084f89 100644
5270 --- a/arch/sparc/include/asm/pgalloc_32.h
5271 +++ b/arch/sparc/include/asm/pgalloc_32.h
5272 @@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
5273 BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
5274 #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
5275 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
5276 +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
5277
5278 BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
5279 #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
5280 diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
5281 index 40b2d7a..22a665b 100644
5282 --- a/arch/sparc/include/asm/pgalloc_64.h
5283 +++ b/arch/sparc/include/asm/pgalloc_64.h
5284 @@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
5285 }
5286
5287 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
5288 +#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
5289
5290 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5291 {
5292 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5293 index 3d71018..48a11c5 100644
5294 --- a/arch/sparc/include/asm/pgtable_32.h
5295 +++ b/arch/sparc/include/asm/pgtable_32.h
5296 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5297 BTFIXUPDEF_INT(page_none)
5298 BTFIXUPDEF_INT(page_copy)
5299 BTFIXUPDEF_INT(page_readonly)
5300 +
5301 +#ifdef CONFIG_PAX_PAGEEXEC
5302 +BTFIXUPDEF_INT(page_shared_noexec)
5303 +BTFIXUPDEF_INT(page_copy_noexec)
5304 +BTFIXUPDEF_INT(page_readonly_noexec)
5305 +#endif
5306 +
5307 BTFIXUPDEF_INT(page_kernel)
5308
5309 #define PMD_SHIFT SUN4C_PMD_SHIFT
5310 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
5311 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5312 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5313
5314 +#ifdef CONFIG_PAX_PAGEEXEC
5315 +extern pgprot_t PAGE_SHARED_NOEXEC;
5316 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5317 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5318 +#else
5319 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
5320 +# define PAGE_COPY_NOEXEC PAGE_COPY
5321 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
5322 +#endif
5323 +
5324 extern unsigned long page_kernel;
5325
5326 #ifdef MODULE
5327 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5328 index f6ae2b2..b03ffc7 100644
5329 --- a/arch/sparc/include/asm/pgtsrmmu.h
5330 +++ b/arch/sparc/include/asm/pgtsrmmu.h
5331 @@ -115,6 +115,13 @@
5332 SRMMU_EXEC | SRMMU_REF)
5333 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5334 SRMMU_EXEC | SRMMU_REF)
5335 +
5336 +#ifdef CONFIG_PAX_PAGEEXEC
5337 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5338 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5339 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5340 +#endif
5341 +
5342 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5343 SRMMU_DIRTY | SRMMU_REF)
5344
5345 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5346 index 9689176..63c18ea 100644
5347 --- a/arch/sparc/include/asm/spinlock_64.h
5348 +++ b/arch/sparc/include/asm/spinlock_64.h
5349 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
5350
5351 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5352
5353 -static void inline arch_read_lock(arch_rwlock_t *lock)
5354 +static inline void arch_read_lock(arch_rwlock_t *lock)
5355 {
5356 unsigned long tmp1, tmp2;
5357
5358 __asm__ __volatile__ (
5359 "1: ldsw [%2], %0\n"
5360 " brlz,pn %0, 2f\n"
5361 -"4: add %0, 1, %1\n"
5362 +"4: addcc %0, 1, %1\n"
5363 +
5364 +#ifdef CONFIG_PAX_REFCOUNT
5365 +" tvs %%icc, 6\n"
5366 +#endif
5367 +
5368 " cas [%2], %0, %1\n"
5369 " cmp %0, %1\n"
5370 " bne,pn %%icc, 1b\n"
5371 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
5372 " .previous"
5373 : "=&r" (tmp1), "=&r" (tmp2)
5374 : "r" (lock)
5375 - : "memory");
5376 + : "memory", "cc");
5377 }
5378
5379 -static int inline arch_read_trylock(arch_rwlock_t *lock)
5380 +static inline int arch_read_trylock(arch_rwlock_t *lock)
5381 {
5382 int tmp1, tmp2;
5383
5384 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5385 "1: ldsw [%2], %0\n"
5386 " brlz,a,pn %0, 2f\n"
5387 " mov 0, %0\n"
5388 -" add %0, 1, %1\n"
5389 +" addcc %0, 1, %1\n"
5390 +
5391 +#ifdef CONFIG_PAX_REFCOUNT
5392 +" tvs %%icc, 6\n"
5393 +#endif
5394 +
5395 " cas [%2], %0, %1\n"
5396 " cmp %0, %1\n"
5397 " bne,pn %%icc, 1b\n"
5398 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5399 return tmp1;
5400 }
5401
5402 -static void inline arch_read_unlock(arch_rwlock_t *lock)
5403 +static inline void arch_read_unlock(arch_rwlock_t *lock)
5404 {
5405 unsigned long tmp1, tmp2;
5406
5407 __asm__ __volatile__(
5408 "1: lduw [%2], %0\n"
5409 -" sub %0, 1, %1\n"
5410 +" subcc %0, 1, %1\n"
5411 +
5412 +#ifdef CONFIG_PAX_REFCOUNT
5413 +" tvs %%icc, 6\n"
5414 +#endif
5415 +
5416 " cas [%2], %0, %1\n"
5417 " cmp %0, %1\n"
5418 " bne,pn %%xcc, 1b\n"
5419 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5420 : "memory");
5421 }
5422
5423 -static void inline arch_write_lock(arch_rwlock_t *lock)
5424 +static inline void arch_write_lock(arch_rwlock_t *lock)
5425 {
5426 unsigned long mask, tmp1, tmp2;
5427
5428 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5429 : "memory");
5430 }
5431
5432 -static void inline arch_write_unlock(arch_rwlock_t *lock)
5433 +static inline void arch_write_unlock(arch_rwlock_t *lock)
5434 {
5435 __asm__ __volatile__(
5436 " stw %%g0, [%0]"
5437 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5438 : "memory");
5439 }
5440
5441 -static int inline arch_write_trylock(arch_rwlock_t *lock)
5442 +static inline int arch_write_trylock(arch_rwlock_t *lock)
5443 {
5444 unsigned long mask, tmp1, tmp2, result;
5445
5446 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5447 index c2a1080..21ed218 100644
5448 --- a/arch/sparc/include/asm/thread_info_32.h
5449 +++ b/arch/sparc/include/asm/thread_info_32.h
5450 @@ -50,6 +50,8 @@ struct thread_info {
5451 unsigned long w_saved;
5452
5453 struct restart_block restart_block;
5454 +
5455 + unsigned long lowest_stack;
5456 };
5457
5458 /*
5459 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5460 index 01d057f..13a7d2f 100644
5461 --- a/arch/sparc/include/asm/thread_info_64.h
5462 +++ b/arch/sparc/include/asm/thread_info_64.h
5463 @@ -63,6 +63,8 @@ struct thread_info {
5464 struct pt_regs *kern_una_regs;
5465 unsigned int kern_una_insn;
5466
5467 + unsigned long lowest_stack;
5468 +
5469 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5470 };
5471
5472 @@ -214,10 +216,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
5473 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
5474 /* flag bit 6 is available */
5475 #define TIF_32BIT 7 /* 32-bit binary */
5476 -/* flag bit 8 is available */
5477 +#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
5478 #define TIF_SECCOMP 9 /* secure computing */
5479 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
5480 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
5481 +
5482 /* NOTE: Thread flags >= 12 should be ones we have no interest
5483 * in using in assembly, else we can't use the mask as
5484 * an immediate value in instructions such as andcc.
5485 @@ -236,12 +239,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
5486 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
5487 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
5488 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
5489 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5490
5491 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
5492 _TIF_DO_NOTIFY_RESUME_MASK | \
5493 _TIF_NEED_RESCHED)
5494 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
5495
5496 +#define _TIF_WORK_SYSCALL \
5497 + (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
5498 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
5499 +
5500 +
5501 /*
5502 * Thread-synchronous status.
5503 *
5504 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5505 index e88fbe5..96b0ce5 100644
5506 --- a/arch/sparc/include/asm/uaccess.h
5507 +++ b/arch/sparc/include/asm/uaccess.h
5508 @@ -1,5 +1,13 @@
5509 #ifndef ___ASM_SPARC_UACCESS_H
5510 #define ___ASM_SPARC_UACCESS_H
5511 +
5512 +#ifdef __KERNEL__
5513 +#ifndef __ASSEMBLY__
5514 +#include <linux/types.h>
5515 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
5516 +#endif
5517 +#endif
5518 +
5519 #if defined(__sparc__) && defined(__arch64__)
5520 #include <asm/uaccess_64.h>
5521 #else
5522 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5523 index 8303ac4..07f333d 100644
5524 --- a/arch/sparc/include/asm/uaccess_32.h
5525 +++ b/arch/sparc/include/asm/uaccess_32.h
5526 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5527
5528 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5529 {
5530 - if (n && __access_ok((unsigned long) to, n))
5531 + if ((long)n < 0)
5532 + return n;
5533 +
5534 + if (n && __access_ok((unsigned long) to, n)) {
5535 + if (!__builtin_constant_p(n))
5536 + check_object_size(from, n, true);
5537 return __copy_user(to, (__force void __user *) from, n);
5538 - else
5539 + } else
5540 return n;
5541 }
5542
5543 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5544 {
5545 + if ((long)n < 0)
5546 + return n;
5547 +
5548 + if (!__builtin_constant_p(n))
5549 + check_object_size(from, n, true);
5550 +
5551 return __copy_user(to, (__force void __user *) from, n);
5552 }
5553
5554 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5555 {
5556 - if (n && __access_ok((unsigned long) from, n))
5557 + if ((long)n < 0)
5558 + return n;
5559 +
5560 + if (n && __access_ok((unsigned long) from, n)) {
5561 + if (!__builtin_constant_p(n))
5562 + check_object_size(to, n, false);
5563 return __copy_user((__force void __user *) to, from, n);
5564 - else
5565 + } else
5566 return n;
5567 }
5568
5569 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5570 {
5571 + if ((long)n < 0)
5572 + return n;
5573 +
5574 return __copy_user((__force void __user *) to, from, n);
5575 }
5576
5577 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5578 index a1091afb..380228e 100644
5579 --- a/arch/sparc/include/asm/uaccess_64.h
5580 +++ b/arch/sparc/include/asm/uaccess_64.h
5581 @@ -10,6 +10,7 @@
5582 #include <linux/compiler.h>
5583 #include <linux/string.h>
5584 #include <linux/thread_info.h>
5585 +#include <linux/kernel.h>
5586 #include <asm/asi.h>
5587 #include <asm/spitfire.h>
5588 #include <asm-generic/uaccess-unaligned.h>
5589 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5590 static inline unsigned long __must_check
5591 copy_from_user(void *to, const void __user *from, unsigned long size)
5592 {
5593 - unsigned long ret = ___copy_from_user(to, from, size);
5594 + unsigned long ret;
5595
5596 + if ((long)size < 0 || size > INT_MAX)
5597 + return size;
5598 +
5599 + if (!__builtin_constant_p(size))
5600 + check_object_size(to, size, false);
5601 +
5602 + ret = ___copy_from_user(to, from, size);
5603 if (unlikely(ret))
5604 ret = copy_from_user_fixup(to, from, size);
5605
5606 @@ -229,8 +237,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5607 static inline unsigned long __must_check
5608 copy_to_user(void __user *to, const void *from, unsigned long size)
5609 {
5610 - unsigned long ret = ___copy_to_user(to, from, size);
5611 + unsigned long ret;
5612
5613 + if ((long)size < 0 || size > INT_MAX)
5614 + return size;
5615 +
5616 + if (!__builtin_constant_p(size))
5617 + check_object_size(from, size, true);
5618 +
5619 + ret = ___copy_to_user(to, from, size);
5620 if (unlikely(ret))
5621 ret = copy_to_user_fixup(to, from, size);
5622 return ret;
5623 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5624 index cb85458..e063f17 100644
5625 --- a/arch/sparc/kernel/Makefile
5626 +++ b/arch/sparc/kernel/Makefile
5627 @@ -3,7 +3,7 @@
5628 #
5629
5630 asflags-y := -ansi
5631 -ccflags-y := -Werror
5632 +#ccflags-y := -Werror
5633
5634 extra-y := head_$(BITS).o
5635 extra-y += init_task.o
5636 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5637 index efa0754..74b03fe 100644
5638 --- a/arch/sparc/kernel/process_32.c
5639 +++ b/arch/sparc/kernel/process_32.c
5640 @@ -200,7 +200,7 @@ void __show_backtrace(unsigned long fp)
5641 rw->ins[4], rw->ins[5],
5642 rw->ins[6],
5643 rw->ins[7]);
5644 - printk("%pS\n", (void *) rw->ins[7]);
5645 + printk("%pA\n", (void *) rw->ins[7]);
5646 rw = (struct reg_window32 *) rw->ins[6];
5647 }
5648 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5649 @@ -267,14 +267,14 @@ void show_regs(struct pt_regs *r)
5650
5651 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5652 r->psr, r->pc, r->npc, r->y, print_tainted());
5653 - printk("PC: <%pS>\n", (void *) r->pc);
5654 + printk("PC: <%pA>\n", (void *) r->pc);
5655 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5656 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5657 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5658 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5659 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5660 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5661 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5662 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5663
5664 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5665 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5666 @@ -309,7 +309,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5667 rw = (struct reg_window32 *) fp;
5668 pc = rw->ins[7];
5669 printk("[%08lx : ", pc);
5670 - printk("%pS ] ", (void *) pc);
5671 + printk("%pA ] ", (void *) pc);
5672 fp = rw->ins[6];
5673 } while (++count < 16);
5674 printk("\n");
5675 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5676 index aff0c72..9067b39 100644
5677 --- a/arch/sparc/kernel/process_64.c
5678 +++ b/arch/sparc/kernel/process_64.c
5679 @@ -179,14 +179,14 @@ static void show_regwindow(struct pt_regs *regs)
5680 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5681 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5682 if (regs->tstate & TSTATE_PRIV)
5683 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5684 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5685 }
5686
5687 void show_regs(struct pt_regs *regs)
5688 {
5689 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5690 regs->tpc, regs->tnpc, regs->y, print_tainted());
5691 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5692 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5693 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5694 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5695 regs->u_regs[3]);
5696 @@ -199,7 +199,7 @@ void show_regs(struct pt_regs *regs)
5697 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5698 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5699 regs->u_regs[15]);
5700 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5701 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5702 show_regwindow(regs);
5703 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5704 }
5705 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5706 ((tp && tp->task) ? tp->task->pid : -1));
5707
5708 if (gp->tstate & TSTATE_PRIV) {
5709 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5710 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5711 (void *) gp->tpc,
5712 (void *) gp->o7,
5713 (void *) gp->i7,
5714 diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
5715 index 6f97c07..b1300ec 100644
5716 --- a/arch/sparc/kernel/ptrace_64.c
5717 +++ b/arch/sparc/kernel/ptrace_64.c
5718 @@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
5719 return ret;
5720 }
5721
5722 +#ifdef CONFIG_GRKERNSEC_SETXID
5723 +extern void gr_delayed_cred_worker(void);
5724 +#endif
5725 +
5726 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5727 {
5728 int ret = 0;
5729 @@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5730 /* do the secure computing check first */
5731 secure_computing(regs->u_regs[UREG_G1]);
5732
5733 +#ifdef CONFIG_GRKERNSEC_SETXID
5734 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5735 + gr_delayed_cred_worker();
5736 +#endif
5737 +
5738 if (test_thread_flag(TIF_SYSCALL_TRACE))
5739 ret = tracehook_report_syscall_entry(regs);
5740
5741 @@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5742
5743 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
5744 {
5745 +#ifdef CONFIG_GRKERNSEC_SETXID
5746 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5747 + gr_delayed_cred_worker();
5748 +#endif
5749 +
5750 audit_syscall_exit(regs);
5751
5752 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
5753 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5754 index 42b282f..28ce9f2 100644
5755 --- a/arch/sparc/kernel/sys_sparc_32.c
5756 +++ b/arch/sparc/kernel/sys_sparc_32.c
5757 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5758 if (ARCH_SUN4C && len > 0x20000000)
5759 return -ENOMEM;
5760 if (!addr)
5761 - addr = TASK_UNMAPPED_BASE;
5762 + addr = current->mm->mmap_base;
5763
5764 if (flags & MAP_SHARED)
5765 addr = COLOUR_ALIGN(addr);
5766 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5767 }
5768 if (TASK_SIZE - PAGE_SIZE - len < addr)
5769 return -ENOMEM;
5770 - if (!vmm || addr + len <= vmm->vm_start)
5771 + if (check_heap_stack_gap(vmm, addr, len))
5772 return addr;
5773 addr = vmm->vm_end;
5774 if (flags & MAP_SHARED)
5775 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5776 index 3ee51f1..2ba4913 100644
5777 --- a/arch/sparc/kernel/sys_sparc_64.c
5778 +++ b/arch/sparc/kernel/sys_sparc_64.c
5779 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5780 /* We do not accept a shared mapping if it would violate
5781 * cache aliasing constraints.
5782 */
5783 - if ((flags & MAP_SHARED) &&
5784 + if ((filp || (flags & MAP_SHARED)) &&
5785 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5786 return -EINVAL;
5787 return addr;
5788 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5789 if (filp || (flags & MAP_SHARED))
5790 do_color_align = 1;
5791
5792 +#ifdef CONFIG_PAX_RANDMMAP
5793 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5794 +#endif
5795 +
5796 if (addr) {
5797 if (do_color_align)
5798 addr = COLOUR_ALIGN(addr, pgoff);
5799 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5800 addr = PAGE_ALIGN(addr);
5801
5802 vma = find_vma(mm, addr);
5803 - if (task_size - len >= addr &&
5804 - (!vma || addr + len <= vma->vm_start))
5805 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5806 return addr;
5807 }
5808
5809 if (len > mm->cached_hole_size) {
5810 - start_addr = addr = mm->free_area_cache;
5811 + start_addr = addr = mm->free_area_cache;
5812 } else {
5813 - start_addr = addr = TASK_UNMAPPED_BASE;
5814 + start_addr = addr = mm->mmap_base;
5815 mm->cached_hole_size = 0;
5816 }
5817
5818 @@ -174,14 +177,14 @@ full_search:
5819 vma = find_vma(mm, VA_EXCLUDE_END);
5820 }
5821 if (unlikely(task_size < addr)) {
5822 - if (start_addr != TASK_UNMAPPED_BASE) {
5823 - start_addr = addr = TASK_UNMAPPED_BASE;
5824 + if (start_addr != mm->mmap_base) {
5825 + start_addr = addr = mm->mmap_base;
5826 mm->cached_hole_size = 0;
5827 goto full_search;
5828 }
5829 return -ENOMEM;
5830 }
5831 - if (likely(!vma || addr + len <= vma->vm_start)) {
5832 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5833 /*
5834 * Remember the place where we stopped the search:
5835 */
5836 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5837 /* We do not accept a shared mapping if it would violate
5838 * cache aliasing constraints.
5839 */
5840 - if ((flags & MAP_SHARED) &&
5841 + if ((filp || (flags & MAP_SHARED)) &&
5842 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5843 return -EINVAL;
5844 return addr;
5845 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5846 addr = PAGE_ALIGN(addr);
5847
5848 vma = find_vma(mm, addr);
5849 - if (task_size - len >= addr &&
5850 - (!vma || addr + len <= vma->vm_start))
5851 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5852 return addr;
5853 }
5854
5855 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5856 /* make sure it can fit in the remaining address space */
5857 if (likely(addr > len)) {
5858 vma = find_vma(mm, addr-len);
5859 - if (!vma || addr <= vma->vm_start) {
5860 + if (check_heap_stack_gap(vma, addr - len, len)) {
5861 /* remember the address as a hint for next time */
5862 return (mm->free_area_cache = addr-len);
5863 }
5864 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5865 if (unlikely(mm->mmap_base < len))
5866 goto bottomup;
5867
5868 - addr = mm->mmap_base-len;
5869 - if (do_color_align)
5870 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5871 + addr = mm->mmap_base - len;
5872
5873 do {
5874 + if (do_color_align)
5875 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5876 /*
5877 * Lookup failure means no vma is above this address,
5878 * else if new region fits below vma->vm_start,
5879 * return with success:
5880 */
5881 vma = find_vma(mm, addr);
5882 - if (likely(!vma || addr+len <= vma->vm_start)) {
5883 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5884 /* remember the address as a hint for next time */
5885 return (mm->free_area_cache = addr);
5886 }
5887 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5888 mm->cached_hole_size = vma->vm_start - addr;
5889
5890 /* try just below the current vma->vm_start */
5891 - addr = vma->vm_start-len;
5892 - if (do_color_align)
5893 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5894 - } while (likely(len < vma->vm_start));
5895 + addr = skip_heap_stack_gap(vma, len);
5896 + } while (!IS_ERR_VALUE(addr));
5897
5898 bottomup:
5899 /*
5900 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5901 gap == RLIM_INFINITY ||
5902 sysctl_legacy_va_layout) {
5903 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5904 +
5905 +#ifdef CONFIG_PAX_RANDMMAP
5906 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5907 + mm->mmap_base += mm->delta_mmap;
5908 +#endif
5909 +
5910 mm->get_unmapped_area = arch_get_unmapped_area;
5911 mm->unmap_area = arch_unmap_area;
5912 } else {
5913 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5914 gap = (task_size / 6 * 5);
5915
5916 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5917 +
5918 +#ifdef CONFIG_PAX_RANDMMAP
5919 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5920 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5921 +#endif
5922 +
5923 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5924 mm->unmap_area = arch_unmap_area_topdown;
5925 }
5926 diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
5927 index 1d7e274..b39c527 100644
5928 --- a/arch/sparc/kernel/syscalls.S
5929 +++ b/arch/sparc/kernel/syscalls.S
5930 @@ -62,7 +62,7 @@ sys32_rt_sigreturn:
5931 #endif
5932 .align 32
5933 1: ldx [%g6 + TI_FLAGS], %l5
5934 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5935 + andcc %l5, _TIF_WORK_SYSCALL, %g0
5936 be,pt %icc, rtrap
5937 nop
5938 call syscall_trace_leave
5939 @@ -179,7 +179,7 @@ linux_sparc_syscall32:
5940
5941 srl %i5, 0, %o5 ! IEU1
5942 srl %i2, 0, %o2 ! IEU0 Group
5943 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5944 + andcc %l0, _TIF_WORK_SYSCALL, %g0
5945 bne,pn %icc, linux_syscall_trace32 ! CTI
5946 mov %i0, %l5 ! IEU1
5947 call %l7 ! CTI Group brk forced
5948 @@ -202,7 +202,7 @@ linux_sparc_syscall:
5949
5950 mov %i3, %o3 ! IEU1
5951 mov %i4, %o4 ! IEU0 Group
5952 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5953 + andcc %l0, _TIF_WORK_SYSCALL, %g0
5954 bne,pn %icc, linux_syscall_trace ! CTI Group
5955 mov %i0, %l5 ! IEU0
5956 2: call %l7 ! CTI Group brk forced
5957 @@ -226,7 +226,7 @@ ret_sys_call:
5958
5959 cmp %o0, -ERESTART_RESTARTBLOCK
5960 bgeu,pn %xcc, 1f
5961 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
5962 + andcc %l0, _TIF_WORK_SYSCALL, %l6
5963 80:
5964 /* System call success, clear Carry condition code. */
5965 andn %g3, %g2, %g3
5966 @@ -241,7 +241,7 @@ ret_sys_call:
5967 /* System call failure, set Carry condition code.
5968 * Also, get abs(errno) to return to the process.
5969 */
5970 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
5971 + andcc %l0, _TIF_WORK_SYSCALL, %l6
5972 sub %g0, %o0, %o0
5973 or %g3, %g2, %g3
5974 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
5975 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5976 index d2de213..6b22bc3 100644
5977 --- a/arch/sparc/kernel/traps_32.c
5978 +++ b/arch/sparc/kernel/traps_32.c
5979 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5980 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5981 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5982
5983 +extern void gr_handle_kernel_exploit(void);
5984 +
5985 void die_if_kernel(char *str, struct pt_regs *regs)
5986 {
5987 static int die_counter;
5988 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5989 count++ < 30 &&
5990 (((unsigned long) rw) >= PAGE_OFFSET) &&
5991 !(((unsigned long) rw) & 0x7)) {
5992 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
5993 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
5994 (void *) rw->ins[7]);
5995 rw = (struct reg_window32 *)rw->ins[6];
5996 }
5997 }
5998 printk("Instruction DUMP:");
5999 instruction_dump ((unsigned long *) regs->pc);
6000 - if(regs->psr & PSR_PS)
6001 + if(regs->psr & PSR_PS) {
6002 + gr_handle_kernel_exploit();
6003 do_exit(SIGKILL);
6004 + }
6005 do_exit(SIGSEGV);
6006 }
6007
6008 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6009 index c72fdf5..743a344 100644
6010 --- a/arch/sparc/kernel/traps_64.c
6011 +++ b/arch/sparc/kernel/traps_64.c
6012 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6013 i + 1,
6014 p->trapstack[i].tstate, p->trapstack[i].tpc,
6015 p->trapstack[i].tnpc, p->trapstack[i].tt);
6016 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6017 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6018 }
6019 }
6020
6021 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6022
6023 lvl -= 0x100;
6024 if (regs->tstate & TSTATE_PRIV) {
6025 +
6026 +#ifdef CONFIG_PAX_REFCOUNT
6027 + if (lvl == 6)
6028 + pax_report_refcount_overflow(regs);
6029 +#endif
6030 +
6031 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6032 die_if_kernel(buffer, regs);
6033 }
6034 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6035 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6036 {
6037 char buffer[32];
6038 -
6039 +
6040 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6041 0, lvl, SIGTRAP) == NOTIFY_STOP)
6042 return;
6043
6044 +#ifdef CONFIG_PAX_REFCOUNT
6045 + if (lvl == 6)
6046 + pax_report_refcount_overflow(regs);
6047 +#endif
6048 +
6049 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6050
6051 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6052 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6053 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6054 printk("%s" "ERROR(%d): ",
6055 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6056 - printk("TPC<%pS>\n", (void *) regs->tpc);
6057 + printk("TPC<%pA>\n", (void *) regs->tpc);
6058 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6059 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6060 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6061 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6062 smp_processor_id(),
6063 (type & 0x1) ? 'I' : 'D',
6064 regs->tpc);
6065 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6066 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6067 panic("Irrecoverable Cheetah+ parity error.");
6068 }
6069
6070 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6071 smp_processor_id(),
6072 (type & 0x1) ? 'I' : 'D',
6073 regs->tpc);
6074 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6075 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6076 }
6077
6078 struct sun4v_error_entry {
6079 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6080
6081 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6082 regs->tpc, tl);
6083 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6084 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6085 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6086 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6087 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6088 (void *) regs->u_regs[UREG_I7]);
6089 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6090 "pte[%lx] error[%lx]\n",
6091 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6092
6093 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6094 regs->tpc, tl);
6095 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6096 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6097 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6098 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6099 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6100 (void *) regs->u_regs[UREG_I7]);
6101 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6102 "pte[%lx] error[%lx]\n",
6103 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6104 fp = (unsigned long)sf->fp + STACK_BIAS;
6105 }
6106
6107 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6108 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6109 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6110 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
6111 int index = tsk->curr_ret_stack;
6112 if (tsk->ret_stack && index >= graph) {
6113 pc = tsk->ret_stack[index - graph].ret;
6114 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6115 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6116 graph++;
6117 }
6118 }
6119 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6120 return (struct reg_window *) (fp + STACK_BIAS);
6121 }
6122
6123 +extern void gr_handle_kernel_exploit(void);
6124 +
6125 void die_if_kernel(char *str, struct pt_regs *regs)
6126 {
6127 static int die_counter;
6128 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6129 while (rw &&
6130 count++ < 30 &&
6131 kstack_valid(tp, (unsigned long) rw)) {
6132 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
6133 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
6134 (void *) rw->ins[7]);
6135
6136 rw = kernel_stack_up(rw);
6137 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6138 }
6139 user_instruction_dump ((unsigned int __user *) regs->tpc);
6140 }
6141 - if (regs->tstate & TSTATE_PRIV)
6142 + if (regs->tstate & TSTATE_PRIV) {
6143 + gr_handle_kernel_exploit();
6144 do_exit(SIGKILL);
6145 + }
6146 do_exit(SIGSEGV);
6147 }
6148 EXPORT_SYMBOL(die_if_kernel);
6149 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6150 index dae85bc..af1e19d 100644
6151 --- a/arch/sparc/kernel/unaligned_64.c
6152 +++ b/arch/sparc/kernel/unaligned_64.c
6153 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
6154 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
6155
6156 if (__ratelimit(&ratelimit)) {
6157 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
6158 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
6159 regs->tpc, (void *) regs->tpc);
6160 }
6161 }
6162 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6163 index a3fc437..fea9957 100644
6164 --- a/arch/sparc/lib/Makefile
6165 +++ b/arch/sparc/lib/Makefile
6166 @@ -2,7 +2,7 @@
6167 #
6168
6169 asflags-y := -ansi -DST_DIV0=0x02
6170 -ccflags-y := -Werror
6171 +#ccflags-y := -Werror
6172
6173 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6174 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6175 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6176 index 59186e0..f747d7a 100644
6177 --- a/arch/sparc/lib/atomic_64.S
6178 +++ b/arch/sparc/lib/atomic_64.S
6179 @@ -18,7 +18,12 @@
6180 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6181 BACKOFF_SETUP(%o2)
6182 1: lduw [%o1], %g1
6183 - add %g1, %o0, %g7
6184 + addcc %g1, %o0, %g7
6185 +
6186 +#ifdef CONFIG_PAX_REFCOUNT
6187 + tvs %icc, 6
6188 +#endif
6189 +
6190 cas [%o1], %g1, %g7
6191 cmp %g1, %g7
6192 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6193 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6194 2: BACKOFF_SPIN(%o2, %o3, 1b)
6195 .size atomic_add, .-atomic_add
6196
6197 + .globl atomic_add_unchecked
6198 + .type atomic_add_unchecked,#function
6199 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6200 + BACKOFF_SETUP(%o2)
6201 +1: lduw [%o1], %g1
6202 + add %g1, %o0, %g7
6203 + cas [%o1], %g1, %g7
6204 + cmp %g1, %g7
6205 + bne,pn %icc, 2f
6206 + nop
6207 + retl
6208 + nop
6209 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6210 + .size atomic_add_unchecked, .-atomic_add_unchecked
6211 +
6212 .globl atomic_sub
6213 .type atomic_sub,#function
6214 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6215 BACKOFF_SETUP(%o2)
6216 1: lduw [%o1], %g1
6217 - sub %g1, %o0, %g7
6218 + subcc %g1, %o0, %g7
6219 +
6220 +#ifdef CONFIG_PAX_REFCOUNT
6221 + tvs %icc, 6
6222 +#endif
6223 +
6224 cas [%o1], %g1, %g7
6225 cmp %g1, %g7
6226 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6227 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6228 2: BACKOFF_SPIN(%o2, %o3, 1b)
6229 .size atomic_sub, .-atomic_sub
6230
6231 + .globl atomic_sub_unchecked
6232 + .type atomic_sub_unchecked,#function
6233 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6234 + BACKOFF_SETUP(%o2)
6235 +1: lduw [%o1], %g1
6236 + sub %g1, %o0, %g7
6237 + cas [%o1], %g1, %g7
6238 + cmp %g1, %g7
6239 + bne,pn %icc, 2f
6240 + nop
6241 + retl
6242 + nop
6243 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6244 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
6245 +
6246 .globl atomic_add_ret
6247 .type atomic_add_ret,#function
6248 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6249 BACKOFF_SETUP(%o2)
6250 1: lduw [%o1], %g1
6251 - add %g1, %o0, %g7
6252 + addcc %g1, %o0, %g7
6253 +
6254 +#ifdef CONFIG_PAX_REFCOUNT
6255 + tvs %icc, 6
6256 +#endif
6257 +
6258 cas [%o1], %g1, %g7
6259 cmp %g1, %g7
6260 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6261 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6262 2: BACKOFF_SPIN(%o2, %o3, 1b)
6263 .size atomic_add_ret, .-atomic_add_ret
6264
6265 + .globl atomic_add_ret_unchecked
6266 + .type atomic_add_ret_unchecked,#function
6267 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6268 + BACKOFF_SETUP(%o2)
6269 +1: lduw [%o1], %g1
6270 + addcc %g1, %o0, %g7
6271 + cas [%o1], %g1, %g7
6272 + cmp %g1, %g7
6273 + bne,pn %icc, 2f
6274 + add %g7, %o0, %g7
6275 + sra %g7, 0, %o0
6276 + retl
6277 + nop
6278 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6279 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6280 +
6281 .globl atomic_sub_ret
6282 .type atomic_sub_ret,#function
6283 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6284 BACKOFF_SETUP(%o2)
6285 1: lduw [%o1], %g1
6286 - sub %g1, %o0, %g7
6287 + subcc %g1, %o0, %g7
6288 +
6289 +#ifdef CONFIG_PAX_REFCOUNT
6290 + tvs %icc, 6
6291 +#endif
6292 +
6293 cas [%o1], %g1, %g7
6294 cmp %g1, %g7
6295 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6296 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6297 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6298 BACKOFF_SETUP(%o2)
6299 1: ldx [%o1], %g1
6300 - add %g1, %o0, %g7
6301 + addcc %g1, %o0, %g7
6302 +
6303 +#ifdef CONFIG_PAX_REFCOUNT
6304 + tvs %xcc, 6
6305 +#endif
6306 +
6307 casx [%o1], %g1, %g7
6308 cmp %g1, %g7
6309 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6310 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6311 2: BACKOFF_SPIN(%o2, %o3, 1b)
6312 .size atomic64_add, .-atomic64_add
6313
6314 + .globl atomic64_add_unchecked
6315 + .type atomic64_add_unchecked,#function
6316 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6317 + BACKOFF_SETUP(%o2)
6318 +1: ldx [%o1], %g1
6319 + addcc %g1, %o0, %g7
6320 + casx [%o1], %g1, %g7
6321 + cmp %g1, %g7
6322 + bne,pn %xcc, 2f
6323 + nop
6324 + retl
6325 + nop
6326 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6327 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
6328 +
6329 .globl atomic64_sub
6330 .type atomic64_sub,#function
6331 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6332 BACKOFF_SETUP(%o2)
6333 1: ldx [%o1], %g1
6334 - sub %g1, %o0, %g7
6335 + subcc %g1, %o0, %g7
6336 +
6337 +#ifdef CONFIG_PAX_REFCOUNT
6338 + tvs %xcc, 6
6339 +#endif
6340 +
6341 casx [%o1], %g1, %g7
6342 cmp %g1, %g7
6343 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6344 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6345 2: BACKOFF_SPIN(%o2, %o3, 1b)
6346 .size atomic64_sub, .-atomic64_sub
6347
6348 + .globl atomic64_sub_unchecked
6349 + .type atomic64_sub_unchecked,#function
6350 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6351 + BACKOFF_SETUP(%o2)
6352 +1: ldx [%o1], %g1
6353 + subcc %g1, %o0, %g7
6354 + casx [%o1], %g1, %g7
6355 + cmp %g1, %g7
6356 + bne,pn %xcc, 2f
6357 + nop
6358 + retl
6359 + nop
6360 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6361 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6362 +
6363 .globl atomic64_add_ret
6364 .type atomic64_add_ret,#function
6365 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6366 BACKOFF_SETUP(%o2)
6367 1: ldx [%o1], %g1
6368 - add %g1, %o0, %g7
6369 + addcc %g1, %o0, %g7
6370 +
6371 +#ifdef CONFIG_PAX_REFCOUNT
6372 + tvs %xcc, 6
6373 +#endif
6374 +
6375 casx [%o1], %g1, %g7
6376 cmp %g1, %g7
6377 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6378 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6379 2: BACKOFF_SPIN(%o2, %o3, 1b)
6380 .size atomic64_add_ret, .-atomic64_add_ret
6381
6382 + .globl atomic64_add_ret_unchecked
6383 + .type atomic64_add_ret_unchecked,#function
6384 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6385 + BACKOFF_SETUP(%o2)
6386 +1: ldx [%o1], %g1
6387 + addcc %g1, %o0, %g7
6388 + casx [%o1], %g1, %g7
6389 + cmp %g1, %g7
6390 + bne,pn %xcc, 2f
6391 + add %g7, %o0, %g7
6392 + mov %g7, %o0
6393 + retl
6394 + nop
6395 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6396 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6397 +
6398 .globl atomic64_sub_ret
6399 .type atomic64_sub_ret,#function
6400 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6401 BACKOFF_SETUP(%o2)
6402 1: ldx [%o1], %g1
6403 - sub %g1, %o0, %g7
6404 + subcc %g1, %o0, %g7
6405 +
6406 +#ifdef CONFIG_PAX_REFCOUNT
6407 + tvs %xcc, 6
6408 +#endif
6409 +
6410 casx [%o1], %g1, %g7
6411 cmp %g1, %g7
6412 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6413 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6414 index f73c224..662af10 100644
6415 --- a/arch/sparc/lib/ksyms.c
6416 +++ b/arch/sparc/lib/ksyms.c
6417 @@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
6418
6419 /* Atomic counter implementation. */
6420 EXPORT_SYMBOL(atomic_add);
6421 +EXPORT_SYMBOL(atomic_add_unchecked);
6422 EXPORT_SYMBOL(atomic_add_ret);
6423 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
6424 EXPORT_SYMBOL(atomic_sub);
6425 +EXPORT_SYMBOL(atomic_sub_unchecked);
6426 EXPORT_SYMBOL(atomic_sub_ret);
6427 EXPORT_SYMBOL(atomic64_add);
6428 +EXPORT_SYMBOL(atomic64_add_unchecked);
6429 EXPORT_SYMBOL(atomic64_add_ret);
6430 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6431 EXPORT_SYMBOL(atomic64_sub);
6432 +EXPORT_SYMBOL(atomic64_sub_unchecked);
6433 EXPORT_SYMBOL(atomic64_sub_ret);
6434
6435 /* Atomic bit operations. */
6436 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6437 index 301421c..e2535d1 100644
6438 --- a/arch/sparc/mm/Makefile
6439 +++ b/arch/sparc/mm/Makefile
6440 @@ -2,7 +2,7 @@
6441 #
6442
6443 asflags-y := -ansi
6444 -ccflags-y := -Werror
6445 +#ccflags-y := -Werror
6446
6447 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
6448 obj-y += fault_$(BITS).o
6449 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6450 index df3155a..eb708b8 100644
6451 --- a/arch/sparc/mm/fault_32.c
6452 +++ b/arch/sparc/mm/fault_32.c
6453 @@ -21,6 +21,9 @@
6454 #include <linux/perf_event.h>
6455 #include <linux/interrupt.h>
6456 #include <linux/kdebug.h>
6457 +#include <linux/slab.h>
6458 +#include <linux/pagemap.h>
6459 +#include <linux/compiler.h>
6460
6461 #include <asm/page.h>
6462 #include <asm/pgtable.h>
6463 @@ -207,6 +210,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6464 return safe_compute_effective_address(regs, insn);
6465 }
6466
6467 +#ifdef CONFIG_PAX_PAGEEXEC
6468 +#ifdef CONFIG_PAX_DLRESOLVE
6469 +static void pax_emuplt_close(struct vm_area_struct *vma)
6470 +{
6471 + vma->vm_mm->call_dl_resolve = 0UL;
6472 +}
6473 +
6474 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6475 +{
6476 + unsigned int *kaddr;
6477 +
6478 + vmf->page = alloc_page(GFP_HIGHUSER);
6479 + if (!vmf->page)
6480 + return VM_FAULT_OOM;
6481 +
6482 + kaddr = kmap(vmf->page);
6483 + memset(kaddr, 0, PAGE_SIZE);
6484 + kaddr[0] = 0x9DE3BFA8U; /* save */
6485 + flush_dcache_page(vmf->page);
6486 + kunmap(vmf->page);
6487 + return VM_FAULT_MAJOR;
6488 +}
6489 +
6490 +static const struct vm_operations_struct pax_vm_ops = {
6491 + .close = pax_emuplt_close,
6492 + .fault = pax_emuplt_fault
6493 +};
6494 +
6495 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6496 +{
6497 + int ret;
6498 +
6499 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6500 + vma->vm_mm = current->mm;
6501 + vma->vm_start = addr;
6502 + vma->vm_end = addr + PAGE_SIZE;
6503 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6504 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6505 + vma->vm_ops = &pax_vm_ops;
6506 +
6507 + ret = insert_vm_struct(current->mm, vma);
6508 + if (ret)
6509 + return ret;
6510 +
6511 + ++current->mm->total_vm;
6512 + return 0;
6513 +}
6514 +#endif
6515 +
6516 +/*
6517 + * PaX: decide what to do with offenders (regs->pc = fault address)
6518 + *
6519 + * returns 1 when task should be killed
6520 + * 2 when patched PLT trampoline was detected
6521 + * 3 when unpatched PLT trampoline was detected
6522 + */
6523 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6524 +{
6525 +
6526 +#ifdef CONFIG_PAX_EMUPLT
6527 + int err;
6528 +
6529 + do { /* PaX: patched PLT emulation #1 */
6530 + unsigned int sethi1, sethi2, jmpl;
6531 +
6532 + err = get_user(sethi1, (unsigned int *)regs->pc);
6533 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6534 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6535 +
6536 + if (err)
6537 + break;
6538 +
6539 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6540 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6541 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6542 + {
6543 + unsigned int addr;
6544 +
6545 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6546 + addr = regs->u_regs[UREG_G1];
6547 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6548 + regs->pc = addr;
6549 + regs->npc = addr+4;
6550 + return 2;
6551 + }
6552 + } while (0);
6553 +
6554 + { /* PaX: patched PLT emulation #2 */
6555 + unsigned int ba;
6556 +
6557 + err = get_user(ba, (unsigned int *)regs->pc);
6558 +
6559 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6560 + unsigned int addr;
6561 +
6562 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6563 + regs->pc = addr;
6564 + regs->npc = addr+4;
6565 + return 2;
6566 + }
6567 + }
6568 +
6569 + do { /* PaX: patched PLT emulation #3 */
6570 + unsigned int sethi, jmpl, nop;
6571 +
6572 + err = get_user(sethi, (unsigned int *)regs->pc);
6573 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6574 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6575 +
6576 + if (err)
6577 + break;
6578 +
6579 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6580 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6581 + nop == 0x01000000U)
6582 + {
6583 + unsigned int addr;
6584 +
6585 + addr = (sethi & 0x003FFFFFU) << 10;
6586 + regs->u_regs[UREG_G1] = addr;
6587 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6588 + regs->pc = addr;
6589 + regs->npc = addr+4;
6590 + return 2;
6591 + }
6592 + } while (0);
6593 +
6594 + do { /* PaX: unpatched PLT emulation step 1 */
6595 + unsigned int sethi, ba, nop;
6596 +
6597 + err = get_user(sethi, (unsigned int *)regs->pc);
6598 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6599 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6600 +
6601 + if (err)
6602 + break;
6603 +
6604 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6605 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6606 + nop == 0x01000000U)
6607 + {
6608 + unsigned int addr, save, call;
6609 +
6610 + if ((ba & 0xFFC00000U) == 0x30800000U)
6611 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6612 + else
6613 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6614 +
6615 + err = get_user(save, (unsigned int *)addr);
6616 + err |= get_user(call, (unsigned int *)(addr+4));
6617 + err |= get_user(nop, (unsigned int *)(addr+8));
6618 + if (err)
6619 + break;
6620 +
6621 +#ifdef CONFIG_PAX_DLRESOLVE
6622 + if (save == 0x9DE3BFA8U &&
6623 + (call & 0xC0000000U) == 0x40000000U &&
6624 + nop == 0x01000000U)
6625 + {
6626 + struct vm_area_struct *vma;
6627 + unsigned long call_dl_resolve;
6628 +
6629 + down_read(&current->mm->mmap_sem);
6630 + call_dl_resolve = current->mm->call_dl_resolve;
6631 + up_read(&current->mm->mmap_sem);
6632 + if (likely(call_dl_resolve))
6633 + goto emulate;
6634 +
6635 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6636 +
6637 + down_write(&current->mm->mmap_sem);
6638 + if (current->mm->call_dl_resolve) {
6639 + call_dl_resolve = current->mm->call_dl_resolve;
6640 + up_write(&current->mm->mmap_sem);
6641 + if (vma)
6642 + kmem_cache_free(vm_area_cachep, vma);
6643 + goto emulate;
6644 + }
6645 +
6646 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6647 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6648 + up_write(&current->mm->mmap_sem);
6649 + if (vma)
6650 + kmem_cache_free(vm_area_cachep, vma);
6651 + return 1;
6652 + }
6653 +
6654 + if (pax_insert_vma(vma, call_dl_resolve)) {
6655 + up_write(&current->mm->mmap_sem);
6656 + kmem_cache_free(vm_area_cachep, vma);
6657 + return 1;
6658 + }
6659 +
6660 + current->mm->call_dl_resolve = call_dl_resolve;
6661 + up_write(&current->mm->mmap_sem);
6662 +
6663 +emulate:
6664 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6665 + regs->pc = call_dl_resolve;
6666 + regs->npc = addr+4;
6667 + return 3;
6668 + }
6669 +#endif
6670 +
6671 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6672 + if ((save & 0xFFC00000U) == 0x05000000U &&
6673 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6674 + nop == 0x01000000U)
6675 + {
6676 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6677 + regs->u_regs[UREG_G2] = addr + 4;
6678 + addr = (save & 0x003FFFFFU) << 10;
6679 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6680 + regs->pc = addr;
6681 + regs->npc = addr+4;
6682 + return 3;
6683 + }
6684 + }
6685 + } while (0);
6686 +
6687 + do { /* PaX: unpatched PLT emulation step 2 */
6688 + unsigned int save, call, nop;
6689 +
6690 + err = get_user(save, (unsigned int *)(regs->pc-4));
6691 + err |= get_user(call, (unsigned int *)regs->pc);
6692 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6693 + if (err)
6694 + break;
6695 +
6696 + if (save == 0x9DE3BFA8U &&
6697 + (call & 0xC0000000U) == 0x40000000U &&
6698 + nop == 0x01000000U)
6699 + {
6700 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6701 +
6702 + regs->u_regs[UREG_RETPC] = regs->pc;
6703 + regs->pc = dl_resolve;
6704 + regs->npc = dl_resolve+4;
6705 + return 3;
6706 + }
6707 + } while (0);
6708 +#endif
6709 +
6710 + return 1;
6711 +}
6712 +
6713 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6714 +{
6715 + unsigned long i;
6716 +
6717 + printk(KERN_ERR "PAX: bytes at PC: ");
6718 + for (i = 0; i < 8; i++) {
6719 + unsigned int c;
6720 + if (get_user(c, (unsigned int *)pc+i))
6721 + printk(KERN_CONT "???????? ");
6722 + else
6723 + printk(KERN_CONT "%08x ", c);
6724 + }
6725 + printk("\n");
6726 +}
6727 +#endif
6728 +
6729 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6730 int text_fault)
6731 {
6732 @@ -282,6 +547,24 @@ good_area:
6733 if(!(vma->vm_flags & VM_WRITE))
6734 goto bad_area;
6735 } else {
6736 +
6737 +#ifdef CONFIG_PAX_PAGEEXEC
6738 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6739 + up_read(&mm->mmap_sem);
6740 + switch (pax_handle_fetch_fault(regs)) {
6741 +
6742 +#ifdef CONFIG_PAX_EMUPLT
6743 + case 2:
6744 + case 3:
6745 + return;
6746 +#endif
6747 +
6748 + }
6749 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6750 + do_group_exit(SIGKILL);
6751 + }
6752 +#endif
6753 +
6754 /* Allow reads even for write-only mappings */
6755 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6756 goto bad_area;
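The PLT emulation added above (and its 64-bit counterpart in fault_64.c below) recognises trampolines by masking SPARC instruction fields: `(insn & 0xFFC00000U) == 0x03000000U` matches `sethi %hi(X), %g1`, the low 22 bits shifted left by 10 recover X, and expressions of the form `(((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U)` sign-extend the 13-bit `jmpl` immediate. A short sketch of those two decoding steps (the helper names are mine; the patch open-codes the expressions inline):

```c
#include <stdint.h>

/* sethi %hi(X), %rd: imm22 sits in bits 21..0 and supplies bits 31..10 of X */
static uint32_t sethi_value(uint32_t insn)
{
	return (insn & 0x003FFFFFu) << 10;
}

/* Sign-extend the 13-bit simm13 field (sign bit = bit 12): force the upper
 * bits to 1, then flip and re-add the sign bit -- the same idiom the patch
 * uses for jmpl offsets. */
static uint32_t sign_extend_simm13(uint32_t insn)
{
	return (((insn | 0xFFFFE000u) ^ 0x00001000u) + 0x00001000u);
}
```

The `ba` checks use the same idiom with a 22-bit displacement (sign bit 21), shifted left by 2 to turn the word displacement into a byte offset before adding it to `regs->pc`.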
6757 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6758 index 1fe0429..aee2e87 100644
6759 --- a/arch/sparc/mm/fault_64.c
6760 +++ b/arch/sparc/mm/fault_64.c
6761 @@ -21,6 +21,9 @@
6762 #include <linux/kprobes.h>
6763 #include <linux/kdebug.h>
6764 #include <linux/percpu.h>
6765 +#include <linux/slab.h>
6766 +#include <linux/pagemap.h>
6767 +#include <linux/compiler.h>
6768
6769 #include <asm/page.h>
6770 #include <asm/pgtable.h>
6771 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6772 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6773 regs->tpc);
6774 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6775 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6776 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6777 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6778 dump_stack();
6779 unhandled_fault(regs->tpc, current, regs);
6780 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6781 show_regs(regs);
6782 }
6783
6784 +#ifdef CONFIG_PAX_PAGEEXEC
6785 +#ifdef CONFIG_PAX_DLRESOLVE
6786 +static void pax_emuplt_close(struct vm_area_struct *vma)
6787 +{
6788 + vma->vm_mm->call_dl_resolve = 0UL;
6789 +}
6790 +
6791 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6792 +{
6793 + unsigned int *kaddr;
6794 +
6795 + vmf->page = alloc_page(GFP_HIGHUSER);
6796 + if (!vmf->page)
6797 + return VM_FAULT_OOM;
6798 +
6799 + kaddr = kmap(vmf->page);
6800 + memset(kaddr, 0, PAGE_SIZE);
6801 + kaddr[0] = 0x9DE3BFA8U; /* save */
6802 + flush_dcache_page(vmf->page);
6803 + kunmap(vmf->page);
6804 + return VM_FAULT_MAJOR;
6805 +}
6806 +
6807 +static const struct vm_operations_struct pax_vm_ops = {
6808 + .close = pax_emuplt_close,
6809 + .fault = pax_emuplt_fault
6810 +};
6811 +
6812 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6813 +{
6814 + int ret;
6815 +
6816 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6817 + vma->vm_mm = current->mm;
6818 + vma->vm_start = addr;
6819 + vma->vm_end = addr + PAGE_SIZE;
6820 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6821 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6822 + vma->vm_ops = &pax_vm_ops;
6823 +
6824 + ret = insert_vm_struct(current->mm, vma);
6825 + if (ret)
6826 + return ret;
6827 +
6828 + ++current->mm->total_vm;
6829 + return 0;
6830 +}
6831 +#endif
6832 +
6833 +/*
6834 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6835 + *
6836 + * returns 1 when task should be killed
6837 + * 2 when patched PLT trampoline was detected
6838 + * 3 when unpatched PLT trampoline was detected
6839 + */
6840 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6841 +{
6842 +
6843 +#ifdef CONFIG_PAX_EMUPLT
6844 + int err;
6845 +
6846 + do { /* PaX: patched PLT emulation #1 */
6847 + unsigned int sethi1, sethi2, jmpl;
6848 +
6849 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6850 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6851 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6852 +
6853 + if (err)
6854 + break;
6855 +
6856 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6857 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6858 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6859 + {
6860 + unsigned long addr;
6861 +
6862 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6863 + addr = regs->u_regs[UREG_G1];
6864 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6865 +
6866 + if (test_thread_flag(TIF_32BIT))
6867 + addr &= 0xFFFFFFFFUL;
6868 +
6869 + regs->tpc = addr;
6870 + regs->tnpc = addr+4;
6871 + return 2;
6872 + }
6873 + } while (0);
6874 +
6875 + { /* PaX: patched PLT emulation #2 */
6876 + unsigned int ba;
6877 +
6878 + err = get_user(ba, (unsigned int *)regs->tpc);
6879 +
6880 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6881 + unsigned long addr;
6882 +
6883 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6884 +
6885 + if (test_thread_flag(TIF_32BIT))
6886 + addr &= 0xFFFFFFFFUL;
6887 +
6888 + regs->tpc = addr;
6889 + regs->tnpc = addr+4;
6890 + return 2;
6891 + }
6892 + }
6893 +
6894 + do { /* PaX: patched PLT emulation #3 */
6895 + unsigned int sethi, jmpl, nop;
6896 +
6897 + err = get_user(sethi, (unsigned int *)regs->tpc);
6898 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6899 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6900 +
6901 + if (err)
6902 + break;
6903 +
6904 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6905 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6906 + nop == 0x01000000U)
6907 + {
6908 + unsigned long addr;
6909 +
6910 + addr = (sethi & 0x003FFFFFU) << 10;
6911 + regs->u_regs[UREG_G1] = addr;
6912 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6913 +
6914 + if (test_thread_flag(TIF_32BIT))
6915 + addr &= 0xFFFFFFFFUL;
6916 +
6917 + regs->tpc = addr;
6918 + regs->tnpc = addr+4;
6919 + return 2;
6920 + }
6921 + } while (0);
6922 +
6923 + do { /* PaX: patched PLT emulation #4 */
6924 + unsigned int sethi, mov1, call, mov2;
6925 +
6926 + err = get_user(sethi, (unsigned int *)regs->tpc);
6927 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6928 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6929 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6930 +
6931 + if (err)
6932 + break;
6933 +
6934 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6935 + mov1 == 0x8210000FU &&
6936 + (call & 0xC0000000U) == 0x40000000U &&
6937 + mov2 == 0x9E100001U)
6938 + {
6939 + unsigned long addr;
6940 +
6941 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6942 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6943 +
6944 + if (test_thread_flag(TIF_32BIT))
6945 + addr &= 0xFFFFFFFFUL;
6946 +
6947 + regs->tpc = addr;
6948 + regs->tnpc = addr+4;
6949 + return 2;
6950 + }
6951 + } while (0);
6952 +
6953 + do { /* PaX: patched PLT emulation #5 */
6954 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6955 +
6956 + err = get_user(sethi, (unsigned int *)regs->tpc);
6957 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6958 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6959 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6960 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6961 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6962 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6963 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6964 +
6965 + if (err)
6966 + break;
6967 +
6968 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6969 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6970 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6971 + (or1 & 0xFFFFE000U) == 0x82106000U &&
6972 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6973 + sllx == 0x83287020U &&
6974 + jmpl == 0x81C04005U &&
6975 + nop == 0x01000000U)
6976 + {
6977 + unsigned long addr;
6978 +
6979 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6980 + regs->u_regs[UREG_G1] <<= 32;
6981 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6982 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6983 + regs->tpc = addr;
6984 + regs->tnpc = addr+4;
6985 + return 2;
6986 + }
6987 + } while (0);
6988 +
6989 + do { /* PaX: patched PLT emulation #6 */
6990 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6991 +
6992 + err = get_user(sethi, (unsigned int *)regs->tpc);
6993 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6994 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6995 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6996 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
6997 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6998 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6999 +
7000 + if (err)
7001 + break;
7002 +
7003 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7004 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7005 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7006 + sllx == 0x83287020U &&
7007 + (or & 0xFFFFE000U) == 0x8A116000U &&
7008 + jmpl == 0x81C04005U &&
7009 + nop == 0x01000000U)
7010 + {
7011 + unsigned long addr;
7012 +
7013 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7014 + regs->u_regs[UREG_G1] <<= 32;
7015 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7016 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7017 + regs->tpc = addr;
7018 + regs->tnpc = addr+4;
7019 + return 2;
7020 + }
7021 + } while (0);
7022 +
7023 + do { /* PaX: unpatched PLT emulation step 1 */
7024 + unsigned int sethi, ba, nop;
7025 +
7026 + err = get_user(sethi, (unsigned int *)regs->tpc);
7027 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7028 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7029 +
7030 + if (err)
7031 + break;
7032 +
7033 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7034 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7035 + nop == 0x01000000U)
7036 + {
7037 + unsigned long addr;
7038 + unsigned int save, call;
7039 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7040 +
7041 + if ((ba & 0xFFC00000U) == 0x30800000U)
7042 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7043 + else
7044 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7045 +
7046 + if (test_thread_flag(TIF_32BIT))
7047 + addr &= 0xFFFFFFFFUL;
7048 +
7049 + err = get_user(save, (unsigned int *)addr);
7050 + err |= get_user(call, (unsigned int *)(addr+4));
7051 + err |= get_user(nop, (unsigned int *)(addr+8));
7052 + if (err)
7053 + break;
7054 +
7055 +#ifdef CONFIG_PAX_DLRESOLVE
7056 + if (save == 0x9DE3BFA8U &&
7057 + (call & 0xC0000000U) == 0x40000000U &&
7058 + nop == 0x01000000U)
7059 + {
7060 + struct vm_area_struct *vma;
7061 + unsigned long call_dl_resolve;
7062 +
7063 + down_read(&current->mm->mmap_sem);
7064 + call_dl_resolve = current->mm->call_dl_resolve;
7065 + up_read(&current->mm->mmap_sem);
7066 + if (likely(call_dl_resolve))
7067 + goto emulate;
7068 +
7069 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7070 +
7071 + down_write(&current->mm->mmap_sem);
7072 + if (current->mm->call_dl_resolve) {
7073 + call_dl_resolve = current->mm->call_dl_resolve;
7074 + up_write(&current->mm->mmap_sem);
7075 + if (vma)
7076 + kmem_cache_free(vm_area_cachep, vma);
7077 + goto emulate;
7078 + }
7079 +
7080 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7081 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7082 + up_write(&current->mm->mmap_sem);
7083 + if (vma)
7084 + kmem_cache_free(vm_area_cachep, vma);
7085 + return 1;
7086 + }
7087 +
7088 + if (pax_insert_vma(vma, call_dl_resolve)) {
7089 + up_write(&current->mm->mmap_sem);
7090 + kmem_cache_free(vm_area_cachep, vma);
7091 + return 1;
7092 + }
7093 +
7094 + current->mm->call_dl_resolve = call_dl_resolve;
7095 + up_write(&current->mm->mmap_sem);
7096 +
7097 +emulate:
7098 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7099 + regs->tpc = call_dl_resolve;
7100 + regs->tnpc = addr+4;
7101 + return 3;
7102 + }
7103 +#endif
7104 +
7105 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7106 + if ((save & 0xFFC00000U) == 0x05000000U &&
7107 + (call & 0xFFFFE000U) == 0x85C0A000U &&
7108 + nop == 0x01000000U)
7109 + {
7110 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7111 + regs->u_regs[UREG_G2] = addr + 4;
7112 + addr = (save & 0x003FFFFFU) << 10;
7113 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7114 +
7115 + if (test_thread_flag(TIF_32BIT))
7116 + addr &= 0xFFFFFFFFUL;
7117 +
7118 + regs->tpc = addr;
7119 + regs->tnpc = addr+4;
7120 + return 3;
7121 + }
7122 +
7123 + /* PaX: 64-bit PLT stub */
7124 + err = get_user(sethi1, (unsigned int *)addr);
7125 + err |= get_user(sethi2, (unsigned int *)(addr+4));
7126 + err |= get_user(or1, (unsigned int *)(addr+8));
7127 + err |= get_user(or2, (unsigned int *)(addr+12));
7128 + err |= get_user(sllx, (unsigned int *)(addr+16));
7129 + err |= get_user(add, (unsigned int *)(addr+20));
7130 + err |= get_user(jmpl, (unsigned int *)(addr+24));
7131 + err |= get_user(nop, (unsigned int *)(addr+28));
7132 + if (err)
7133 + break;
7134 +
7135 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7136 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7137 + (or1 & 0xFFFFE000U) == 0x88112000U &&
7138 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7139 + sllx == 0x89293020U &&
7140 + add == 0x8A010005U &&
7141 + jmpl == 0x89C14000U &&
7142 + nop == 0x01000000U)
7143 + {
7144 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7145 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7146 + regs->u_regs[UREG_G4] <<= 32;
7147 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7148 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7149 + regs->u_regs[UREG_G4] = addr + 24;
7150 + addr = regs->u_regs[UREG_G5];
7151 + regs->tpc = addr;
7152 + regs->tnpc = addr+4;
7153 + return 3;
7154 + }
7155 + }
7156 + } while (0);
7157 +
7158 +#ifdef CONFIG_PAX_DLRESOLVE
7159 + do { /* PaX: unpatched PLT emulation step 2 */
7160 + unsigned int save, call, nop;
7161 +
7162 + err = get_user(save, (unsigned int *)(regs->tpc-4));
7163 + err |= get_user(call, (unsigned int *)regs->tpc);
7164 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7165 + if (err)
7166 + break;
7167 +
7168 + if (save == 0x9DE3BFA8U &&
7169 + (call & 0xC0000000U) == 0x40000000U &&
7170 + nop == 0x01000000U)
7171 + {
7172 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7173 +
7174 + if (test_thread_flag(TIF_32BIT))
7175 + dl_resolve &= 0xFFFFFFFFUL;
7176 +
7177 + regs->u_regs[UREG_RETPC] = regs->tpc;
7178 + regs->tpc = dl_resolve;
7179 + regs->tnpc = dl_resolve+4;
7180 + return 3;
7181 + }
7182 + } while (0);
7183 +#endif
7184 +
7185 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7186 + unsigned int sethi, ba, nop;
7187 +
7188 + err = get_user(sethi, (unsigned int *)regs->tpc);
7189 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7190 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7191 +
7192 + if (err)
7193 + break;
7194 +
7195 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7196 + (ba & 0xFFF00000U) == 0x30600000U &&
7197 + nop == 0x01000000U)
7198 + {
7199 + unsigned long addr;
7200 +
7201 + addr = (sethi & 0x003FFFFFU) << 10;
7202 + regs->u_regs[UREG_G1] = addr;
7203 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7204 +
7205 + if (test_thread_flag(TIF_32BIT))
7206 + addr &= 0xFFFFFFFFUL;
7207 +
7208 + regs->tpc = addr;
7209 + regs->tnpc = addr+4;
7210 + return 2;
7211 + }
7212 + } while (0);
7213 +
7214 +#endif
7215 +
7216 + return 1;
7217 +}
7218 +
7219 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7220 +{
7221 + unsigned long i;
7222 +
7223 + printk(KERN_ERR "PAX: bytes at PC: ");
7224 + for (i = 0; i < 8; i++) {
7225 + unsigned int c;
7226 + if (get_user(c, (unsigned int *)pc+i))
7227 + printk(KERN_CONT "???????? ");
7228 + else
7229 + printk(KERN_CONT "%08x ", c);
7230 + }
7231 + printk("\n");
7232 +}
7233 +#endif
7234 +
7235 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7236 {
7237 struct mm_struct *mm = current->mm;
7238 @@ -343,6 +797,29 @@ retry:
7239 if (!vma)
7240 goto bad_area;
7241
7242 +#ifdef CONFIG_PAX_PAGEEXEC
7243 + /* PaX: detect ITLB misses on non-exec pages */
7244 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7245 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7246 + {
7247 + if (address != regs->tpc)
7248 + goto good_area;
7249 +
7250 + up_read(&mm->mmap_sem);
7251 + switch (pax_handle_fetch_fault(regs)) {
7252 +
7253 +#ifdef CONFIG_PAX_EMUPLT
7254 + case 2:
7255 + case 3:
7256 + return;
7257 +#endif
7258 +
7259 + }
7260 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7261 + do_group_exit(SIGKILL);
7262 + }
7263 +#endif
7264 +
7265 /* Pure DTLB misses do not tell us whether the fault causing
7266 * load/store/atomic was a write or not, it only says that there
7267 * was no match. So in such a case we (carefully) read the
7268 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7269 index 07e1453..0a7d9e9 100644
7270 --- a/arch/sparc/mm/hugetlbpage.c
7271 +++ b/arch/sparc/mm/hugetlbpage.c
7272 @@ -67,7 +67,7 @@ full_search:
7273 }
7274 return -ENOMEM;
7275 }
7276 - if (likely(!vma || addr + len <= vma->vm_start)) {
7277 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7278 /*
7279 * Remember the place where we stopped the search:
7280 */
7281 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7282 /* make sure it can fit in the remaining address space */
7283 if (likely(addr > len)) {
7284 vma = find_vma(mm, addr-len);
7285 - if (!vma || addr <= vma->vm_start) {
7286 + if (check_heap_stack_gap(vma, addr - len, len)) {
7287 /* remember the address as a hint for next time */
7288 return (mm->free_area_cache = addr-len);
7289 }
7290 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7291 if (unlikely(mm->mmap_base < len))
7292 goto bottomup;
7293
7294 - addr = (mm->mmap_base-len) & HPAGE_MASK;
7295 + addr = mm->mmap_base - len;
7296
7297 do {
7298 + addr &= HPAGE_MASK;
7299 /*
7300 * Lookup failure means no vma is above this address,
7301 * else if new region fits below vma->vm_start,
7302 * return with success:
7303 */
7304 vma = find_vma(mm, addr);
7305 - if (likely(!vma || addr+len <= vma->vm_start)) {
7306 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7307 /* remember the address as a hint for next time */
7308 return (mm->free_area_cache = addr);
7309 }
7310 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7311 mm->cached_hole_size = vma->vm_start - addr;
7312
7313 /* try just below the current vma->vm_start */
7314 - addr = (vma->vm_start-len) & HPAGE_MASK;
7315 - } while (likely(len < vma->vm_start));
7316 + addr = skip_heap_stack_gap(vma, len);
7317 + } while (!IS_ERR_VALUE(addr));
7318
7319 bottomup:
7320 /*
7321 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7322 if (addr) {
7323 addr = ALIGN(addr, HPAGE_SIZE);
7324 vma = find_vma(mm, addr);
7325 - if (task_size - len >= addr &&
7326 - (!vma || addr + len <= vma->vm_start))
7327 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7328 return addr;
7329 }
7330 if (mm->get_unmapped_area == arch_get_unmapped_area)
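The three hugetlbpage.c hunks above all replace the open-coded `!vma || addr + len <= vma->vm_start` test with `check_heap_stack_gap()`, a helper this patch introduces elsewhere (its definition is not part of this section). As a rough, hedged sketch of the idea behind such a helper -- accept a candidate range only if it fits below the next mapping and, when that mapping is a downward-growing stack, also keeps a guard gap -- with the struct, flag value, and gap parameter all stand-ins rather than the patch's own code:

```c
#include <stdbool.h>

#define VM_GROWSDOWN	0x0100UL	/* stand-in flag value for the sketch */

struct vma_sketch {			/* minimal stand-in for vm_area_struct */
	unsigned long vm_start;
	unsigned long vm_flags;
};

static bool check_heap_stack_gap_sketch(const struct vma_sketch *vma,
					unsigned long addr, unsigned long len,
					unsigned long heap_stack_gap)
{
	if (!vma)
		return true;			/* nothing above: fits */
	if (addr + len > vma->vm_start)
		return false;			/* overlaps the next mapping */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return true;			/* the legacy check suffices here */
	/* assumed policy: keep a configurable gap below a growing stack */
	return addr + len + heap_stack_gap <= vma->vm_start;
}
```

The companion `skip_heap_stack_gap()` used in the top-down loop is likewise defined elsewhere in the patch and is not sketched here.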
7331 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7332 index c5f9021..7591bae 100644
7333 --- a/arch/sparc/mm/init_32.c
7334 +++ b/arch/sparc/mm/init_32.c
7335 @@ -315,6 +315,9 @@ extern void device_scan(void);
7336 pgprot_t PAGE_SHARED __read_mostly;
7337 EXPORT_SYMBOL(PAGE_SHARED);
7338
7339 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7340 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7341 +
7342 void __init paging_init(void)
7343 {
7344 switch(sparc_cpu_model) {
7345 @@ -343,17 +346,17 @@ void __init paging_init(void)
7346
7347 /* Initialize the protection map with non-constant, MMU dependent values. */
7348 protection_map[0] = PAGE_NONE;
7349 - protection_map[1] = PAGE_READONLY;
7350 - protection_map[2] = PAGE_COPY;
7351 - protection_map[3] = PAGE_COPY;
7352 + protection_map[1] = PAGE_READONLY_NOEXEC;
7353 + protection_map[2] = PAGE_COPY_NOEXEC;
7354 + protection_map[3] = PAGE_COPY_NOEXEC;
7355 protection_map[4] = PAGE_READONLY;
7356 protection_map[5] = PAGE_READONLY;
7357 protection_map[6] = PAGE_COPY;
7358 protection_map[7] = PAGE_COPY;
7359 protection_map[8] = PAGE_NONE;
7360 - protection_map[9] = PAGE_READONLY;
7361 - protection_map[10] = PAGE_SHARED;
7362 - protection_map[11] = PAGE_SHARED;
7363 + protection_map[9] = PAGE_READONLY_NOEXEC;
7364 + protection_map[10] = PAGE_SHARED_NOEXEC;
7365 + protection_map[11] = PAGE_SHARED_NOEXEC;
7366 protection_map[12] = PAGE_READONLY;
7367 protection_map[13] = PAGE_READONLY;
7368 protection_map[14] = PAGE_SHARED;
7369 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7370 index cbef74e..c38fead 100644
7371 --- a/arch/sparc/mm/srmmu.c
7372 +++ b/arch/sparc/mm/srmmu.c
7373 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7374 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7375 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7376 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7377 +
7378 +#ifdef CONFIG_PAX_PAGEEXEC
7379 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7380 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7381 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7382 +#endif
7383 +
7384 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7385 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7386
7387 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
7388 index f4500c6..889656c 100644
7389 --- a/arch/tile/include/asm/atomic_64.h
7390 +++ b/arch/tile/include/asm/atomic_64.h
7391 @@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7392
7393 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7394
7395 +#define atomic64_read_unchecked(v) atomic64_read(v)
7396 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7397 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7398 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7399 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7400 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
7401 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7402 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
7403 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7404 +
7405 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
7406 #define smp_mb__before_atomic_dec() smp_mb()
7407 #define smp_mb__after_atomic_dec() smp_mb()
7408 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
7409 index 392e533..536b092 100644
7410 --- a/arch/tile/include/asm/cache.h
7411 +++ b/arch/tile/include/asm/cache.h
7412 @@ -15,11 +15,12 @@
7413 #ifndef _ASM_TILE_CACHE_H
7414 #define _ASM_TILE_CACHE_H
7415
7416 +#include <linux/const.h>
7417 #include <arch/chip.h>
7418
7419 /* bytes per L1 data cache line */
7420 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
7421 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7422 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7423
7424 /* bytes per L2 cache line */
7425 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
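This tile cache.h change (repeated below for um and unicore32) swaps the plain `1` for `_AC(1,UL)`, so `L1_CACHE_BYTES` becomes an `unsigned long` constant in C while the same header stays usable from assembly, where a `UL` suffix would not assemble. For reference, the `_AC()` helper from the kernel's const.h works roughly like this:

```c
/* Sketch of the _AC() mechanism (mirrors include/linux/const.h). */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: drop the suffix */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C: paste it, e.g. 1UL */
#endif

#define L1_CACHE_SHIFT	5
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)	/* 32UL in C, 32 in asm */
```

That is also why these hunks add `#include <linux/const.h>` alongside the macro change.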
7426 diff --git a/arch/um/Makefile b/arch/um/Makefile
7427 index 55c0661..86ad413 100644
7428 --- a/arch/um/Makefile
7429 +++ b/arch/um/Makefile
7430 @@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7431 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7432 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
7433
7434 +ifdef CONSTIFY_PLUGIN
7435 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7436 +endif
7437 +
7438 #This will adjust *FLAGS accordingly to the platform.
7439 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
7440
7441 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7442 index 19e1bdd..3665b77 100644
7443 --- a/arch/um/include/asm/cache.h
7444 +++ b/arch/um/include/asm/cache.h
7445 @@ -1,6 +1,7 @@
7446 #ifndef __UM_CACHE_H
7447 #define __UM_CACHE_H
7448
7449 +#include <linux/const.h>
7450
7451 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7452 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7453 @@ -12,6 +13,6 @@
7454 # define L1_CACHE_SHIFT 5
7455 #endif
7456
7457 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7458 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7459
7460 #endif
7461 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7462 index 6c03acd..a5e0215 100644
7463 --- a/arch/um/include/asm/kmap_types.h
7464 +++ b/arch/um/include/asm/kmap_types.h
7465 @@ -23,6 +23,7 @@ enum km_type {
7466 KM_IRQ1,
7467 KM_SOFTIRQ0,
7468 KM_SOFTIRQ1,
7469 + KM_CLEARPAGE,
7470 KM_TYPE_NR
7471 };
7472
7473 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7474 index 7cfc3ce..cbd1a58 100644
7475 --- a/arch/um/include/asm/page.h
7476 +++ b/arch/um/include/asm/page.h
7477 @@ -14,6 +14,9 @@
7478 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7479 #define PAGE_MASK (~(PAGE_SIZE-1))
7480
7481 +#define ktla_ktva(addr) (addr)
7482 +#define ktva_ktla(addr) (addr)
7483 +
7484 #ifndef __ASSEMBLY__
7485
7486 struct page;
7487 diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
7488 index 0032f92..cd151e0 100644
7489 --- a/arch/um/include/asm/pgtable-3level.h
7490 +++ b/arch/um/include/asm/pgtable-3level.h
7491 @@ -58,6 +58,7 @@
7492 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
7493 #define pud_populate(mm, pud, pmd) \
7494 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
7495 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7496
7497 #ifdef CONFIG_64BIT
7498 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
7499 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7500 index 2b73ded..804f540 100644
7501 --- a/arch/um/kernel/process.c
7502 +++ b/arch/um/kernel/process.c
7503 @@ -404,22 +404,6 @@ int singlestepping(void * t)
7504 return 2;
7505 }
7506
7507 -/*
7508 - * Only x86 and x86_64 have an arch_align_stack().
7509 - * All other arches have "#define arch_align_stack(x) (x)"
7510 - * in their asm/system.h
7511 - * As this is included in UML from asm-um/system-generic.h,
7512 - * we can use it to behave as the subarch does.
7513 - */
7514 -#ifndef arch_align_stack
7515 -unsigned long arch_align_stack(unsigned long sp)
7516 -{
7517 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7518 - sp -= get_random_int() % 8192;
7519 - return sp & ~0xf;
7520 -}
7521 -#endif
7522 -
7523 unsigned long get_wchan(struct task_struct *p)
7524 {
7525 unsigned long stack_page, sp, ip;
7526 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7527 index ad8f795..2c7eec6 100644
7528 --- a/arch/unicore32/include/asm/cache.h
7529 +++ b/arch/unicore32/include/asm/cache.h
7530 @@ -12,8 +12,10 @@
7531 #ifndef __UNICORE_CACHE_H__
7532 #define __UNICORE_CACHE_H__
7533
7534 -#define L1_CACHE_SHIFT (5)
7535 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7536 +#include <linux/const.h>
7537 +
7538 +#define L1_CACHE_SHIFT 5
7539 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7540
7541 /*
7542 * Memory returned by kmalloc() may be used for DMA, so we must make
7543 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7544 index c9866b0..fe53aef 100644
7545 --- a/arch/x86/Kconfig
7546 +++ b/arch/x86/Kconfig
7547 @@ -229,7 +229,7 @@ config X86_HT
7548
7549 config X86_32_LAZY_GS
7550 def_bool y
7551 - depends on X86_32 && !CC_STACKPROTECTOR
7552 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7553
7554 config ARCH_HWEIGHT_CFLAGS
7555 string
7556 @@ -1042,7 +1042,7 @@ choice
7557
7558 config NOHIGHMEM
7559 bool "off"
7560 - depends on !X86_NUMAQ
7561 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7562 ---help---
7563 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7564 However, the address space of 32-bit x86 processors is only 4
7565 @@ -1079,7 +1079,7 @@ config NOHIGHMEM
7566
7567 config HIGHMEM4G
7568 bool "4GB"
7569 - depends on !X86_NUMAQ
7570 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7571 ---help---
7572 Select this if you have a 32-bit processor and between 1 and 4
7573 gigabytes of physical RAM.
7574 @@ -1133,7 +1133,7 @@ config PAGE_OFFSET
7575 hex
7576 default 0xB0000000 if VMSPLIT_3G_OPT
7577 default 0x80000000 if VMSPLIT_2G
7578 - default 0x78000000 if VMSPLIT_2G_OPT
7579 + default 0x70000000 if VMSPLIT_2G_OPT
7580 default 0x40000000 if VMSPLIT_1G
7581 default 0xC0000000
7582 depends on X86_32
7583 @@ -1523,6 +1523,7 @@ config SECCOMP
7584
7585 config CC_STACKPROTECTOR
7586 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7587 + depends on X86_64 || !PAX_MEMORY_UDEREF
7588 ---help---
7589 This option turns on the -fstack-protector GCC feature. This
7590 feature puts, at the beginning of functions, a canary value on
7591 @@ -1580,6 +1581,7 @@ config KEXEC_JUMP
7592 config PHYSICAL_START
7593 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7594 default "0x1000000"
7595 + range 0x400000 0x40000000
7596 ---help---
7597 This gives the physical address where the kernel is loaded.
7598
7599 @@ -1643,6 +1645,7 @@ config X86_NEED_RELOCS
7600 config PHYSICAL_ALIGN
7601 hex "Alignment value to which kernel should be aligned" if X86_32
7602 default "0x1000000"
7603 + range 0x400000 0x1000000 if PAX_KERNEXEC
7604 range 0x2000 0x1000000
7605 ---help---
7606 This value puts the alignment restrictions on physical address
7607 @@ -1674,9 +1677,10 @@ config HOTPLUG_CPU
7608 Say N if you want to disable CPU hotplug.
7609
7610 config COMPAT_VDSO
7611 - def_bool y
7612 + def_bool n
7613 prompt "Compat VDSO support"
7614 depends on X86_32 || IA32_EMULATION
7615 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7616 ---help---
7617 Map the 32-bit VDSO to the predictable old-style address too.
7618
7619 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7620 index 706e12e..62e4feb 100644
7621 --- a/arch/x86/Kconfig.cpu
7622 +++ b/arch/x86/Kconfig.cpu
7623 @@ -334,7 +334,7 @@ config X86_PPRO_FENCE
7624
7625 config X86_F00F_BUG
7626 def_bool y
7627 - depends on M586MMX || M586TSC || M586 || M486 || M386
7628 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7629
7630 config X86_INVD_BUG
7631 def_bool y
7632 @@ -358,7 +358,7 @@ config X86_POPAD_OK
7633
7634 config X86_ALIGNMENT_16
7635 def_bool y
7636 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7637 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7638
7639 config X86_INTEL_USERCOPY
7640 def_bool y
7641 @@ -404,7 +404,7 @@ config X86_CMPXCHG64
7642 # generates cmov.
7643 config X86_CMOV
7644 def_bool y
7645 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7646 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7647
7648 config X86_MINIMUM_CPU_FAMILY
7649 int
7650 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7651 index e46c214..7c72b55 100644
7652 --- a/arch/x86/Kconfig.debug
7653 +++ b/arch/x86/Kconfig.debug
7654 @@ -84,7 +84,7 @@ config X86_PTDUMP
7655 config DEBUG_RODATA
7656 bool "Write protect kernel read-only data structures"
7657 default y
7658 - depends on DEBUG_KERNEL
7659 + depends on DEBUG_KERNEL && BROKEN
7660 ---help---
7661 Mark the kernel read-only data as write-protected in the pagetables,
7662 in order to catch accidental (and incorrect) writes to such const
7663 @@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7664
7665 config DEBUG_SET_MODULE_RONX
7666 bool "Set loadable kernel module data as NX and text as RO"
7667 - depends on MODULES
7668 + depends on MODULES && BROKEN
7669 ---help---
7670 This option helps catch unintended modifications to loadable
7671 kernel module's text and read-only data. It also prevents execution
7672 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7673 index b1c611e..2c1a823 100644
7674 --- a/arch/x86/Makefile
7675 +++ b/arch/x86/Makefile
7676 @@ -46,6 +46,7 @@ else
7677 UTS_MACHINE := x86_64
7678 CHECKFLAGS += -D__x86_64__ -m64
7679
7680 + biarch := $(call cc-option,-m64)
7681 KBUILD_AFLAGS += -m64
7682 KBUILD_CFLAGS += -m64
7683
7684 @@ -222,3 +223,12 @@ define archhelp
7685 echo ' FDARGS="..." arguments for the booted kernel'
7686 echo ' FDINITRD=file initrd for the booted kernel'
7687 endef
7688 +
7689 +define OLD_LD
7690 +
7691 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7692 +*** Please upgrade your binutils to 2.18 or newer
7693 +endef
7694 +
7695 +archprepare:
7696 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7697 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7698 index 5a747dd..ff7b12c 100644
7699 --- a/arch/x86/boot/Makefile
7700 +++ b/arch/x86/boot/Makefile
7701 @@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7702 $(call cc-option, -fno-stack-protector) \
7703 $(call cc-option, -mpreferred-stack-boundary=2)
7704 KBUILD_CFLAGS += $(call cc-option, -m32)
7705 +ifdef CONSTIFY_PLUGIN
7706 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7707 +endif
7708 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7709 GCOV_PROFILE := n
7710
7711 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7712 index 878e4b9..20537ab 100644
7713 --- a/arch/x86/boot/bitops.h
7714 +++ b/arch/x86/boot/bitops.h
7715 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7716 u8 v;
7717 const u32 *p = (const u32 *)addr;
7718
7719 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7720 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7721 return v;
7722 }
7723
7724 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7725
7726 static inline void set_bit(int nr, void *addr)
7727 {
7728 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7729 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7730 }
7731
7732 #endif /* BOOT_BITOPS_H */
7733 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7734 index 18997e5..83d9c67 100644
7735 --- a/arch/x86/boot/boot.h
7736 +++ b/arch/x86/boot/boot.h
7737 @@ -85,7 +85,7 @@ static inline void io_delay(void)
7738 static inline u16 ds(void)
7739 {
7740 u16 seg;
7741 - asm("movw %%ds,%0" : "=rm" (seg));
7742 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7743 return seg;
7744 }
7745
7746 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7747 static inline int memcmp(const void *s1, const void *s2, size_t len)
7748 {
7749 u8 diff;
7750 - asm("repe; cmpsb; setnz %0"
7751 + asm volatile("repe; cmpsb; setnz %0"
7752 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7753 return diff;
7754 }
7755 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7756 index e398bb5..3a382ca 100644
7757 --- a/arch/x86/boot/compressed/Makefile
7758 +++ b/arch/x86/boot/compressed/Makefile
7759 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7760 KBUILD_CFLAGS += $(cflags-y)
7761 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7762 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7763 +ifdef CONSTIFY_PLUGIN
7764 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7765 +endif
7766
7767 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7768 GCOV_PROFILE := n
7769 diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
7770 index 0cdfc0d..6e79437 100644
7771 --- a/arch/x86/boot/compressed/eboot.c
7772 +++ b/arch/x86/boot/compressed/eboot.c
7773 @@ -122,7 +122,6 @@ again:
7774 *addr = max_addr;
7775 }
7776
7777 -free_pool:
7778 efi_call_phys1(sys_table->boottime->free_pool, map);
7779
7780 fail:
7781 @@ -186,7 +185,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
7782 if (i == map_size / desc_size)
7783 status = EFI_NOT_FOUND;
7784
7785 -free_pool:
7786 efi_call_phys1(sys_table->boottime->free_pool, map);
7787 fail:
7788 return status;
7789 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7790 index c85e3ac..6f5aa80 100644
7791 --- a/arch/x86/boot/compressed/head_32.S
7792 +++ b/arch/x86/boot/compressed/head_32.S
7793 @@ -106,7 +106,7 @@ preferred_addr:
7794 notl %eax
7795 andl %eax, %ebx
7796 #else
7797 - movl $LOAD_PHYSICAL_ADDR, %ebx
7798 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7799 #endif
7800
7801 /* Target address to relocate to for decompression */
7802 @@ -192,7 +192,7 @@ relocated:
7803 * and where it was actually loaded.
7804 */
7805 movl %ebp, %ebx
7806 - subl $LOAD_PHYSICAL_ADDR, %ebx
7807 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7808 jz 2f /* Nothing to be done if loaded at compiled addr. */
7809 /*
7810 * Process relocations.
7811 @@ -200,8 +200,7 @@ relocated:
7812
7813 1: subl $4, %edi
7814 movl (%edi), %ecx
7815 - testl %ecx, %ecx
7816 - jz 2f
7817 + jecxz 2f
7818 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7819 jmp 1b
7820 2:
7821 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7822 index 87e03a1..0d94c76 100644
7823 --- a/arch/x86/boot/compressed/head_64.S
7824 +++ b/arch/x86/boot/compressed/head_64.S
7825 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7826 notl %eax
7827 andl %eax, %ebx
7828 #else
7829 - movl $LOAD_PHYSICAL_ADDR, %ebx
7830 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7831 #endif
7832
7833 /* Target address to relocate to for decompression */
7834 @@ -263,7 +263,7 @@ preferred_addr:
7835 notq %rax
7836 andq %rax, %rbp
7837 #else
7838 - movq $LOAD_PHYSICAL_ADDR, %rbp
7839 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7840 #endif
7841
7842 /* Target address to relocate to for decompression */
7843 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7844 index 7116dcb..d9ae1d7 100644
7845 --- a/arch/x86/boot/compressed/misc.c
7846 +++ b/arch/x86/boot/compressed/misc.c
7847 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
7848 case PT_LOAD:
7849 #ifdef CONFIG_RELOCATABLE
7850 dest = output;
7851 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7852 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7853 #else
7854 dest = (void *)(phdr->p_paddr);
7855 #endif
7856 @@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7857 error("Destination address too large");
7858 #endif
7859 #ifndef CONFIG_RELOCATABLE
7860 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7861 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7862 error("Wrong destination address");
7863 #endif
7864
7865 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7866 index 4d3ff03..e4972ff 100644
7867 --- a/arch/x86/boot/cpucheck.c
7868 +++ b/arch/x86/boot/cpucheck.c
7869 @@ -74,7 +74,7 @@ static int has_fpu(void)
7870 u16 fcw = -1, fsw = -1;
7871 u32 cr0;
7872
7873 - asm("movl %%cr0,%0" : "=r" (cr0));
7874 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7875 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7876 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7877 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7878 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7879 {
7880 u32 f0, f1;
7881
7882 - asm("pushfl ; "
7883 + asm volatile("pushfl ; "
7884 "pushfl ; "
7885 "popl %0 ; "
7886 "movl %0,%1 ; "
7887 @@ -115,7 +115,7 @@ static void get_flags(void)
7888 set_bit(X86_FEATURE_FPU, cpu.flags);
7889
7890 if (has_eflag(X86_EFLAGS_ID)) {
7891 - asm("cpuid"
7892 + asm volatile("cpuid"
7893 : "=a" (max_intel_level),
7894 "=b" (cpu_vendor[0]),
7895 "=d" (cpu_vendor[1]),
7896 @@ -124,7 +124,7 @@ static void get_flags(void)
7897
7898 if (max_intel_level >= 0x00000001 &&
7899 max_intel_level <= 0x0000ffff) {
7900 - asm("cpuid"
7901 + asm volatile("cpuid"
7902 : "=a" (tfms),
7903 "=c" (cpu.flags[4]),
7904 "=d" (cpu.flags[0])
7905 @@ -136,7 +136,7 @@ static void get_flags(void)
7906 cpu.model += ((tfms >> 16) & 0xf) << 4;
7907 }
7908
7909 - asm("cpuid"
7910 + asm volatile("cpuid"
7911 : "=a" (max_amd_level)
7912 : "a" (0x80000000)
7913 : "ebx", "ecx", "edx");
7914 @@ -144,7 +144,7 @@ static void get_flags(void)
7915 if (max_amd_level >= 0x80000001 &&
7916 max_amd_level <= 0x8000ffff) {
7917 u32 eax = 0x80000001;
7918 - asm("cpuid"
7919 + asm volatile("cpuid"
7920 : "+a" (eax),
7921 "=c" (cpu.flags[6]),
7922 "=d" (cpu.flags[1])
7923 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7924 u32 ecx = MSR_K7_HWCR;
7925 u32 eax, edx;
7926
7927 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7928 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7929 eax &= ~(1 << 15);
7930 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7931 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7932
7933 get_flags(); /* Make sure it really did something */
7934 err = check_flags();
7935 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7936 u32 ecx = MSR_VIA_FCR;
7937 u32 eax, edx;
7938
7939 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7940 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7941 eax |= (1<<1)|(1<<7);
7942 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7943 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7944
7945 set_bit(X86_FEATURE_CX8, cpu.flags);
7946 err = check_flags();
7947 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7948 u32 eax, edx;
7949 u32 level = 1;
7950
7951 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7952 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7953 - asm("cpuid"
7954 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7955 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7956 + asm volatile("cpuid"
7957 : "+a" (level), "=d" (cpu.flags[0])
7958 : : "ecx", "ebx");
7959 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7960 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7961
7962 err = check_flags();
7963 }
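The cpucheck.c hunks above (like the earlier bitops.h and boot.h ones) mark inline asm with side effects as `volatile`. Without it, GCC treats an asm statement that has outputs as a pure function of its inputs: it may reuse an earlier result, hoist the statement, or drop it when the output appears unused, none of which is acceptable for `cpuid`, `rdmsr`/`wrmsr`, or control-register reads. A minimal illustration in the style of the patched `has_fpu()` (a sketch, not new functionality):

```c
/* 32-bit boot-code style: volatile forces every call to emit its own read
 * of %cr0 instead of letting the compiler reuse a cached value. */
static inline unsigned int read_cr0(void)
{
	unsigned int cr0;

	asm volatile("movl %%cr0,%0" : "=r" (cr0));
	return cr0;
}
```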
7964 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7965 index f1bbeeb..aff09cb 100644
7966 --- a/arch/x86/boot/header.S
7967 +++ b/arch/x86/boot/header.S
7968 @@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7969 # single linked list of
7970 # struct setup_data
7971
7972 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7973 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7974
7975 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7976 #define VO_INIT_SIZE (VO__end - VO__text)
7977 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7978 index db75d07..8e6d0af 100644
7979 --- a/arch/x86/boot/memory.c
7980 +++ b/arch/x86/boot/memory.c
7981 @@ -19,7 +19,7 @@
7982
7983 static int detect_memory_e820(void)
7984 {
7985 - int count = 0;
7986 + unsigned int count = 0;
7987 struct biosregs ireg, oreg;
7988 struct e820entry *desc = boot_params.e820_map;
7989 static struct e820entry buf; /* static so it is zeroed */
7990 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7991 index 11e8c6e..fdbb1ed 100644
7992 --- a/arch/x86/boot/video-vesa.c
7993 +++ b/arch/x86/boot/video-vesa.c
7994 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7995
7996 boot_params.screen_info.vesapm_seg = oreg.es;
7997 boot_params.screen_info.vesapm_off = oreg.di;
7998 + boot_params.screen_info.vesapm_size = oreg.cx;
7999 }
8000
8001 /*
8002 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8003 index 43eda28..5ab5fdb 100644
8004 --- a/arch/x86/boot/video.c
8005 +++ b/arch/x86/boot/video.c
8006 @@ -96,7 +96,7 @@ static void store_mode_params(void)
8007 static unsigned int get_entry(void)
8008 {
8009 char entry_buf[4];
8010 - int i, len = 0;
8011 + unsigned int i, len = 0;
8012 int key;
8013 unsigned int v;
8014
8015 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8016 index 5b577d5..3c1fed4 100644
8017 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
8018 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8019 @@ -8,6 +8,8 @@
8020 * including this sentence is retained in full.
8021 */
8022
8023 +#include <asm/alternative-asm.h>
8024 +
8025 .extern crypto_ft_tab
8026 .extern crypto_it_tab
8027 .extern crypto_fl_tab
8028 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8029 je B192; \
8030 leaq 32(r9),r9;
8031
8032 +#define ret pax_force_retaddr 0, 1; ret
8033 +
8034 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8035 movq r1,r2; \
8036 movq r3,r4; \
8037 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8038 index be6d9e3..21fbbca 100644
8039 --- a/arch/x86/crypto/aesni-intel_asm.S
8040 +++ b/arch/x86/crypto/aesni-intel_asm.S
8041 @@ -31,6 +31,7 @@
8042
8043 #include <linux/linkage.h>
8044 #include <asm/inst.h>
8045 +#include <asm/alternative-asm.h>
8046
8047 #ifdef __x86_64__
8048 .data
8049 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
8050 pop %r14
8051 pop %r13
8052 pop %r12
8053 + pax_force_retaddr 0, 1
8054 ret
8055 +ENDPROC(aesni_gcm_dec)
8056
8057
8058 /*****************************************************************************
8059 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
8060 pop %r14
8061 pop %r13
8062 pop %r12
8063 + pax_force_retaddr 0, 1
8064 ret
8065 +ENDPROC(aesni_gcm_enc)
8066
8067 #endif
8068
8069 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
8070 pxor %xmm1, %xmm0
8071 movaps %xmm0, (TKEYP)
8072 add $0x10, TKEYP
8073 + pax_force_retaddr_bts
8074 ret
8075
8076 .align 4
8077 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
8078 shufps $0b01001110, %xmm2, %xmm1
8079 movaps %xmm1, 0x10(TKEYP)
8080 add $0x20, TKEYP
8081 + pax_force_retaddr_bts
8082 ret
8083
8084 .align 4
8085 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
8086
8087 movaps %xmm0, (TKEYP)
8088 add $0x10, TKEYP
8089 + pax_force_retaddr_bts
8090 ret
8091
8092 .align 4
8093 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
8094 pxor %xmm1, %xmm2
8095 movaps %xmm2, (TKEYP)
8096 add $0x10, TKEYP
8097 + pax_force_retaddr_bts
8098 ret
8099
8100 /*
8101 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
8102 #ifndef __x86_64__
8103 popl KEYP
8104 #endif
8105 + pax_force_retaddr 0, 1
8106 ret
8107 +ENDPROC(aesni_set_key)
8108
8109 /*
8110 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8111 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
8112 popl KLEN
8113 popl KEYP
8114 #endif
8115 + pax_force_retaddr 0, 1
8116 ret
8117 +ENDPROC(aesni_enc)
8118
8119 /*
8120 * _aesni_enc1: internal ABI
8121 @@ -1959,6 +1972,7 @@ _aesni_enc1:
8122 AESENC KEY STATE
8123 movaps 0x70(TKEYP), KEY
8124 AESENCLAST KEY STATE
8125 + pax_force_retaddr_bts
8126 ret
8127
8128 /*
8129 @@ -2067,6 +2081,7 @@ _aesni_enc4:
8130 AESENCLAST KEY STATE2
8131 AESENCLAST KEY STATE3
8132 AESENCLAST KEY STATE4
8133 + pax_force_retaddr_bts
8134 ret
8135
8136 /*
8137 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
8138 popl KLEN
8139 popl KEYP
8140 #endif
8141 + pax_force_retaddr 0, 1
8142 ret
8143 +ENDPROC(aesni_dec)
8144
8145 /*
8146 * _aesni_dec1: internal ABI
8147 @@ -2146,6 +2163,7 @@ _aesni_dec1:
8148 AESDEC KEY STATE
8149 movaps 0x70(TKEYP), KEY
8150 AESDECLAST KEY STATE
8151 + pax_force_retaddr_bts
8152 ret
8153
8154 /*
8155 @@ -2254,6 +2272,7 @@ _aesni_dec4:
8156 AESDECLAST KEY STATE2
8157 AESDECLAST KEY STATE3
8158 AESDECLAST KEY STATE4
8159 + pax_force_retaddr_bts
8160 ret
8161
8162 /*
8163 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
8164 popl KEYP
8165 popl LEN
8166 #endif
8167 + pax_force_retaddr 0, 1
8168 ret
8169 +ENDPROC(aesni_ecb_enc)
8170
8171 /*
8172 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8173 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
8174 popl KEYP
8175 popl LEN
8176 #endif
8177 + pax_force_retaddr 0, 1
8178 ret
8179 +ENDPROC(aesni_ecb_dec)
8180
8181 /*
8182 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8183 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
8184 popl LEN
8185 popl IVP
8186 #endif
8187 + pax_force_retaddr 0, 1
8188 ret
8189 +ENDPROC(aesni_cbc_enc)
8190
8191 /*
8192 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8193 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
8194 popl LEN
8195 popl IVP
8196 #endif
8197 + pax_force_retaddr 0, 1
8198 ret
8199 +ENDPROC(aesni_cbc_dec)
8200
8201 #ifdef __x86_64__
8202 .align 16
8203 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
8204 mov $1, TCTR_LOW
8205 MOVQ_R64_XMM TCTR_LOW INC
8206 MOVQ_R64_XMM CTR TCTR_LOW
8207 + pax_force_retaddr_bts
8208 ret
8209
8210 /*
8211 @@ -2552,6 +2580,7 @@ _aesni_inc:
8212 .Linc_low:
8213 movaps CTR, IV
8214 PSHUFB_XMM BSWAP_MASK IV
8215 + pax_force_retaddr_bts
8216 ret
8217
8218 /*
8219 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
8220 .Lctr_enc_ret:
8221 movups IV, (IVP)
8222 .Lctr_enc_just_ret:
8223 + pax_force_retaddr 0, 1
8224 ret
8225 +ENDPROC(aesni_ctr_enc)
8226 #endif
8227 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8228 index 391d245..67f35c2 100644
8229 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
8230 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8231 @@ -20,6 +20,8 @@
8232 *
8233 */
8234
8235 +#include <asm/alternative-asm.h>
8236 +
8237 .file "blowfish-x86_64-asm.S"
8238 .text
8239
8240 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
8241 jnz __enc_xor;
8242
8243 write_block();
8244 + pax_force_retaddr 0, 1
8245 ret;
8246 __enc_xor:
8247 xor_block();
8248 + pax_force_retaddr 0, 1
8249 ret;
8250
8251 .align 8
8252 @@ -188,6 +192,7 @@ blowfish_dec_blk:
8253
8254 movq %r11, %rbp;
8255
8256 + pax_force_retaddr 0, 1
8257 ret;
8258
8259 /**********************************************************************
8260 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
8261
8262 popq %rbx;
8263 popq %rbp;
8264 + pax_force_retaddr 0, 1
8265 ret;
8266
8267 __enc_xor4:
8268 @@ -349,6 +355,7 @@ __enc_xor4:
8269
8270 popq %rbx;
8271 popq %rbp;
8272 + pax_force_retaddr 0, 1
8273 ret;
8274
8275 .align 8
8276 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
8277 popq %rbx;
8278 popq %rbp;
8279
8280 + pax_force_retaddr 0, 1
8281 ret;
8282
8283 diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
8284 index 0b33743..7a56206 100644
8285 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S
8286 +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
8287 @@ -20,6 +20,8 @@
8288 *
8289 */
8290
8291 +#include <asm/alternative-asm.h>
8292 +
8293 .file "camellia-x86_64-asm_64.S"
8294 .text
8295
8296 @@ -229,12 +231,14 @@ __enc_done:
8297 enc_outunpack(mov, RT1);
8298
8299 movq RRBP, %rbp;
8300 + pax_force_retaddr 0, 1
8301 ret;
8302
8303 __enc_xor:
8304 enc_outunpack(xor, RT1);
8305
8306 movq RRBP, %rbp;
8307 + pax_force_retaddr 0, 1
8308 ret;
8309
8310 .global camellia_dec_blk;
8311 @@ -275,6 +279,7 @@ __dec_rounds16:
8312 dec_outunpack();
8313
8314 movq RRBP, %rbp;
8315 + pax_force_retaddr 0, 1
8316 ret;
8317
8318 /**********************************************************************
8319 @@ -468,6 +473,7 @@ __enc2_done:
8320
8321 movq RRBP, %rbp;
8322 popq %rbx;
8323 + pax_force_retaddr 0, 1
8324 ret;
8325
8326 __enc2_xor:
8327 @@ -475,6 +481,7 @@ __enc2_xor:
8328
8329 movq RRBP, %rbp;
8330 popq %rbx;
8331 + pax_force_retaddr 0, 1
8332 ret;
8333
8334 .global camellia_dec_blk_2way;
8335 @@ -517,4 +524,5 @@ __dec2_rounds16:
8336
8337 movq RRBP, %rbp;
8338 movq RXOR, %rbx;
8339 + pax_force_retaddr 0, 1
8340 ret;
8341 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8342 index 6214a9b..1f4fc9a 100644
8343 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8344 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8345 @@ -1,3 +1,5 @@
8346 +#include <asm/alternative-asm.h>
8347 +
8348 # enter ECRYPT_encrypt_bytes
8349 .text
8350 .p2align 5
8351 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8352 add %r11,%rsp
8353 mov %rdi,%rax
8354 mov %rsi,%rdx
8355 + pax_force_retaddr 0, 1
8356 ret
8357 # bytesatleast65:
8358 ._bytesatleast65:
8359 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
8360 add %r11,%rsp
8361 mov %rdi,%rax
8362 mov %rsi,%rdx
8363 + pax_force_retaddr
8364 ret
8365 # enter ECRYPT_ivsetup
8366 .text
8367 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8368 add %r11,%rsp
8369 mov %rdi,%rax
8370 mov %rsi,%rdx
8371 + pax_force_retaddr
8372 ret
8373 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8374 index 3ee1ff0..cbc568b 100644
8375 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8376 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8377 @@ -24,6 +24,8 @@
8378 *
8379 */
8380
8381 +#include <asm/alternative-asm.h>
8382 +
8383 .file "serpent-sse2-x86_64-asm_64.S"
8384 .text
8385
8386 @@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
8387 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8388 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8389
8390 + pax_force_retaddr
8391 ret;
8392
8393 __enc_xor8:
8394 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8395 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8396
8397 + pax_force_retaddr
8398 ret;
8399
8400 .align 8
8401 @@ -755,4 +759,5 @@ serpent_dec_blk_8way:
8402 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8403 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8404
8405 + pax_force_retaddr
8406 ret;
8407 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8408 index b2c2f57..8470cab 100644
8409 --- a/arch/x86/crypto/sha1_ssse3_asm.S
8410 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
8411 @@ -28,6 +28,8 @@
8412 * (at your option) any later version.
8413 */
8414
8415 +#include <asm/alternative-asm.h>
8416 +
8417 #define CTX %rdi // arg1
8418 #define BUF %rsi // arg2
8419 #define CNT %rdx // arg3
8420 @@ -104,6 +106,7 @@
8421 pop %r12
8422 pop %rbp
8423 pop %rbx
8424 + pax_force_retaddr 0, 1
8425 ret
8426
8427 .size \name, .-\name
8428 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8429 index 5b012a2..36d5364 100644
8430 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8431 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8432 @@ -20,6 +20,8 @@
8433 *
8434 */
8435
8436 +#include <asm/alternative-asm.h>
8437 +
8438 .file "twofish-x86_64-asm-3way.S"
8439 .text
8440
8441 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8442 popq %r13;
8443 popq %r14;
8444 popq %r15;
8445 + pax_force_retaddr 0, 1
8446 ret;
8447
8448 __enc_xor3:
8449 @@ -271,6 +274,7 @@ __enc_xor3:
8450 popq %r13;
8451 popq %r14;
8452 popq %r15;
8453 + pax_force_retaddr 0, 1
8454 ret;
8455
8456 .global twofish_dec_blk_3way
8457 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8458 popq %r13;
8459 popq %r14;
8460 popq %r15;
8461 + pax_force_retaddr 0, 1
8462 ret;
8463
8464 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8465 index 7bcf3fc..f53832f 100644
8466 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8467 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8468 @@ -21,6 +21,7 @@
8469 .text
8470
8471 #include <asm/asm-offsets.h>
8472 +#include <asm/alternative-asm.h>
8473
8474 #define a_offset 0
8475 #define b_offset 4
8476 @@ -268,6 +269,7 @@ twofish_enc_blk:
8477
8478 popq R1
8479 movq $1,%rax
8480 + pax_force_retaddr 0, 1
8481 ret
8482
8483 twofish_dec_blk:
8484 @@ -319,4 +321,5 @@ twofish_dec_blk:
8485
8486 popq R1
8487 movq $1,%rax
8488 + pax_force_retaddr 0, 1
8489 ret
8490 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8491 index 07b3a68..bd2a388 100644
8492 --- a/arch/x86/ia32/ia32_aout.c
8493 +++ b/arch/x86/ia32/ia32_aout.c
8494 @@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8495 unsigned long dump_start, dump_size;
8496 struct user32 dump;
8497
8498 + memset(&dump, 0, sizeof(dump));
8499 +
8500 fs = get_fs();
8501 set_fs(KERNEL_DS);
8502 has_dumped = 1;
8503 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8504 index a69245b..6d145f4 100644
8505 --- a/arch/x86/ia32/ia32_signal.c
8506 +++ b/arch/x86/ia32/ia32_signal.c
8507 @@ -168,7 +168,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8508 }
8509 seg = get_fs();
8510 set_fs(KERNEL_DS);
8511 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8512 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8513 set_fs(seg);
8514 if (ret >= 0 && uoss_ptr) {
8515 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8516 @@ -369,7 +369,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8517 */
8518 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8519 size_t frame_size,
8520 - void **fpstate)
8521 + void __user **fpstate)
8522 {
8523 unsigned long sp;
8524
8525 @@ -390,7 +390,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8526
8527 if (used_math()) {
8528 sp = sp - sig_xstate_ia32_size;
8529 - *fpstate = (struct _fpstate_ia32 *) sp;
8530 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8531 if (save_i387_xstate_ia32(*fpstate) < 0)
8532 return (void __user *) -1L;
8533 }
8534 @@ -398,7 +398,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8535 sp -= frame_size;
8536 /* Align the stack pointer according to the i386 ABI,
8537 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8538 - sp = ((sp + 4) & -16ul) - 4;
8539 + sp = ((sp - 12) & -16ul) - 4;
8540 return (void __user *) sp;
8541 }
8542
8543 @@ -456,7 +456,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8544 * These are actually not used anymore, but left because some
8545 * gdb versions depend on them as a marker.
8546 */
8547 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8548 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8549 } put_user_catch(err);
8550
8551 if (err)
8552 @@ -498,7 +498,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8553 0xb8,
8554 __NR_ia32_rt_sigreturn,
8555 0x80cd,
8556 - 0,
8557 + 0
8558 };
8559
8560 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8561 @@ -528,16 +528,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8562
8563 if (ka->sa.sa_flags & SA_RESTORER)
8564 restorer = ka->sa.sa_restorer;
8565 + else if (current->mm->context.vdso)
8566 + /* Return stub is in 32bit vsyscall page */
8567 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8568 else
8569 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8570 - rt_sigreturn);
8571 + restorer = &frame->retcode;
8572 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8573
8574 /*
8575 * Not actually used anymore, but left because some gdb
8576 * versions need it.
8577 */
8578 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8579 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8580 } put_user_catch(err);
8581
8582 if (err)
8583 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8584 index e3e7340..05ed805 100644
8585 --- a/arch/x86/ia32/ia32entry.S
8586 +++ b/arch/x86/ia32/ia32entry.S
8587 @@ -13,8 +13,10 @@
8588 #include <asm/thread_info.h>
8589 #include <asm/segment.h>
8590 #include <asm/irqflags.h>
8591 +#include <asm/pgtable.h>
8592 #include <linux/linkage.h>
8593 #include <linux/err.h>
8594 +#include <asm/alternative-asm.h>
8595
8596 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8597 #include <linux/elf-em.h>
8598 @@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8599 ENDPROC(native_irq_enable_sysexit)
8600 #endif
8601
8602 + .macro pax_enter_kernel_user
8603 + pax_set_fptr_mask
8604 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8605 + call pax_enter_kernel_user
8606 +#endif
8607 + .endm
8608 +
8609 + .macro pax_exit_kernel_user
8610 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8611 + call pax_exit_kernel_user
8612 +#endif
8613 +#ifdef CONFIG_PAX_RANDKSTACK
8614 + pushq %rax
8615 + pushq %r11
8616 + call pax_randomize_kstack
8617 + popq %r11
8618 + popq %rax
8619 +#endif
8620 + .endm
8621 +
8622 +.macro pax_erase_kstack
8623 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8624 + call pax_erase_kstack
8625 +#endif
8626 +.endm
8627 +
8628 /*
8629 * 32bit SYSENTER instruction entry.
8630 *
8631 @@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8632 CFI_REGISTER rsp,rbp
8633 SWAPGS_UNSAFE_STACK
8634 movq PER_CPU_VAR(kernel_stack), %rsp
8635 - addq $(KERNEL_STACK_OFFSET),%rsp
8636 - /*
8637 - * No need to follow this irqs on/off section: the syscall
8638 - * disabled irqs, here we enable it straight after entry:
8639 - */
8640 - ENABLE_INTERRUPTS(CLBR_NONE)
8641 movl %ebp,%ebp /* zero extension */
8642 pushq_cfi $__USER32_DS
8643 /*CFI_REL_OFFSET ss,0*/
8644 @@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8645 CFI_REL_OFFSET rsp,0
8646 pushfq_cfi
8647 /*CFI_REL_OFFSET rflags,0*/
8648 - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8649 - CFI_REGISTER rip,r10
8650 + orl $X86_EFLAGS_IF,(%rsp)
8651 + GET_THREAD_INFO(%r11)
8652 + movl TI_sysenter_return(%r11), %r11d
8653 + CFI_REGISTER rip,r11
8654 pushq_cfi $__USER32_CS
8655 /*CFI_REL_OFFSET cs,0*/
8656 movl %eax, %eax
8657 - pushq_cfi %r10
8658 + pushq_cfi %r11
8659 CFI_REL_OFFSET rip,0
8660 pushq_cfi %rax
8661 cld
8662 SAVE_ARGS 0,1,0
8663 + pax_enter_kernel_user
8664 + /*
8665 + * No need to follow this irqs on/off section: the syscall
8666 + * disabled irqs, here we enable it straight after entry:
8667 + */
8668 + ENABLE_INTERRUPTS(CLBR_NONE)
8669 /* no need to do an access_ok check here because rbp has been
8670 32bit zero extended */
8671 +
8672 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8673 + mov $PAX_USER_SHADOW_BASE,%r11
8674 + add %r11,%rbp
8675 +#endif
8676 +
8677 1: movl (%rbp),%ebp
8678 .section __ex_table,"a"
8679 .quad 1b,ia32_badarg
8680 .previous
8681 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8682 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8683 + GET_THREAD_INFO(%r11)
8684 + orl $TS_COMPAT,TI_status(%r11)
8685 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8686 CFI_REMEMBER_STATE
8687 jnz sysenter_tracesys
8688 cmpq $(IA32_NR_syscalls-1),%rax
8689 @@ -160,12 +197,15 @@ sysenter_do_call:
8690 sysenter_dispatch:
8691 call *ia32_sys_call_table(,%rax,8)
8692 movq %rax,RAX-ARGOFFSET(%rsp)
8693 + GET_THREAD_INFO(%r11)
8694 DISABLE_INTERRUPTS(CLBR_NONE)
8695 TRACE_IRQS_OFF
8696 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8697 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8698 jnz sysexit_audit
8699 sysexit_from_sys_call:
8700 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8701 + pax_exit_kernel_user
8702 + pax_erase_kstack
8703 + andl $~TS_COMPAT,TI_status(%r11)
8704 /* clear IF, that popfq doesn't enable interrupts early */
8705 andl $~0x200,EFLAGS-R11(%rsp)
8706 movl RIP-R11(%rsp),%edx /* User %eip */
8707 @@ -191,6 +231,9 @@ sysexit_from_sys_call:
8708 movl %eax,%esi /* 2nd arg: syscall number */
8709 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8710 call __audit_syscall_entry
8711 +
8712 + pax_erase_kstack
8713 +
8714 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8715 cmpq $(IA32_NR_syscalls-1),%rax
8716 ja ia32_badsys
8717 @@ -202,7 +245,7 @@ sysexit_from_sys_call:
8718 .endm
8719
8720 .macro auditsys_exit exit
8721 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8722 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8723 jnz ia32_ret_from_sys_call
8724 TRACE_IRQS_ON
8725 sti
8726 @@ -213,11 +256,12 @@ sysexit_from_sys_call:
8727 1: setbe %al /* 1 if error, 0 if not */
8728 movzbl %al,%edi /* zero-extend that into %edi */
8729 call __audit_syscall_exit
8730 + GET_THREAD_INFO(%r11)
8731 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8732 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8733 cli
8734 TRACE_IRQS_OFF
8735 - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8736 + testl %edi,TI_flags(%r11)
8737 jz \exit
8738 CLEAR_RREGS -ARGOFFSET
8739 jmp int_with_check
8740 @@ -235,7 +279,7 @@ sysexit_audit:
8741
8742 sysenter_tracesys:
8743 #ifdef CONFIG_AUDITSYSCALL
8744 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8745 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8746 jz sysenter_auditsys
8747 #endif
8748 SAVE_REST
8749 @@ -243,6 +287,9 @@ sysenter_tracesys:
8750 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8751 movq %rsp,%rdi /* &pt_regs -> arg1 */
8752 call syscall_trace_enter
8753 +
8754 + pax_erase_kstack
8755 +
8756 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8757 RESTORE_REST
8758 cmpq $(IA32_NR_syscalls-1),%rax
8759 @@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8760 ENTRY(ia32_cstar_target)
8761 CFI_STARTPROC32 simple
8762 CFI_SIGNAL_FRAME
8763 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8764 + CFI_DEF_CFA rsp,0
8765 CFI_REGISTER rip,rcx
8766 /*CFI_REGISTER rflags,r11*/
8767 SWAPGS_UNSAFE_STACK
8768 movl %esp,%r8d
8769 CFI_REGISTER rsp,r8
8770 movq PER_CPU_VAR(kernel_stack),%rsp
8771 + SAVE_ARGS 8*6,0,0
8772 + pax_enter_kernel_user
8773 /*
8774 * No need to follow this irqs on/off section: the syscall
8775 * disabled irqs and here we enable it straight after entry:
8776 */
8777 ENABLE_INTERRUPTS(CLBR_NONE)
8778 - SAVE_ARGS 8,0,0
8779 movl %eax,%eax /* zero extension */
8780 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8781 movq %rcx,RIP-ARGOFFSET(%rsp)
8782 @@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8783 /* no need to do an access_ok check here because r8 has been
8784 32bit zero extended */
8785 /* hardware stack frame is complete now */
8786 +
8787 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8788 + mov $PAX_USER_SHADOW_BASE,%r11
8789 + add %r11,%r8
8790 +#endif
8791 +
8792 1: movl (%r8),%r9d
8793 .section __ex_table,"a"
8794 .quad 1b,ia32_badarg
8795 .previous
8796 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8797 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8798 + GET_THREAD_INFO(%r11)
8799 + orl $TS_COMPAT,TI_status(%r11)
8800 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8801 CFI_REMEMBER_STATE
8802 jnz cstar_tracesys
8803 cmpq $IA32_NR_syscalls-1,%rax
8804 @@ -317,12 +372,15 @@ cstar_do_call:
8805 cstar_dispatch:
8806 call *ia32_sys_call_table(,%rax,8)
8807 movq %rax,RAX-ARGOFFSET(%rsp)
8808 + GET_THREAD_INFO(%r11)
8809 DISABLE_INTERRUPTS(CLBR_NONE)
8810 TRACE_IRQS_OFF
8811 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8812 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8813 jnz sysretl_audit
8814 sysretl_from_sys_call:
8815 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8816 + pax_exit_kernel_user
8817 + pax_erase_kstack
8818 + andl $~TS_COMPAT,TI_status(%r11)
8819 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8820 movl RIP-ARGOFFSET(%rsp),%ecx
8821 CFI_REGISTER rip,rcx
8822 @@ -350,7 +408,7 @@ sysretl_audit:
8823
8824 cstar_tracesys:
8825 #ifdef CONFIG_AUDITSYSCALL
8826 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8827 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8828 jz cstar_auditsys
8829 #endif
8830 xchgl %r9d,%ebp
8831 @@ -359,6 +417,9 @@ cstar_tracesys:
8832 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8833 movq %rsp,%rdi /* &pt_regs -> arg1 */
8834 call syscall_trace_enter
8835 +
8836 + pax_erase_kstack
8837 +
8838 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8839 RESTORE_REST
8840 xchgl %ebp,%r9d
8841 @@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
8842 CFI_REL_OFFSET rip,RIP-RIP
8843 PARAVIRT_ADJUST_EXCEPTION_FRAME
8844 SWAPGS
8845 - /*
8846 - * No need to follow this irqs on/off section: the syscall
8847 - * disabled irqs and here we enable it straight after entry:
8848 - */
8849 - ENABLE_INTERRUPTS(CLBR_NONE)
8850 movl %eax,%eax
8851 pushq_cfi %rax
8852 cld
8853 /* note the registers are not zero extended to the sf.
8854 this could be a problem. */
8855 SAVE_ARGS 0,1,0
8856 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8857 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8858 + pax_enter_kernel_user
8859 + /*
8860 + * No need to follow this irqs on/off section: the syscall
8861 + * disabled irqs and here we enable it straight after entry:
8862 + */
8863 + ENABLE_INTERRUPTS(CLBR_NONE)
8864 + GET_THREAD_INFO(%r11)
8865 + orl $TS_COMPAT,TI_status(%r11)
8866 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8867 jnz ia32_tracesys
8868 cmpq $(IA32_NR_syscalls-1),%rax
8869 ja ia32_badsys
8870 @@ -435,6 +498,9 @@ ia32_tracesys:
8871 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8872 movq %rsp,%rdi /* &pt_regs -> arg1 */
8873 call syscall_trace_enter
8874 +
8875 + pax_erase_kstack
8876 +
8877 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8878 RESTORE_REST
8879 cmpq $(IA32_NR_syscalls-1),%rax
8880 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8881 index aec2202..f76174e 100644
8882 --- a/arch/x86/ia32/sys_ia32.c
8883 +++ b/arch/x86/ia32/sys_ia32.c
8884 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8885 */
8886 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8887 {
8888 - typeof(ubuf->st_uid) uid = 0;
8889 - typeof(ubuf->st_gid) gid = 0;
8890 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8891 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8892 SET_UID(uid, stat->uid);
8893 SET_GID(gid, stat->gid);
8894 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8895 @@ -292,7 +292,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
8896 return alarm_setitimer(seconds);
8897 }
8898
8899 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
8900 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
8901 int options)
8902 {
8903 return compat_sys_wait4(pid, stat_addr, options, NULL);
8904 @@ -313,7 +313,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8905 mm_segment_t old_fs = get_fs();
8906
8907 set_fs(KERNEL_DS);
8908 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8909 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8910 set_fs(old_fs);
8911 if (put_compat_timespec(&t, interval))
8912 return -EFAULT;
8913 @@ -329,7 +329,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8914 mm_segment_t old_fs = get_fs();
8915
8916 set_fs(KERNEL_DS);
8917 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8918 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8919 set_fs(old_fs);
8920 if (!ret) {
8921 switch (_NSIG_WORDS) {
8922 @@ -354,7 +354,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8923 if (copy_siginfo_from_user32(&info, uinfo))
8924 return -EFAULT;
8925 set_fs(KERNEL_DS);
8926 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8927 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8928 set_fs(old_fs);
8929 return ret;
8930 }
8931 @@ -399,7 +399,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8932 return -EFAULT;
8933
8934 set_fs(KERNEL_DS);
8935 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8936 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8937 count);
8938 set_fs(old_fs);
8939
8940 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8941 index 952bd01..7692c6f 100644
8942 --- a/arch/x86/include/asm/alternative-asm.h
8943 +++ b/arch/x86/include/asm/alternative-asm.h
8944 @@ -15,6 +15,45 @@
8945 .endm
8946 #endif
8947
8948 +#ifdef KERNEXEC_PLUGIN
8949 + .macro pax_force_retaddr_bts rip=0
8950 + btsq $63,\rip(%rsp)
8951 + .endm
8952 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8953 + .macro pax_force_retaddr rip=0, reload=0
8954 + btsq $63,\rip(%rsp)
8955 + .endm
8956 + .macro pax_force_fptr ptr
8957 + btsq $63,\ptr
8958 + .endm
8959 + .macro pax_set_fptr_mask
8960 + .endm
8961 +#endif
8962 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8963 + .macro pax_force_retaddr rip=0, reload=0
8964 + .if \reload
8965 + pax_set_fptr_mask
8966 + .endif
8967 + orq %r10,\rip(%rsp)
8968 + .endm
8969 + .macro pax_force_fptr ptr
8970 + orq %r10,\ptr
8971 + .endm
8972 + .macro pax_set_fptr_mask
8973 + movabs $0x8000000000000000,%r10
8974 + .endm
8975 +#endif
8976 +#else
8977 + .macro pax_force_retaddr rip=0, reload=0
8978 + .endm
8979 + .macro pax_force_fptr ptr
8980 + .endm
8981 + .macro pax_force_retaddr_bts rip=0
8982 + .endm
8983 + .macro pax_set_fptr_mask
8984 + .endm
8985 +#endif
8986 +
8987 .macro altinstruction_entry orig alt feature orig_len alt_len
8988 .long \orig - .
8989 .long \alt - .
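These macros are the expansion targets for all the pax_force_retaddr / pax_force_retaddr_bts sites added in the assembly above. Under the KERNEXEC plugin they mark the saved return address on the stack before every return: the BTS method sets bit 63 directly, the OR method folds in the mask that pax_set_fptr_mask keeps in %r10 (0x8000000000000000, per the movabs above). A genuine kernel return address already has that bit set, so the marking is a no-op for it; an overwritten return address pointing into userland gains the bit, becomes non-canonical, and the ret faults instead of handing control to attacker-mapped code. Without the plugin all four macros collapse to nothing. A small userspace sketch of the address arithmetic (illustration only, not code from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t kernel_ra = 0xffffffff81000000ULL; /* typical kernel text address   */
        uint64_t user_ra   = 0x00007f1234560000ULL; /* typical userland address      */
        uint64_t mask      = 1ULL << 63;            /* value pax_set_fptr_mask loads */

        /* kernel address: bit 63 already set, ORing the mask changes nothing */
        printf("kernel %#018llx -> %#018llx\n",
               (unsigned long long)kernel_ra, (unsigned long long)(kernel_ra | mask));
        /* userland address: bit 63 becomes set while bits 62..47 stay clear, i.e.
         * non-canonical, so a hijacked ret into it raises #GP instead of running
         * the payload */
        printf("user   %#018llx -> %#018llx\n",
               (unsigned long long)user_ra, (unsigned long long)(user_ra | mask));
        return 0;
}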
8990 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8991 index 49331be..9706065 100644
8992 --- a/arch/x86/include/asm/alternative.h
8993 +++ b/arch/x86/include/asm/alternative.h
8994 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
8995 ".section .discard,\"aw\",@progbits\n" \
8996 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
8997 ".previous\n" \
8998 - ".section .altinstr_replacement, \"ax\"\n" \
8999 + ".section .altinstr_replacement, \"a\"\n" \
9000 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9001 ".previous"
9002
9003 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9004 index d854101..f6ea947 100644
9005 --- a/arch/x86/include/asm/apic.h
9006 +++ b/arch/x86/include/asm/apic.h
9007 @@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
9008
9009 #ifdef CONFIG_X86_LOCAL_APIC
9010
9011 -extern unsigned int apic_verbosity;
9012 +extern int apic_verbosity;
9013 extern int local_apic_timer_c2_ok;
9014
9015 extern int disable_apic;
9016 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9017 index 20370c6..a2eb9b0 100644
9018 --- a/arch/x86/include/asm/apm.h
9019 +++ b/arch/x86/include/asm/apm.h
9020 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9021 __asm__ __volatile__(APM_DO_ZERO_SEGS
9022 "pushl %%edi\n\t"
9023 "pushl %%ebp\n\t"
9024 - "lcall *%%cs:apm_bios_entry\n\t"
9025 + "lcall *%%ss:apm_bios_entry\n\t"
9026 "setc %%al\n\t"
9027 "popl %%ebp\n\t"
9028 "popl %%edi\n\t"
9029 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9030 __asm__ __volatile__(APM_DO_ZERO_SEGS
9031 "pushl %%edi\n\t"
9032 "pushl %%ebp\n\t"
9033 - "lcall *%%cs:apm_bios_entry\n\t"
9034 + "lcall *%%ss:apm_bios_entry\n\t"
9035 "setc %%bl\n\t"
9036 "popl %%ebp\n\t"
9037 "popl %%edi\n\t"
9038 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
9039 index 58cb6d4..ca9010d 100644
9040 --- a/arch/x86/include/asm/atomic.h
9041 +++ b/arch/x86/include/asm/atomic.h
9042 @@ -22,7 +22,18 @@
9043 */
9044 static inline int atomic_read(const atomic_t *v)
9045 {
9046 - return (*(volatile int *)&(v)->counter);
9047 + return (*(volatile const int *)&(v)->counter);
9048 +}
9049 +
9050 +/**
9051 + * atomic_read_unchecked - read atomic variable
9052 + * @v: pointer of type atomic_unchecked_t
9053 + *
9054 + * Atomically reads the value of @v.
9055 + */
9056 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9057 +{
9058 + return (*(volatile const int *)&(v)->counter);
9059 }
9060
9061 /**
9062 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
9063 }
9064
9065 /**
9066 + * atomic_set_unchecked - set atomic variable
9067 + * @v: pointer of type atomic_unchecked_t
9068 + * @i: required value
9069 + *
9070 + * Atomically sets the value of @v to @i.
9071 + */
9072 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9073 +{
9074 + v->counter = i;
9075 +}
9076 +
9077 +/**
9078 * atomic_add - add integer to atomic variable
9079 * @i: integer value to add
9080 * @v: pointer of type atomic_t
9081 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
9082 */
9083 static inline void atomic_add(int i, atomic_t *v)
9084 {
9085 - asm volatile(LOCK_PREFIX "addl %1,%0"
9086 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9087 +
9088 +#ifdef CONFIG_PAX_REFCOUNT
9089 + "jno 0f\n"
9090 + LOCK_PREFIX "subl %1,%0\n"
9091 + "int $4\n0:\n"
9092 + _ASM_EXTABLE(0b, 0b)
9093 +#endif
9094 +
9095 + : "+m" (v->counter)
9096 + : "ir" (i));
9097 +}
9098 +
9099 +/**
9100 + * atomic_add_unchecked - add integer to atomic variable
9101 + * @i: integer value to add
9102 + * @v: pointer of type atomic_unchecked_t
9103 + *
9104 + * Atomically adds @i to @v.
9105 + */
9106 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9107 +{
9108 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9109 : "+m" (v->counter)
9110 : "ir" (i));
9111 }
9112 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
9113 */
9114 static inline void atomic_sub(int i, atomic_t *v)
9115 {
9116 - asm volatile(LOCK_PREFIX "subl %1,%0"
9117 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9118 +
9119 +#ifdef CONFIG_PAX_REFCOUNT
9120 + "jno 0f\n"
9121 + LOCK_PREFIX "addl %1,%0\n"
9122 + "int $4\n0:\n"
9123 + _ASM_EXTABLE(0b, 0b)
9124 +#endif
9125 +
9126 + : "+m" (v->counter)
9127 + : "ir" (i));
9128 +}
9129 +
9130 +/**
9131 + * atomic_sub_unchecked - subtract integer from atomic variable
9132 + * @i: integer value to subtract
9133 + * @v: pointer of type atomic_unchecked_t
9134 + *
9135 + * Atomically subtracts @i from @v.
9136 + */
9137 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9138 +{
9139 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9140 : "+m" (v->counter)
9141 : "ir" (i));
9142 }
9143 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9144 {
9145 unsigned char c;
9146
9147 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9148 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
9149 +
9150 +#ifdef CONFIG_PAX_REFCOUNT
9151 + "jno 0f\n"
9152 + LOCK_PREFIX "addl %2,%0\n"
9153 + "int $4\n0:\n"
9154 + _ASM_EXTABLE(0b, 0b)
9155 +#endif
9156 +
9157 + "sete %1\n"
9158 : "+m" (v->counter), "=qm" (c)
9159 : "ir" (i) : "memory");
9160 return c;
9161 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9162 */
9163 static inline void atomic_inc(atomic_t *v)
9164 {
9165 - asm volatile(LOCK_PREFIX "incl %0"
9166 + asm volatile(LOCK_PREFIX "incl %0\n"
9167 +
9168 +#ifdef CONFIG_PAX_REFCOUNT
9169 + "jno 0f\n"
9170 + LOCK_PREFIX "decl %0\n"
9171 + "int $4\n0:\n"
9172 + _ASM_EXTABLE(0b, 0b)
9173 +#endif
9174 +
9175 + : "+m" (v->counter));
9176 +}
9177 +
9178 +/**
9179 + * atomic_inc_unchecked - increment atomic variable
9180 + * @v: pointer of type atomic_unchecked_t
9181 + *
9182 + * Atomically increments @v by 1.
9183 + */
9184 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9185 +{
9186 + asm volatile(LOCK_PREFIX "incl %0\n"
9187 : "+m" (v->counter));
9188 }
9189
9190 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
9191 */
9192 static inline void atomic_dec(atomic_t *v)
9193 {
9194 - asm volatile(LOCK_PREFIX "decl %0"
9195 + asm volatile(LOCK_PREFIX "decl %0\n"
9196 +
9197 +#ifdef CONFIG_PAX_REFCOUNT
9198 + "jno 0f\n"
9199 + LOCK_PREFIX "incl %0\n"
9200 + "int $4\n0:\n"
9201 + _ASM_EXTABLE(0b, 0b)
9202 +#endif
9203 +
9204 + : "+m" (v->counter));
9205 +}
9206 +
9207 +/**
9208 + * atomic_dec_unchecked - decrement atomic variable
9209 + * @v: pointer of type atomic_unchecked_t
9210 + *
9211 + * Atomically decrements @v by 1.
9212 + */
9213 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9214 +{
9215 + asm volatile(LOCK_PREFIX "decl %0\n"
9216 : "+m" (v->counter));
9217 }
9218
9219 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9220 {
9221 unsigned char c;
9222
9223 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9224 + asm volatile(LOCK_PREFIX "decl %0\n"
9225 +
9226 +#ifdef CONFIG_PAX_REFCOUNT
9227 + "jno 0f\n"
9228 + LOCK_PREFIX "incl %0\n"
9229 + "int $4\n0:\n"
9230 + _ASM_EXTABLE(0b, 0b)
9231 +#endif
9232 +
9233 + "sete %1\n"
9234 : "+m" (v->counter), "=qm" (c)
9235 : : "memory");
9236 return c != 0;
9237 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9238 {
9239 unsigned char c;
9240
9241 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9242 + asm volatile(LOCK_PREFIX "incl %0\n"
9243 +
9244 +#ifdef CONFIG_PAX_REFCOUNT
9245 + "jno 0f\n"
9246 + LOCK_PREFIX "decl %0\n"
9247 + "int $4\n0:\n"
9248 + _ASM_EXTABLE(0b, 0b)
9249 +#endif
9250 +
9251 + "sete %1\n"
9252 + : "+m" (v->counter), "=qm" (c)
9253 + : : "memory");
9254 + return c != 0;
9255 +}
9256 +
9257 +/**
9258 + * atomic_inc_and_test_unchecked - increment and test
9259 + * @v: pointer of type atomic_unchecked_t
9260 + *
9261 + * Atomically increments @v by 1
9262 + * and returns true if the result is zero, or false for all
9263 + * other cases.
9264 + */
9265 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9266 +{
9267 + unsigned char c;
9268 +
9269 + asm volatile(LOCK_PREFIX "incl %0\n"
9270 + "sete %1\n"
9271 : "+m" (v->counter), "=qm" (c)
9272 : : "memory");
9273 return c != 0;
9274 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9275 {
9276 unsigned char c;
9277
9278 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9279 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9280 +
9281 +#ifdef CONFIG_PAX_REFCOUNT
9282 + "jno 0f\n"
9283 + LOCK_PREFIX "subl %2,%0\n"
9284 + "int $4\n0:\n"
9285 + _ASM_EXTABLE(0b, 0b)
9286 +#endif
9287 +
9288 + "sets %1\n"
9289 : "+m" (v->counter), "=qm" (c)
9290 : "ir" (i) : "memory");
9291 return c;
9292 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
9293 goto no_xadd;
9294 #endif
9295 /* Modern 486+ processor */
9296 - return i + xadd(&v->counter, i);
9297 + return i + xadd_check_overflow(&v->counter, i);
9298
9299 #ifdef CONFIG_M386
9300 no_xadd: /* Legacy 386 processor */
9301 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
9302 }
9303
9304 /**
9305 + * atomic_add_return_unchecked - add integer and return
9306 + * @i: integer value to add
9307 + * @v: pointer of type atomic_unchecked_t
9308 + *
9309 + * Atomically adds @i to @v and returns @i + @v
9310 + */
9311 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9312 +{
9313 +#ifdef CONFIG_M386
9314 + int __i;
9315 + unsigned long flags;
9316 + if (unlikely(boot_cpu_data.x86 <= 3))
9317 + goto no_xadd;
9318 +#endif
9319 + /* Modern 486+ processor */
9320 + return i + xadd(&v->counter, i);
9321 +
9322 +#ifdef CONFIG_M386
9323 +no_xadd: /* Legacy 386 processor */
9324 + raw_local_irq_save(flags);
9325 + __i = atomic_read_unchecked(v);
9326 + atomic_set_unchecked(v, i + __i);
9327 + raw_local_irq_restore(flags);
9328 + return i + __i;
9329 +#endif
9330 +}
9331 +
9332 +/**
9333 * atomic_sub_return - subtract integer and return
9334 * @v: pointer of type atomic_t
9335 * @i: integer value to subtract
9336 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9337 }
9338
9339 #define atomic_inc_return(v) (atomic_add_return(1, v))
9340 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9341 +{
9342 + return atomic_add_return_unchecked(1, v);
9343 +}
9344 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9345
9346 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9347 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9348 return cmpxchg(&v->counter, old, new);
9349 }
9350
9351 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9352 +{
9353 + return cmpxchg(&v->counter, old, new);
9354 +}
9355 +
9356 static inline int atomic_xchg(atomic_t *v, int new)
9357 {
9358 return xchg(&v->counter, new);
9359 }
9360
9361 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9362 +{
9363 + return xchg(&v->counter, new);
9364 +}
9365 +
9366 /**
9367 * __atomic_add_unless - add unless the number is already a given value
9368 * @v: pointer of type atomic_t
9369 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9370 */
9371 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9372 {
9373 - int c, old;
9374 + int c, old, new;
9375 c = atomic_read(v);
9376 for (;;) {
9377 - if (unlikely(c == (u)))
9378 + if (unlikely(c == u))
9379 break;
9380 - old = atomic_cmpxchg((v), c, c + (a));
9381 +
9382 + asm volatile("addl %2,%0\n"
9383 +
9384 +#ifdef CONFIG_PAX_REFCOUNT
9385 + "jno 0f\n"
9386 + "subl %2,%0\n"
9387 + "int $4\n0:\n"
9388 + _ASM_EXTABLE(0b, 0b)
9389 +#endif
9390 +
9391 + : "=r" (new)
9392 + : "0" (c), "ir" (a));
9393 +
9394 + old = atomic_cmpxchg(v, c, new);
9395 if (likely(old == c))
9396 break;
9397 c = old;
9398 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9399 return c;
9400 }
9401
9402 +/**
9403 + * atomic_inc_not_zero_hint - increment if not null
9404 + * @v: pointer of type atomic_t
9405 + * @hint: probable value of the atomic before the increment
9406 + *
9407 + * This version of atomic_inc_not_zero() gives a hint of probable
9408 + * value of the atomic. This helps processor to not read the memory
9409 + * before doing the atomic read/modify/write cycle, lowering
9410 + * number of bus transactions on some arches.
9411 + *
9412 + * Returns: 0 if increment was not done, 1 otherwise.
9413 + */
9414 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9415 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9416 +{
9417 + int val, c = hint, new;
9418 +
9419 + /* sanity test, should be removed by compiler if hint is a constant */
9420 + if (!hint)
9421 + return __atomic_add_unless(v, 1, 0);
9422 +
9423 + do {
9424 + asm volatile("incl %0\n"
9425 +
9426 +#ifdef CONFIG_PAX_REFCOUNT
9427 + "jno 0f\n"
9428 + "decl %0\n"
9429 + "int $4\n0:\n"
9430 + _ASM_EXTABLE(0b, 0b)
9431 +#endif
9432 +
9433 + : "=r" (new)
9434 + : "0" (c));
9435 +
9436 + val = atomic_cmpxchg(v, c, new);
9437 + if (val == c)
9438 + return 1;
9439 + c = val;
9440 + } while (c);
9441 +
9442 + return 0;
9443 +}
9444
9445 /*
9446 * atomic_dec_if_positive - decrement by 1 if old value positive
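The instrumented atomic ops above all follow one pattern under CONFIG_PAX_REFCOUNT: perform the locked arithmetic, jno past a fixup when the signed result did not overflow, otherwise undo the operation and execute int $4 so the overflow trap handler can treat the event as a reference-counter overflow. The new *_unchecked variants and atomic_unchecked_t exist for counters that are allowed to wrap and therefore keep the plain semantics. A minimal userspace sketch of the pattern (illustration only; the real helpers also emit an _ASM_EXTABLE entry so the trap can be tied back to the faulting instruction):

#include <stdio.h>

typedef struct { int counter; } demo_atomic_t;

static inline void demo_atomic_inc(demo_atomic_t *v)
{
        asm volatile("lock; incl %0\n"
                     "jno 0f\n"        /* no signed overflow: skip the fixup     */
                     "lock; decl %0\n" /* overflow: undo, the counter stays put  */
                     "int $4\n"        /* raise the overflow trap (plain SIGSEGV */
                     "0:\n"            /* when run outside the patched kernel)   */
                     : "+m" (v->counter));
}

int main(void)
{
        demo_atomic_t v = { 1 };
        demo_atomic_inc(&v);
        printf("counter = %d\n", v.counter);  /* prints 2 */
        /* had v.counter started at INT_MAX, the jno would fall through, the
         * decrement would restore INT_MAX and int $4 would end the process */
        return 0;
}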
9447 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9448 index 1981199..36b9dfb 100644
9449 --- a/arch/x86/include/asm/atomic64_32.h
9450 +++ b/arch/x86/include/asm/atomic64_32.h
9451 @@ -12,6 +12,14 @@ typedef struct {
9452 u64 __aligned(8) counter;
9453 } atomic64_t;
9454
9455 +#ifdef CONFIG_PAX_REFCOUNT
9456 +typedef struct {
9457 + u64 __aligned(8) counter;
9458 +} atomic64_unchecked_t;
9459 +#else
9460 +typedef atomic64_t atomic64_unchecked_t;
9461 +#endif
9462 +
9463 #define ATOMIC64_INIT(val) { (val) }
9464
9465 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
9466 @@ -37,21 +45,31 @@ typedef struct {
9467 ATOMIC64_DECL_ONE(sym##_386)
9468
9469 ATOMIC64_DECL_ONE(add_386);
9470 +ATOMIC64_DECL_ONE(add_unchecked_386);
9471 ATOMIC64_DECL_ONE(sub_386);
9472 +ATOMIC64_DECL_ONE(sub_unchecked_386);
9473 ATOMIC64_DECL_ONE(inc_386);
9474 +ATOMIC64_DECL_ONE(inc_unchecked_386);
9475 ATOMIC64_DECL_ONE(dec_386);
9476 +ATOMIC64_DECL_ONE(dec_unchecked_386);
9477 #endif
9478
9479 #define alternative_atomic64(f, out, in...) \
9480 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
9481
9482 ATOMIC64_DECL(read);
9483 +ATOMIC64_DECL(read_unchecked);
9484 ATOMIC64_DECL(set);
9485 +ATOMIC64_DECL(set_unchecked);
9486 ATOMIC64_DECL(xchg);
9487 ATOMIC64_DECL(add_return);
9488 +ATOMIC64_DECL(add_return_unchecked);
9489 ATOMIC64_DECL(sub_return);
9490 +ATOMIC64_DECL(sub_return_unchecked);
9491 ATOMIC64_DECL(inc_return);
9492 +ATOMIC64_DECL(inc_return_unchecked);
9493 ATOMIC64_DECL(dec_return);
9494 +ATOMIC64_DECL(dec_return_unchecked);
9495 ATOMIC64_DECL(dec_if_positive);
9496 ATOMIC64_DECL(inc_not_zero);
9497 ATOMIC64_DECL(add_unless);
9498 @@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9499 }
9500
9501 /**
9502 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9503 + * @p: pointer to type atomic64_unchecked_t
9504 + * @o: expected value
9505 + * @n: new value
9506 + *
9507 + * Atomically sets @v to @n if it was equal to @o and returns
9508 + * the old value.
9509 + */
9510 +
9511 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9512 +{
9513 + return cmpxchg64(&v->counter, o, n);
9514 +}
9515 +
9516 +/**
9517 * atomic64_xchg - xchg atomic64 variable
9518 * @v: pointer to type atomic64_t
9519 * @n: value to assign
9520 @@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9521 }
9522
9523 /**
9524 + * atomic64_set_unchecked - set atomic64 variable
9525 + * @v: pointer to type atomic64_unchecked_t
9526 + * @n: value to assign
9527 + *
9528 + * Atomically sets the value of @v to @n.
9529 + */
9530 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9531 +{
9532 + unsigned high = (unsigned)(i >> 32);
9533 + unsigned low = (unsigned)i;
9534 + alternative_atomic64(set, /* no output */,
9535 + "S" (v), "b" (low), "c" (high)
9536 + : "eax", "edx", "memory");
9537 +}
9538 +
9539 +/**
9540 * atomic64_read - read atomic64 variable
9541 * @v: pointer to type atomic64_t
9542 *
9543 @@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
9544 }
9545
9546 /**
9547 + * atomic64_read_unchecked - read atomic64 variable
9548 + * @v: pointer to type atomic64_unchecked_t
9549 + *
9550 + * Atomically reads the value of @v and returns it.
9551 + */
9552 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9553 +{
9554 + long long r;
9555 + alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
9556 + return r;
9557 + }
9558 +
9559 +/**
9560 * atomic64_add_return - add and return
9561 * @i: integer value to add
9562 * @v: pointer to type atomic64_t
9563 @@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9564 return i;
9565 }
9566
9567 +/**
9568 + * atomic64_add_return_unchecked - add and return
9569 + * @i: integer value to add
9570 + * @v: pointer to type atomic64_unchecked_t
9571 + *
9572 + * Atomically adds @i to @v and returns @i + *@v
9573 + */
9574 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9575 +{
9576 + alternative_atomic64(add_return_unchecked,
9577 + ASM_OUTPUT2("+A" (i), "+c" (v)),
9578 + ASM_NO_INPUT_CLOBBER("memory"));
9579 + return i;
9580 +}
9581 +
9582 /*
9583 * Other variants with different arithmetic operators:
9584 */
9585 @@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9586 return a;
9587 }
9588
9589 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9590 +{
9591 + long long a;
9592 + alternative_atomic64(inc_return_unchecked, "=&A" (a),
9593 + "S" (v) : "memory", "ecx");
9594 + return a;
9595 +}
9596 +
9597 static inline long long atomic64_dec_return(atomic64_t *v)
9598 {
9599 long long a;
9600 @@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9601 }
9602
9603 /**
9604 + * atomic64_add_unchecked - add integer to atomic64 variable
9605 + * @i: integer value to add
9606 + * @v: pointer to type atomic64_unchecked_t
9607 + *
9608 + * Atomically adds @i to @v.
9609 + */
9610 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9611 +{
9612 + __alternative_atomic64(add_unchecked, add_return_unchecked,
9613 + ASM_OUTPUT2("+A" (i), "+c" (v)),
9614 + ASM_NO_INPUT_CLOBBER("memory"));
9615 + return i;
9616 +}
9617 +
9618 +/**
9619 * atomic64_sub - subtract the atomic64 variable
9620 * @i: integer value to subtract
9621 * @v: pointer to type atomic64_t
9622 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9623 index 0e1cbfc..5623683 100644
9624 --- a/arch/x86/include/asm/atomic64_64.h
9625 +++ b/arch/x86/include/asm/atomic64_64.h
9626 @@ -18,7 +18,19 @@
9627 */
9628 static inline long atomic64_read(const atomic64_t *v)
9629 {
9630 - return (*(volatile long *)&(v)->counter);
9631 + return (*(volatile const long *)&(v)->counter);
9632 +}
9633 +
9634 +/**
9635 + * atomic64_read_unchecked - read atomic64 variable
9636 + * @v: pointer of type atomic64_unchecked_t
9637 + *
9638 + * Atomically reads the value of @v.
9639 + * Doesn't imply a read memory barrier.
9640 + */
9641 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9642 +{
9643 + return (*(volatile const long *)&(v)->counter);
9644 }
9645
9646 /**
9647 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9648 }
9649
9650 /**
9651 + * atomic64_set_unchecked - set atomic64 variable
9652 + * @v: pointer to type atomic64_unchecked_t
9653 + * @i: required value
9654 + *
9655 + * Atomically sets the value of @v to @i.
9656 + */
9657 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9658 +{
9659 + v->counter = i;
9660 +}
9661 +
9662 +/**
9663 * atomic64_add - add integer to atomic64 variable
9664 * @i: integer value to add
9665 * @v: pointer to type atomic64_t
9666 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9667 */
9668 static inline void atomic64_add(long i, atomic64_t *v)
9669 {
9670 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9671 +
9672 +#ifdef CONFIG_PAX_REFCOUNT
9673 + "jno 0f\n"
9674 + LOCK_PREFIX "subq %1,%0\n"
9675 + "int $4\n0:\n"
9676 + _ASM_EXTABLE(0b, 0b)
9677 +#endif
9678 +
9679 + : "=m" (v->counter)
9680 + : "er" (i), "m" (v->counter));
9681 +}
9682 +
9683 +/**
9684 + * atomic64_add_unchecked - add integer to atomic64 variable
9685 + * @i: integer value to add
9686 + * @v: pointer to type atomic64_unchecked_t
9687 + *
9688 + * Atomically adds @i to @v.
9689 + */
9690 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9691 +{
9692 asm volatile(LOCK_PREFIX "addq %1,%0"
9693 : "=m" (v->counter)
9694 : "er" (i), "m" (v->counter));
9695 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9696 */
9697 static inline void atomic64_sub(long i, atomic64_t *v)
9698 {
9699 - asm volatile(LOCK_PREFIX "subq %1,%0"
9700 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9701 +
9702 +#ifdef CONFIG_PAX_REFCOUNT
9703 + "jno 0f\n"
9704 + LOCK_PREFIX "addq %1,%0\n"
9705 + "int $4\n0:\n"
9706 + _ASM_EXTABLE(0b, 0b)
9707 +#endif
9708 +
9709 + : "=m" (v->counter)
9710 + : "er" (i), "m" (v->counter));
9711 +}
9712 +
9713 +/**
9714 + * atomic64_sub_unchecked - subtract the atomic64 variable
9715 + * @i: integer value to subtract
9716 + * @v: pointer to type atomic64_unchecked_t
9717 + *
9718 + * Atomically subtracts @i from @v.
9719 + */
9720 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9721 +{
9722 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9723 : "=m" (v->counter)
9724 : "er" (i), "m" (v->counter));
9725 }
9726 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9727 {
9728 unsigned char c;
9729
9730 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9731 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9732 +
9733 +#ifdef CONFIG_PAX_REFCOUNT
9734 + "jno 0f\n"
9735 + LOCK_PREFIX "addq %2,%0\n"
9736 + "int $4\n0:\n"
9737 + _ASM_EXTABLE(0b, 0b)
9738 +#endif
9739 +
9740 + "sete %1\n"
9741 : "=m" (v->counter), "=qm" (c)
9742 : "er" (i), "m" (v->counter) : "memory");
9743 return c;
9744 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9745 */
9746 static inline void atomic64_inc(atomic64_t *v)
9747 {
9748 + asm volatile(LOCK_PREFIX "incq %0\n"
9749 +
9750 +#ifdef CONFIG_PAX_REFCOUNT
9751 + "jno 0f\n"
9752 + LOCK_PREFIX "decq %0\n"
9753 + "int $4\n0:\n"
9754 + _ASM_EXTABLE(0b, 0b)
9755 +#endif
9756 +
9757 + : "=m" (v->counter)
9758 + : "m" (v->counter));
9759 +}
9760 +
9761 +/**
9762 + * atomic64_inc_unchecked - increment atomic64 variable
9763 + * @v: pointer to type atomic64_unchecked_t
9764 + *
9765 + * Atomically increments @v by 1.
9766 + */
9767 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9768 +{
9769 asm volatile(LOCK_PREFIX "incq %0"
9770 : "=m" (v->counter)
9771 : "m" (v->counter));
9772 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9773 */
9774 static inline void atomic64_dec(atomic64_t *v)
9775 {
9776 - asm volatile(LOCK_PREFIX "decq %0"
9777 + asm volatile(LOCK_PREFIX "decq %0\n"
9778 +
9779 +#ifdef CONFIG_PAX_REFCOUNT
9780 + "jno 0f\n"
9781 + LOCK_PREFIX "incq %0\n"
9782 + "int $4\n0:\n"
9783 + _ASM_EXTABLE(0b, 0b)
9784 +#endif
9785 +
9786 + : "=m" (v->counter)
9787 + : "m" (v->counter));
9788 +}
9789 +
9790 +/**
9791 + * atomic64_dec_unchecked - decrement atomic64 variable
9792 + * @v: pointer to type atomic64_t
9793 + *
9794 + * Atomically decrements @v by 1.
9795 + */
9796 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9797 +{
9798 + asm volatile(LOCK_PREFIX "decq %0\n"
9799 : "=m" (v->counter)
9800 : "m" (v->counter));
9801 }
9802 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9803 {
9804 unsigned char c;
9805
9806 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9807 + asm volatile(LOCK_PREFIX "decq %0\n"
9808 +
9809 +#ifdef CONFIG_PAX_REFCOUNT
9810 + "jno 0f\n"
9811 + LOCK_PREFIX "incq %0\n"
9812 + "int $4\n0:\n"
9813 + _ASM_EXTABLE(0b, 0b)
9814 +#endif
9815 +
9816 + "sete %1\n"
9817 : "=m" (v->counter), "=qm" (c)
9818 : "m" (v->counter) : "memory");
9819 return c != 0;
9820 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9821 {
9822 unsigned char c;
9823
9824 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9825 + asm volatile(LOCK_PREFIX "incq %0\n"
9826 +
9827 +#ifdef CONFIG_PAX_REFCOUNT
9828 + "jno 0f\n"
9829 + LOCK_PREFIX "decq %0\n"
9830 + "int $4\n0:\n"
9831 + _ASM_EXTABLE(0b, 0b)
9832 +#endif
9833 +
9834 + "sete %1\n"
9835 : "=m" (v->counter), "=qm" (c)
9836 : "m" (v->counter) : "memory");
9837 return c != 0;
9838 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9839 {
9840 unsigned char c;
9841
9842 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9843 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9844 +
9845 +#ifdef CONFIG_PAX_REFCOUNT
9846 + "jno 0f\n"
9847 + LOCK_PREFIX "subq %2,%0\n"
9848 + "int $4\n0:\n"
9849 + _ASM_EXTABLE(0b, 0b)
9850 +#endif
9851 +
9852 + "sets %1\n"
9853 : "=m" (v->counter), "=qm" (c)
9854 : "er" (i), "m" (v->counter) : "memory");
9855 return c;
9856 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9857 */
9858 static inline long atomic64_add_return(long i, atomic64_t *v)
9859 {
9860 + return i + xadd_check_overflow(&v->counter, i);
9861 +}
9862 +
9863 +/**
9864 + * atomic64_add_return_unchecked - add and return
9865 + * @i: integer value to add
9866 + * @v: pointer to type atomic64_unchecked_t
9867 + *
9868 + * Atomically adds @i to @v and returns @i + @v
9869 + */
9870 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9871 +{
9872 return i + xadd(&v->counter, i);
9873 }
9874
9875 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9876 }
9877
9878 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9879 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9880 +{
9881 + return atomic64_add_return_unchecked(1, v);
9882 +}
9883 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9884
9885 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9886 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9887 return cmpxchg(&v->counter, old, new);
9888 }
9889
9890 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9891 +{
9892 + return cmpxchg(&v->counter, old, new);
9893 +}
9894 +
9895 static inline long atomic64_xchg(atomic64_t *v, long new)
9896 {
9897 return xchg(&v->counter, new);
9898 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
9899 */
9900 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9901 {
9902 - long c, old;
9903 + long c, old, new;
9904 c = atomic64_read(v);
9905 for (;;) {
9906 - if (unlikely(c == (u)))
9907 + if (unlikely(c == u))
9908 break;
9909 - old = atomic64_cmpxchg((v), c, c + (a));
9910 +
9911 + asm volatile("add %2,%0\n"
9912 +
9913 +#ifdef CONFIG_PAX_REFCOUNT
9914 + "jno 0f\n"
9915 + "sub %2,%0\n"
9916 + "int $4\n0:\n"
9917 + _ASM_EXTABLE(0b, 0b)
9918 +#endif
9919 +
9920 + : "=r" (new)
9921 + : "0" (c), "ir" (a));
9922 +
9923 + old = atomic64_cmpxchg(v, c, new);
9924 if (likely(old == c))
9925 break;
9926 c = old;
9927 }
9928 - return c != (u);
9929 + return c != u;
9930 }
9931
9932 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9933 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9934 index b97596e..9bd48b06 100644
9935 --- a/arch/x86/include/asm/bitops.h
9936 +++ b/arch/x86/include/asm/bitops.h
9937 @@ -38,7 +38,7 @@
9938 * a mask operation on a byte.
9939 */
9940 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9941 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9942 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9943 #define CONST_MASK(nr) (1 << ((nr) & 7))
9944
9945 /**
9946 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9947 index 5e1a2ee..c9f9533 100644
9948 --- a/arch/x86/include/asm/boot.h
9949 +++ b/arch/x86/include/asm/boot.h
9950 @@ -11,10 +11,15 @@
9951 #include <asm/pgtable_types.h>
9952
9953 /* Physical address where kernel should be loaded. */
9954 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9955 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9956 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9957 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9958
9959 +#ifndef __ASSEMBLY__
9960 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
9961 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9962 +#endif
9963 +
9964 /* Minimum kernel alignment, as a power of two */
9965 #ifdef CONFIG_X86_64
9966 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9967 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9968 index 48f99f1..d78ebf9 100644
9969 --- a/arch/x86/include/asm/cache.h
9970 +++ b/arch/x86/include/asm/cache.h
9971 @@ -5,12 +5,13 @@
9972
9973 /* L1 cache line size */
9974 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9975 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9976 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9977
9978 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9979 +#define __read_only __attribute__((__section__(".data..read_only")))
9980
9981 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
9982 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
9983 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
9984
9985 #ifdef CONFIG_X86_VSMP
9986 #ifdef CONFIG_SMP
9987 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9988 index 9863ee3..4a1f8e1 100644
9989 --- a/arch/x86/include/asm/cacheflush.h
9990 +++ b/arch/x86/include/asm/cacheflush.h
9991 @@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
9992 unsigned long pg_flags = pg->flags & _PGMT_MASK;
9993
9994 if (pg_flags == _PGMT_DEFAULT)
9995 - return -1;
9996 + return ~0UL;
9997 else if (pg_flags == _PGMT_WC)
9998 return _PAGE_CACHE_WC;
9999 else if (pg_flags == _PGMT_UC_MINUS)
10000 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10001 index 46fc474..b02b0f9 100644
10002 --- a/arch/x86/include/asm/checksum_32.h
10003 +++ b/arch/x86/include/asm/checksum_32.h
10004 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10005 int len, __wsum sum,
10006 int *src_err_ptr, int *dst_err_ptr);
10007
10008 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10009 + int len, __wsum sum,
10010 + int *src_err_ptr, int *dst_err_ptr);
10011 +
10012 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10013 + int len, __wsum sum,
10014 + int *src_err_ptr, int *dst_err_ptr);
10015 +
10016 /*
10017 * Note: when you get a NULL pointer exception here this means someone
10018 * passed in an incorrect kernel address to one of these functions.
10019 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10020 int *err_ptr)
10021 {
10022 might_sleep();
10023 - return csum_partial_copy_generic((__force void *)src, dst,
10024 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
10025 len, sum, err_ptr, NULL);
10026 }
10027
10028 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10029 {
10030 might_sleep();
10031 if (access_ok(VERIFY_WRITE, dst, len))
10032 - return csum_partial_copy_generic(src, (__force void *)dst,
10033 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10034 len, sum, NULL, err_ptr);
10035
10036 if (len)
10037 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
10038 index 99480e5..d81165b 100644
10039 --- a/arch/x86/include/asm/cmpxchg.h
10040 +++ b/arch/x86/include/asm/cmpxchg.h
10041 @@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
10042 __compiletime_error("Bad argument size for cmpxchg");
10043 extern void __xadd_wrong_size(void)
10044 __compiletime_error("Bad argument size for xadd");
10045 +extern void __xadd_check_overflow_wrong_size(void)
10046 + __compiletime_error("Bad argument size for xadd_check_overflow");
10047 extern void __add_wrong_size(void)
10048 __compiletime_error("Bad argument size for add");
10049 +extern void __add_check_overflow_wrong_size(void)
10050 + __compiletime_error("Bad argument size for add_check_overflow");
10051
10052 /*
10053 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
10054 @@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
10055 __ret; \
10056 })
10057
10058 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
10059 + ({ \
10060 + __typeof__ (*(ptr)) __ret = (arg); \
10061 + switch (sizeof(*(ptr))) { \
10062 + case __X86_CASE_L: \
10063 + asm volatile (lock #op "l %0, %1\n" \
10064 + "jno 0f\n" \
10065 + "mov %0,%1\n" \
10066 + "int $4\n0:\n" \
10067 + _ASM_EXTABLE(0b, 0b) \
10068 + : "+r" (__ret), "+m" (*(ptr)) \
10069 + : : "memory", "cc"); \
10070 + break; \
10071 + case __X86_CASE_Q: \
10072 + asm volatile (lock #op "q %q0, %1\n" \
10073 + "jno 0f\n" \
10074 + "mov %0,%1\n" \
10075 + "int $4\n0:\n" \
10076 + _ASM_EXTABLE(0b, 0b) \
10077 + : "+r" (__ret), "+m" (*(ptr)) \
10078 + : : "memory", "cc"); \
10079 + break; \
10080 + default: \
10081 + __ ## op ## _check_overflow_wrong_size(); \
10082 + } \
10083 + __ret; \
10084 + })
10085 +
10086 /*
10087 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
10088 * Since this is generally used to protect other memory information, we
10089 @@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
10090 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
10091 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
10092
10093 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
10094 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
10095 +
10096 #define __add(ptr, inc, lock) \
10097 ({ \
10098 __typeof__ (*(ptr)) __ret = (inc); \
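
xadd_check_overflow above performs the locked xadd and, if the signed add set the overflow flag, undoes it and raises int $4 so the PAX_REFCOUNT handler can react. A rough userspace sketch of the same flag-based detection, assuming an x86 target with GNU inline asm; it reports the overflow instead of trapping and omits the undo step:

#include <stdio.h>

/* Returns the old counter value; *overflowed is set if the signed add
 * wrapped. The real macro raises int $4 and restores the old value. */
static int xadd_report_overflow(int *counter, int inc, int *overflowed)
{
	int old = inc;
	unsigned char of;

	asm volatile("lock xaddl %0, %1\n\t"
		     "seto %2"
		     : "+r" (old), "+m" (*counter), "=q" (of)
		     : : "memory", "cc");
	*overflowed = of;
	return old;
}

int main(void)
{
	int counter = 0x7fffffff; /* INT_MAX: the next increment overflows */
	int of = 0;

	xadd_report_overflow(&counter, 1, &of);
	printf("counter=%d overflowed=%d\n", counter, of);
	return 0;
}
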
10099 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
10100 index 340ee49..4238ced 100644
10101 --- a/arch/x86/include/asm/cpufeature.h
10102 +++ b/arch/x86/include/asm/cpufeature.h
10103 @@ -371,7 +371,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
10104 ".section .discard,\"aw\",@progbits\n"
10105 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
10106 ".previous\n"
10107 - ".section .altinstr_replacement,\"ax\"\n"
10108 + ".section .altinstr_replacement,\"a\"\n"
10109 "3: movb $1,%0\n"
10110 "4:\n"
10111 ".previous\n"
10112 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10113 index e95822d..a90010e 100644
10114 --- a/arch/x86/include/asm/desc.h
10115 +++ b/arch/x86/include/asm/desc.h
10116 @@ -4,6 +4,7 @@
10117 #include <asm/desc_defs.h>
10118 #include <asm/ldt.h>
10119 #include <asm/mmu.h>
10120 +#include <asm/pgtable.h>
10121
10122 #include <linux/smp.h>
10123
10124 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10125
10126 desc->type = (info->read_exec_only ^ 1) << 1;
10127 desc->type |= info->contents << 2;
10128 + desc->type |= info->seg_not_present ^ 1;
10129
10130 desc->s = 1;
10131 desc->dpl = 0x3;
10132 @@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10133 }
10134
10135 extern struct desc_ptr idt_descr;
10136 -extern gate_desc idt_table[];
10137 extern struct desc_ptr nmi_idt_descr;
10138 -extern gate_desc nmi_idt_table[];
10139 -
10140 -struct gdt_page {
10141 - struct desc_struct gdt[GDT_ENTRIES];
10142 -} __attribute__((aligned(PAGE_SIZE)));
10143 -
10144 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10145 +extern gate_desc idt_table[256];
10146 +extern gate_desc nmi_idt_table[256];
10147
10148 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10149 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10150 {
10151 - return per_cpu(gdt_page, cpu).gdt;
10152 + return cpu_gdt_table[cpu];
10153 }
10154
10155 #ifdef CONFIG_X86_64
10156 @@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10157 unsigned long base, unsigned dpl, unsigned flags,
10158 unsigned short seg)
10159 {
10160 - gate->a = (seg << 16) | (base & 0xffff);
10161 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10162 + gate->gate.offset_low = base;
10163 + gate->gate.seg = seg;
10164 + gate->gate.reserved = 0;
10165 + gate->gate.type = type;
10166 + gate->gate.s = 0;
10167 + gate->gate.dpl = dpl;
10168 + gate->gate.p = 1;
10169 + gate->gate.offset_high = base >> 16;
10170 }
10171
10172 #endif
10173 @@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10174
10175 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
10176 {
10177 + pax_open_kernel();
10178 memcpy(&idt[entry], gate, sizeof(*gate));
10179 + pax_close_kernel();
10180 }
10181
10182 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
10183 {
10184 + pax_open_kernel();
10185 memcpy(&ldt[entry], desc, 8);
10186 + pax_close_kernel();
10187 }
10188
10189 static inline void
10190 @@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
10191 default: size = sizeof(*gdt); break;
10192 }
10193
10194 + pax_open_kernel();
10195 memcpy(&gdt[entry], desc, size);
10196 + pax_close_kernel();
10197 }
10198
10199 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10200 @@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10201
10202 static inline void native_load_tr_desc(void)
10203 {
10204 + pax_open_kernel();
10205 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10206 + pax_close_kernel();
10207 }
10208
10209 static inline void native_load_gdt(const struct desc_ptr *dtr)
10210 @@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10211 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10212 unsigned int i;
10213
10214 + pax_open_kernel();
10215 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10216 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10217 + pax_close_kernel();
10218 }
10219
10220 #define _LDT_empty(info) \
10221 @@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10222 }
10223
10224 #ifdef CONFIG_X86_64
10225 -static inline void set_nmi_gate(int gate, void *addr)
10226 +static inline void set_nmi_gate(int gate, const void *addr)
10227 {
10228 gate_desc s;
10229
10230 @@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
10231 }
10232 #endif
10233
10234 -static inline void _set_gate(int gate, unsigned type, void *addr,
10235 +static inline void _set_gate(int gate, unsigned type, const void *addr,
10236 unsigned dpl, unsigned ist, unsigned seg)
10237 {
10238 gate_desc s;
10239 @@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10240 * Pentium F0 0F bugfix can have resulted in the mapped
10241 * IDT being write-protected.
10242 */
10243 -static inline void set_intr_gate(unsigned int n, void *addr)
10244 +static inline void set_intr_gate(unsigned int n, const void *addr)
10245 {
10246 BUG_ON((unsigned)n > 0xFF);
10247 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10248 @@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10249 /*
10250 * This routine sets up an interrupt gate at directory privilege level 3.
10251 */
10252 -static inline void set_system_intr_gate(unsigned int n, void *addr)
10253 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
10254 {
10255 BUG_ON((unsigned)n > 0xFF);
10256 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10257 }
10258
10259 -static inline void set_system_trap_gate(unsigned int n, void *addr)
10260 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
10261 {
10262 BUG_ON((unsigned)n > 0xFF);
10263 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10264 }
10265
10266 -static inline void set_trap_gate(unsigned int n, void *addr)
10267 +static inline void set_trap_gate(unsigned int n, const void *addr)
10268 {
10269 BUG_ON((unsigned)n > 0xFF);
10270 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10271 @@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10272 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10273 {
10274 BUG_ON((unsigned)n > 0xFF);
10275 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10276 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10277 }
10278
10279 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10280 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10281 {
10282 BUG_ON((unsigned)n > 0xFF);
10283 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10284 }
10285
10286 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10287 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10288 {
10289 BUG_ON((unsigned)n > 0xFF);
10290 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10291 }
10292
10293 +#ifdef CONFIG_X86_32
10294 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10295 +{
10296 + struct desc_struct d;
10297 +
10298 + if (likely(limit))
10299 + limit = (limit - 1UL) >> PAGE_SHIFT;
10300 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
10301 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10302 +}
10303 +#endif
10304 +
10305 #endif /* _ASM_X86_DESC_H */
10306 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10307 index 278441f..b95a174 100644
10308 --- a/arch/x86/include/asm/desc_defs.h
10309 +++ b/arch/x86/include/asm/desc_defs.h
10310 @@ -31,6 +31,12 @@ struct desc_struct {
10311 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10312 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10313 };
10314 + struct {
10315 + u16 offset_low;
10316 + u16 seg;
10317 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10318 + unsigned offset_high: 16;
10319 + } gate;
10320 };
10321 } __attribute__((packed));
10322
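
The new gate member of desc_struct lets pack_gate() fill named bitfields instead of hand-assembling the two 32-bit words. A hypothetical userspace check that the bitfield layout matches the old arithmetic, assuming GCC's little-endian bitfield allocation (which the kernel itself relies on); the selector and type values are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct gate_bits {
	uint16_t offset_low;
	uint16_t seg;
	unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
	unsigned offset_high : 16;
} __attribute__((packed));

int main(void)
{
	unsigned long base = 0xc0123456UL; /* example handler address */
	unsigned type = 0xe;               /* GATE_INTERRUPT */
	unsigned dpl = 0, seg = 0x10;      /* example kernel code selector */

	struct gate_bits g = {
		.offset_low  = base,
		.seg         = seg,
		.reserved    = 0,
		.type        = type,
		.s           = 0,
		.dpl         = dpl,
		.p           = 1,
		.offset_high = base >> 16,
	};

	uint32_t words[2];
	words[0] = (seg << 16) | (base & 0xffff);              /* old gate->a */
	words[1] = (base & 0xffff0000) |
		   (((0x80 | type | (dpl << 5)) & 0xff) << 8);  /* old gate->b */

	printf("bitfield packing matches word packing: %s\n",
	       memcmp(&g, words, sizeof(words)) == 0 ? "yes" : "no");
	return 0;
}
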
10323 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10324 index 3778256..c5d4fce 100644
10325 --- a/arch/x86/include/asm/e820.h
10326 +++ b/arch/x86/include/asm/e820.h
10327 @@ -69,7 +69,7 @@ struct e820map {
10328 #define ISA_START_ADDRESS 0xa0000
10329 #define ISA_END_ADDRESS 0x100000
10330
10331 -#define BIOS_BEGIN 0x000a0000
10332 +#define BIOS_BEGIN 0x000c0000
10333 #define BIOS_END 0x00100000
10334
10335 #define BIOS_ROM_BASE 0xffe00000
10336 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10337 index 5939f44..193f4a7 100644
10338 --- a/arch/x86/include/asm/elf.h
10339 +++ b/arch/x86/include/asm/elf.h
10340 @@ -243,7 +243,25 @@ extern int force_personality32;
10341 the loader. We need to make sure that it is out of the way of the program
10342 that it will "exec", and that there is sufficient room for the brk. */
10343
10344 +#ifdef CONFIG_PAX_SEGMEXEC
10345 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10346 +#else
10347 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10348 +#endif
10349 +
10350 +#ifdef CONFIG_PAX_ASLR
10351 +#ifdef CONFIG_X86_32
10352 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10353 +
10354 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10355 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10356 +#else
10357 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
10358 +
10359 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10360 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10361 +#endif
10362 +#endif
10363
10364 /* This yields a mask that user programs can use to figure out what
10365 instruction set this CPU supports. This could be done in user space,
10366 @@ -296,16 +314,12 @@ do { \
10367
10368 #define ARCH_DLINFO \
10369 do { \
10370 - if (vdso_enabled) \
10371 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10372 - (unsigned long)current->mm->context.vdso); \
10373 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10374 } while (0)
10375
10376 #define ARCH_DLINFO_X32 \
10377 do { \
10378 - if (vdso_enabled) \
10379 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10380 - (unsigned long)current->mm->context.vdso); \
10381 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10382 } while (0)
10383
10384 #define AT_SYSINFO 32
10385 @@ -320,7 +334,7 @@ else \
10386
10387 #endif /* !CONFIG_X86_32 */
10388
10389 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10390 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10391
10392 #define VDSO_ENTRY \
10393 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10394 @@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
10395 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10396 #define compat_arch_setup_additional_pages syscall32_setup_pages
10397
10398 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10399 -#define arch_randomize_brk arch_randomize_brk
10400 -
10401 /*
10402 * True on X86_32 or when emulating IA32 on X86_64
10403 */
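
As a rough feel for what PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN buy: the value is the number of randomized bits, which lands above PAGE_SHIFT when the base is computed. The sketch below only does that arithmetic and assumes TASK_SIZE_MAX_SHIFT is 47 (x86_64 with 4-level paging); the figures are back-of-the-envelope, not measurements:

#include <stdio.h>

int main(void)
{
	const unsigned page_shift = 12;
	const unsigned delta_ia32 = 16;                  /* PAX_DELTA_MMAP_LEN for ia32 tasks */
	const unsigned delta_64   = 47 - page_shift - 3; /* assumes TASK_SIZE_MAX_SHIFT == 47 */

	printf("ia32 tasks:   %2u randomized bits -> %llu MiB of mmap jitter\n",
	       delta_ia32, (1ULL << (delta_ia32 + page_shift)) >> 20);
	printf("64-bit tasks: %2u randomized bits -> %llu TiB of mmap jitter\n",
	       delta_64, (1ULL << (delta_64 + page_shift)) >> 40);
	return 0;
}
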
10404 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10405 index cc70c1c..d96d011 100644
10406 --- a/arch/x86/include/asm/emergency-restart.h
10407 +++ b/arch/x86/include/asm/emergency-restart.h
10408 @@ -15,6 +15,6 @@ enum reboot_type {
10409
10410 extern enum reboot_type reboot_type;
10411
10412 -extern void machine_emergency_restart(void);
10413 +extern void machine_emergency_restart(void) __noreturn;
10414
10415 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10416 diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
10417 index 4fa8815..71b121a 100644
10418 --- a/arch/x86/include/asm/fpu-internal.h
10419 +++ b/arch/x86/include/asm/fpu-internal.h
10420 @@ -86,6 +86,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10421 {
10422 int err;
10423
10424 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10425 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10426 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10427 +#endif
10428 +
10429 /* See comment in fxsave() below. */
10430 #ifdef CONFIG_AS_FXSAVEQ
10431 asm volatile("1: fxrstorq %[fx]\n\t"
10432 @@ -115,6 +120,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10433 {
10434 int err;
10435
10436 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10437 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10438 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10439 +#endif
10440 +
10441 /*
10442 * Clear the bytes not touched by the fxsave and reserved
10443 * for the SW usage.
10444 @@ -271,7 +281,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10445 "emms\n\t" /* clear stack tags */
10446 "fildl %P[addr]", /* set F?P to defined value */
10447 X86_FEATURE_FXSAVE_LEAK,
10448 - [addr] "m" (tsk->thread.fpu.has_fpu));
10449 + [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10450
10451 return fpu_restore_checking(&tsk->thread.fpu);
10452 }
10453 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10454 index 71ecbcb..bac10b7 100644
10455 --- a/arch/x86/include/asm/futex.h
10456 +++ b/arch/x86/include/asm/futex.h
10457 @@ -11,16 +11,18 @@
10458 #include <asm/processor.h>
10459
10460 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10461 + typecheck(u32 __user *, uaddr); \
10462 asm volatile("1:\t" insn "\n" \
10463 "2:\t.section .fixup,\"ax\"\n" \
10464 "3:\tmov\t%3, %1\n" \
10465 "\tjmp\t2b\n" \
10466 "\t.previous\n" \
10467 _ASM_EXTABLE(1b, 3b) \
10468 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10469 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10470 : "i" (-EFAULT), "0" (oparg), "1" (0))
10471
10472 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10473 + typecheck(u32 __user *, uaddr); \
10474 asm volatile("1:\tmovl %2, %0\n" \
10475 "\tmovl\t%0, %3\n" \
10476 "\t" insn "\n" \
10477 @@ -33,7 +35,7 @@
10478 _ASM_EXTABLE(1b, 4b) \
10479 _ASM_EXTABLE(2b, 4b) \
10480 : "=&a" (oldval), "=&r" (ret), \
10481 - "+m" (*uaddr), "=&r" (tem) \
10482 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10483 : "r" (oparg), "i" (-EFAULT), "1" (0))
10484
10485 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10486 @@ -60,10 +62,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10487
10488 switch (op) {
10489 case FUTEX_OP_SET:
10490 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10491 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10492 break;
10493 case FUTEX_OP_ADD:
10494 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10495 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10496 uaddr, oparg);
10497 break;
10498 case FUTEX_OP_OR:
10499 @@ -122,13 +124,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10500 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10501 return -EFAULT;
10502
10503 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10504 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10505 "2:\t.section .fixup, \"ax\"\n"
10506 "3:\tmov %3, %0\n"
10507 "\tjmp 2b\n"
10508 "\t.previous\n"
10509 _ASM_EXTABLE(1b, 3b)
10510 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10511 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10512 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10513 : "memory"
10514 );
10515 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10516 index eb92a6e..b98b2f4 100644
10517 --- a/arch/x86/include/asm/hw_irq.h
10518 +++ b/arch/x86/include/asm/hw_irq.h
10519 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10520 extern void enable_IO_APIC(void);
10521
10522 /* Statistics */
10523 -extern atomic_t irq_err_count;
10524 -extern atomic_t irq_mis_count;
10525 +extern atomic_unchecked_t irq_err_count;
10526 +extern atomic_unchecked_t irq_mis_count;
10527
10528 /* EISA */
10529 extern void eisa_set_level_irq(unsigned int irq);
10530 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10531 index d8e8eef..99f81ae 100644
10532 --- a/arch/x86/include/asm/io.h
10533 +++ b/arch/x86/include/asm/io.h
10534 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10535
10536 #include <linux/vmalloc.h>
10537
10538 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10539 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10540 +{
10541 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10542 +}
10543 +
10544 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10545 +{
10546 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10547 +}
10548 +
10549 /*
10550 * Convert a virtual cached pointer to an uncached pointer
10551 */
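
The valid_phys_addr_range()/valid_mmap_phys_addr_range() helpers added above reject /dev/mem accesses that would reach past the CPU's physical address width. A small standalone rendering of the same check, with an assumed 36-bit physical address space and made-up addresses:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Mirrors the patch: the access is valid only if the page containing its
 * last byte lies below 1 << phys_bits. */
static int valid_phys_addr_range(uint64_t addr, uint64_t count, unsigned phys_bits)
{
	uint64_t last_pfn = (addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
	return last_pfn < (1ULL << (phys_bits - PAGE_SHIFT));
}

int main(void)
{
	/* 36 physical address bits -> 64 GiB of addressable physical memory */
	printf("%d\n", valid_phys_addr_range(0xfee00000ULL, 4096, 36)); /* 1: in range */
	printf("%d\n", valid_phys_addr_range(1ULL << 36, 4096, 36));    /* 0: past the limit */
	return 0;
}
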
10552 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10553 index bba3cf8..06bc8da 100644
10554 --- a/arch/x86/include/asm/irqflags.h
10555 +++ b/arch/x86/include/asm/irqflags.h
10556 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10557 sti; \
10558 sysexit
10559
10560 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10561 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10562 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10563 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10564 +
10565 #else
10566 #define INTERRUPT_RETURN iret
10567 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10568 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10569 index 5478825..839e88c 100644
10570 --- a/arch/x86/include/asm/kprobes.h
10571 +++ b/arch/x86/include/asm/kprobes.h
10572 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10573 #define RELATIVEJUMP_SIZE 5
10574 #define RELATIVECALL_OPCODE 0xe8
10575 #define RELATIVE_ADDR_SIZE 4
10576 -#define MAX_STACK_SIZE 64
10577 -#define MIN_STACK_SIZE(ADDR) \
10578 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10579 - THREAD_SIZE - (unsigned long)(ADDR))) \
10580 - ? (MAX_STACK_SIZE) \
10581 - : (((unsigned long)current_thread_info()) + \
10582 - THREAD_SIZE - (unsigned long)(ADDR)))
10583 +#define MAX_STACK_SIZE 64UL
10584 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10585
10586 #define flush_insn_slot(p) do { } while (0)
10587
10588 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10589 index e216ba0..453f6ec 100644
10590 --- a/arch/x86/include/asm/kvm_host.h
10591 +++ b/arch/x86/include/asm/kvm_host.h
10592 @@ -679,7 +679,7 @@ struct kvm_x86_ops {
10593 int (*check_intercept)(struct kvm_vcpu *vcpu,
10594 struct x86_instruction_info *info,
10595 enum x86_intercept_stage stage);
10596 -};
10597 +} __do_const;
10598
10599 struct kvm_arch_async_pf {
10600 u32 token;
10601 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10602 index c8bed0d..e5721fa 100644
10603 --- a/arch/x86/include/asm/local.h
10604 +++ b/arch/x86/include/asm/local.h
10605 @@ -17,26 +17,58 @@ typedef struct {
10606
10607 static inline void local_inc(local_t *l)
10608 {
10609 - asm volatile(_ASM_INC "%0"
10610 + asm volatile(_ASM_INC "%0\n"
10611 +
10612 +#ifdef CONFIG_PAX_REFCOUNT
10613 + "jno 0f\n"
10614 + _ASM_DEC "%0\n"
10615 + "int $4\n0:\n"
10616 + _ASM_EXTABLE(0b, 0b)
10617 +#endif
10618 +
10619 : "+m" (l->a.counter));
10620 }
10621
10622 static inline void local_dec(local_t *l)
10623 {
10624 - asm volatile(_ASM_DEC "%0"
10625 + asm volatile(_ASM_DEC "%0\n"
10626 +
10627 +#ifdef CONFIG_PAX_REFCOUNT
10628 + "jno 0f\n"
10629 + _ASM_INC "%0\n"
10630 + "int $4\n0:\n"
10631 + _ASM_EXTABLE(0b, 0b)
10632 +#endif
10633 +
10634 : "+m" (l->a.counter));
10635 }
10636
10637 static inline void local_add(long i, local_t *l)
10638 {
10639 - asm volatile(_ASM_ADD "%1,%0"
10640 + asm volatile(_ASM_ADD "%1,%0\n"
10641 +
10642 +#ifdef CONFIG_PAX_REFCOUNT
10643 + "jno 0f\n"
10644 + _ASM_SUB "%1,%0\n"
10645 + "int $4\n0:\n"
10646 + _ASM_EXTABLE(0b, 0b)
10647 +#endif
10648 +
10649 : "+m" (l->a.counter)
10650 : "ir" (i));
10651 }
10652
10653 static inline void local_sub(long i, local_t *l)
10654 {
10655 - asm volatile(_ASM_SUB "%1,%0"
10656 + asm volatile(_ASM_SUB "%1,%0\n"
10657 +
10658 +#ifdef CONFIG_PAX_REFCOUNT
10659 + "jno 0f\n"
10660 + _ASM_ADD "%1,%0\n"
10661 + "int $4\n0:\n"
10662 + _ASM_EXTABLE(0b, 0b)
10663 +#endif
10664 +
10665 : "+m" (l->a.counter)
10666 : "ir" (i));
10667 }
10668 @@ -54,7 +86,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10669 {
10670 unsigned char c;
10671
10672 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10673 + asm volatile(_ASM_SUB "%2,%0\n"
10674 +
10675 +#ifdef CONFIG_PAX_REFCOUNT
10676 + "jno 0f\n"
10677 + _ASM_ADD "%2,%0\n"
10678 + "int $4\n0:\n"
10679 + _ASM_EXTABLE(0b, 0b)
10680 +#endif
10681 +
10682 + "sete %1\n"
10683 : "+m" (l->a.counter), "=qm" (c)
10684 : "ir" (i) : "memory");
10685 return c;
10686 @@ -72,7 +113,16 @@ static inline int local_dec_and_test(local_t *l)
10687 {
10688 unsigned char c;
10689
10690 - asm volatile(_ASM_DEC "%0; sete %1"
10691 + asm volatile(_ASM_DEC "%0\n"
10692 +
10693 +#ifdef CONFIG_PAX_REFCOUNT
10694 + "jno 0f\n"
10695 + _ASM_INC "%0\n"
10696 + "int $4\n0:\n"
10697 + _ASM_EXTABLE(0b, 0b)
10698 +#endif
10699 +
10700 + "sete %1\n"
10701 : "+m" (l->a.counter), "=qm" (c)
10702 : : "memory");
10703 return c != 0;
10704 @@ -90,7 +140,16 @@ static inline int local_inc_and_test(local_t *l)
10705 {
10706 unsigned char c;
10707
10708 - asm volatile(_ASM_INC "%0; sete %1"
10709 + asm volatile(_ASM_INC "%0\n"
10710 +
10711 +#ifdef CONFIG_PAX_REFCOUNT
10712 + "jno 0f\n"
10713 + _ASM_DEC "%0\n"
10714 + "int $4\n0:\n"
10715 + _ASM_EXTABLE(0b, 0b)
10716 +#endif
10717 +
10718 + "sete %1\n"
10719 : "+m" (l->a.counter), "=qm" (c)
10720 : : "memory");
10721 return c != 0;
10722 @@ -109,7 +168,16 @@ static inline int local_add_negative(long i, local_t *l)
10723 {
10724 unsigned char c;
10725
10726 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10727 + asm volatile(_ASM_ADD "%2,%0\n"
10728 +
10729 +#ifdef CONFIG_PAX_REFCOUNT
10730 + "jno 0f\n"
10731 + _ASM_SUB "%2,%0\n"
10732 + "int $4\n0:\n"
10733 + _ASM_EXTABLE(0b, 0b)
10734 +#endif
10735 +
10736 + "sets %1\n"
10737 : "+m" (l->a.counter), "=qm" (c)
10738 : "ir" (i) : "memory");
10739 return c;
10740 @@ -132,7 +200,15 @@ static inline long local_add_return(long i, local_t *l)
10741 #endif
10742 /* Modern 486+ processor */
10743 __i = i;
10744 - asm volatile(_ASM_XADD "%0, %1;"
10745 + asm volatile(_ASM_XADD "%0, %1\n"
10746 +
10747 +#ifdef CONFIG_PAX_REFCOUNT
10748 + "jno 0f\n"
10749 + _ASM_MOV "%0,%1\n"
10750 + "int $4\n0:\n"
10751 + _ASM_EXTABLE(0b, 0b)
10752 +#endif
10753 +
10754 : "+r" (i), "+m" (l->a.counter)
10755 : : "memory");
10756 return i + __i;
10757 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10758 index 593e51d..fa69c9a 100644
10759 --- a/arch/x86/include/asm/mman.h
10760 +++ b/arch/x86/include/asm/mman.h
10761 @@ -5,4 +5,14 @@
10762
10763 #include <asm-generic/mman.h>
10764
10765 +#ifdef __KERNEL__
10766 +#ifndef __ASSEMBLY__
10767 +#ifdef CONFIG_X86_32
10768 +#define arch_mmap_check i386_mmap_check
10769 +int i386_mmap_check(unsigned long addr, unsigned long len,
10770 + unsigned long flags);
10771 +#endif
10772 +#endif
10773 +#endif
10774 +
10775 #endif /* _ASM_X86_MMAN_H */
10776 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10777 index 5f55e69..e20bfb1 100644
10778 --- a/arch/x86/include/asm/mmu.h
10779 +++ b/arch/x86/include/asm/mmu.h
10780 @@ -9,7 +9,7 @@
10781 * we put the segment information here.
10782 */
10783 typedef struct {
10784 - void *ldt;
10785 + struct desc_struct *ldt;
10786 int size;
10787
10788 #ifdef CONFIG_X86_64
10789 @@ -18,7 +18,19 @@ typedef struct {
10790 #endif
10791
10792 struct mutex lock;
10793 - void *vdso;
10794 + unsigned long vdso;
10795 +
10796 +#ifdef CONFIG_X86_32
10797 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10798 + unsigned long user_cs_base;
10799 + unsigned long user_cs_limit;
10800 +
10801 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10802 + cpumask_t cpu_user_cs_mask;
10803 +#endif
10804 +
10805 +#endif
10806 +#endif
10807 } mm_context_t;
10808
10809 #ifdef CONFIG_SMP
10810 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10811 index 6902152..da4283a 100644
10812 --- a/arch/x86/include/asm/mmu_context.h
10813 +++ b/arch/x86/include/asm/mmu_context.h
10814 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10815
10816 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10817 {
10818 +
10819 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10820 + unsigned int i;
10821 + pgd_t *pgd;
10822 +
10823 + pax_open_kernel();
10824 + pgd = get_cpu_pgd(smp_processor_id());
10825 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10826 + set_pgd_batched(pgd+i, native_make_pgd(0));
10827 + pax_close_kernel();
10828 +#endif
10829 +
10830 #ifdef CONFIG_SMP
10831 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10832 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10833 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10834 struct task_struct *tsk)
10835 {
10836 unsigned cpu = smp_processor_id();
10837 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10838 + int tlbstate = TLBSTATE_OK;
10839 +#endif
10840
10841 if (likely(prev != next)) {
10842 #ifdef CONFIG_SMP
10843 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10844 + tlbstate = percpu_read(cpu_tlbstate.state);
10845 +#endif
10846 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10847 percpu_write(cpu_tlbstate.active_mm, next);
10848 #endif
10849 cpumask_set_cpu(cpu, mm_cpumask(next));
10850
10851 /* Re-load page tables */
10852 +#ifdef CONFIG_PAX_PER_CPU_PGD
10853 + pax_open_kernel();
10854 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
10855 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
10856 + pax_close_kernel();
10857 + load_cr3(get_cpu_pgd(cpu));
10858 +#else
10859 load_cr3(next->pgd);
10860 +#endif
10861
10862 /* stop flush ipis for the previous mm */
10863 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10864 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10865 */
10866 if (unlikely(prev->context.ldt != next->context.ldt))
10867 load_LDT_nolock(&next->context);
10868 - }
10869 +
10870 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10871 + if (!(__supported_pte_mask & _PAGE_NX)) {
10872 + smp_mb__before_clear_bit();
10873 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10874 + smp_mb__after_clear_bit();
10875 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10876 + }
10877 +#endif
10878 +
10879 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10880 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10881 + prev->context.user_cs_limit != next->context.user_cs_limit))
10882 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10883 #ifdef CONFIG_SMP
10884 + else if (unlikely(tlbstate != TLBSTATE_OK))
10885 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10886 +#endif
10887 +#endif
10888 +
10889 + }
10890 else {
10891 +
10892 +#ifdef CONFIG_PAX_PER_CPU_PGD
10893 + pax_open_kernel();
10894 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
10895 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
10896 + pax_close_kernel();
10897 + load_cr3(get_cpu_pgd(cpu));
10898 +#endif
10899 +
10900 +#ifdef CONFIG_SMP
10901 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10902 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10903
10904 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10905 * tlb flush IPI delivery. We must reload CR3
10906 * to make sure to use no freed page tables.
10907 */
10908 +
10909 +#ifndef CONFIG_PAX_PER_CPU_PGD
10910 load_cr3(next->pgd);
10911 +#endif
10912 +
10913 load_LDT_nolock(&next->context);
10914 +
10915 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10916 + if (!(__supported_pte_mask & _PAGE_NX))
10917 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10918 +#endif
10919 +
10920 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10921 +#ifdef CONFIG_PAX_PAGEEXEC
10922 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
10923 +#endif
10924 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10925 +#endif
10926 +
10927 }
10928 +#endif
10929 }
10930 -#endif
10931 }
10932
10933 #define activate_mm(prev, next) \
10934 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10935 index 9eae775..c914fea 100644
10936 --- a/arch/x86/include/asm/module.h
10937 +++ b/arch/x86/include/asm/module.h
10938 @@ -5,6 +5,7 @@
10939
10940 #ifdef CONFIG_X86_64
10941 /* X86_64 does not define MODULE_PROC_FAMILY */
10942 +#define MODULE_PROC_FAMILY ""
10943 #elif defined CONFIG_M386
10944 #define MODULE_PROC_FAMILY "386 "
10945 #elif defined CONFIG_M486
10946 @@ -59,8 +60,20 @@
10947 #error unknown processor family
10948 #endif
10949
10950 -#ifdef CONFIG_X86_32
10951 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
10952 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10953 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10954 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10955 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10956 +#else
10957 +#define MODULE_PAX_KERNEXEC ""
10958 #endif
10959
10960 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10961 +#define MODULE_PAX_UDEREF "UDEREF "
10962 +#else
10963 +#define MODULE_PAX_UDEREF ""
10964 +#endif
10965 +
10966 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10967 +
10968 #endif /* _ASM_X86_MODULE_H */
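
MODULE_ARCH_VERMAGIC is built by plain string-literal concatenation, so turning KERNEXEC or UDEREF on or off changes the module version magic and keeps modules built with mismatched settings from loading. A tiny sketch of the concatenation, with the config choices hard-coded as assumptions:

#include <stdio.h>

#define MODULE_PROC_FAMILY  ""             /* x86_64 case from the hunk above */
#define MODULE_PAX_KERNEXEC "KERNEXEC_OR " /* pretend PLUGIN_METHOD_OR is enabled */
#define MODULE_PAX_UDEREF   "UDEREF "      /* pretend CONFIG_PAX_MEMORY_UDEREF=y */
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
	/* prints: vermagic arch part: "KERNEXEC_OR UDEREF " */
	printf("vermagic arch part: \"%s\"\n", MODULE_ARCH_VERMAGIC);
	return 0;
}
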
10969 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10970 index 7639dbf..e08a58c 100644
10971 --- a/arch/x86/include/asm/page_64_types.h
10972 +++ b/arch/x86/include/asm/page_64_types.h
10973 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10974
10975 /* duplicated to the one in bootmem.h */
10976 extern unsigned long max_pfn;
10977 -extern unsigned long phys_base;
10978 +extern const unsigned long phys_base;
10979
10980 extern unsigned long __phys_addr(unsigned long);
10981 #define __phys_reloc_hide(x) (x)
10982 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10983 index aa0f913..0c5bc6a 100644
10984 --- a/arch/x86/include/asm/paravirt.h
10985 +++ b/arch/x86/include/asm/paravirt.h
10986 @@ -668,6 +668,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10987 val);
10988 }
10989
10990 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10991 +{
10992 + pgdval_t val = native_pgd_val(pgd);
10993 +
10994 + if (sizeof(pgdval_t) > sizeof(long))
10995 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10996 + val, (u64)val >> 32);
10997 + else
10998 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10999 + val);
11000 +}
11001 +
11002 static inline void pgd_clear(pgd_t *pgdp)
11003 {
11004 set_pgd(pgdp, __pgd(0));
11005 @@ -749,6 +761,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11006 pv_mmu_ops.set_fixmap(idx, phys, flags);
11007 }
11008
11009 +#ifdef CONFIG_PAX_KERNEXEC
11010 +static inline unsigned long pax_open_kernel(void)
11011 +{
11012 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11013 +}
11014 +
11015 +static inline unsigned long pax_close_kernel(void)
11016 +{
11017 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11018 +}
11019 +#else
11020 +static inline unsigned long pax_open_kernel(void) { return 0; }
11021 +static inline unsigned long pax_close_kernel(void) { return 0; }
11022 +#endif
11023 +
11024 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11025
11026 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
11027 @@ -965,7 +992,7 @@ extern void default_banner(void);
11028
11029 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11030 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11031 -#define PARA_INDIRECT(addr) *%cs:addr
11032 +#define PARA_INDIRECT(addr) *%ss:addr
11033 #endif
11034
11035 #define INTERRUPT_RETURN \
11036 @@ -1042,6 +1069,21 @@ extern void default_banner(void);
11037 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11038 CLBR_NONE, \
11039 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11040 +
11041 +#define GET_CR0_INTO_RDI \
11042 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11043 + mov %rax,%rdi
11044 +
11045 +#define SET_RDI_INTO_CR0 \
11046 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11047 +
11048 +#define GET_CR3_INTO_RDI \
11049 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11050 + mov %rax,%rdi
11051 +
11052 +#define SET_RDI_INTO_CR3 \
11053 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11054 +
11055 #endif /* CONFIG_X86_32 */
11056
11057 #endif /* __ASSEMBLY__ */
11058 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11059 index 8e8b9a4..f07d725 100644
11060 --- a/arch/x86/include/asm/paravirt_types.h
11061 +++ b/arch/x86/include/asm/paravirt_types.h
11062 @@ -84,20 +84,20 @@ struct pv_init_ops {
11063 */
11064 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11065 unsigned long addr, unsigned len);
11066 -};
11067 +} __no_const;
11068
11069
11070 struct pv_lazy_ops {
11071 /* Set deferred update mode, used for batching operations. */
11072 void (*enter)(void);
11073 void (*leave)(void);
11074 -};
11075 +} __no_const;
11076
11077 struct pv_time_ops {
11078 unsigned long long (*sched_clock)(void);
11079 unsigned long long (*steal_clock)(int cpu);
11080 unsigned long (*get_tsc_khz)(void);
11081 -};
11082 +} __no_const;
11083
11084 struct pv_cpu_ops {
11085 /* hooks for various privileged instructions */
11086 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
11087
11088 void (*start_context_switch)(struct task_struct *prev);
11089 void (*end_context_switch)(struct task_struct *next);
11090 -};
11091 +} __no_const;
11092
11093 struct pv_irq_ops {
11094 /*
11095 @@ -224,7 +224,7 @@ struct pv_apic_ops {
11096 unsigned long start_eip,
11097 unsigned long start_esp);
11098 #endif
11099 -};
11100 +} __no_const;
11101
11102 struct pv_mmu_ops {
11103 unsigned long (*read_cr2)(void);
11104 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
11105 struct paravirt_callee_save make_pud;
11106
11107 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11108 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11109 #endif /* PAGETABLE_LEVELS == 4 */
11110 #endif /* PAGETABLE_LEVELS >= 3 */
11111
11112 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
11113 an mfn. We can tell which is which from the index. */
11114 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11115 phys_addr_t phys, pgprot_t flags);
11116 +
11117 +#ifdef CONFIG_PAX_KERNEXEC
11118 + unsigned long (*pax_open_kernel)(void);
11119 + unsigned long (*pax_close_kernel)(void);
11120 +#endif
11121 +
11122 };
11123
11124 struct arch_spinlock;
11125 @@ -334,7 +341,7 @@ struct pv_lock_ops {
11126 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
11127 int (*spin_trylock)(struct arch_spinlock *lock);
11128 void (*spin_unlock)(struct arch_spinlock *lock);
11129 -};
11130 +} __no_const;
11131
11132 /* This contains all the paravirt structures: we get a convenient
11133 * number for each function using the offset which we use to indicate
11134 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11135 index b4389a4..7024269 100644
11136 --- a/arch/x86/include/asm/pgalloc.h
11137 +++ b/arch/x86/include/asm/pgalloc.h
11138 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11139 pmd_t *pmd, pte_t *pte)
11140 {
11141 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11142 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11143 +}
11144 +
11145 +static inline void pmd_populate_user(struct mm_struct *mm,
11146 + pmd_t *pmd, pte_t *pte)
11147 +{
11148 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11149 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11150 }
11151
11152 @@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
11153
11154 #ifdef CONFIG_X86_PAE
11155 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
11156 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
11157 +{
11158 + pud_populate(mm, pudp, pmd);
11159 +}
11160 #else /* !CONFIG_X86_PAE */
11161 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11162 {
11163 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11164 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
11165 }
11166 +
11167 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11168 +{
11169 + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11170 + set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
11171 +}
11172 #endif /* CONFIG_X86_PAE */
11173
11174 #if PAGETABLE_LEVELS > 3
11175 @@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11176 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
11177 }
11178
11179 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11180 +{
11181 + paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
11182 + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
11183 +}
11184 +
11185 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
11186 {
11187 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
11188 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11189 index 98391db..8f6984e 100644
11190 --- a/arch/x86/include/asm/pgtable-2level.h
11191 +++ b/arch/x86/include/asm/pgtable-2level.h
11192 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11193
11194 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11195 {
11196 + pax_open_kernel();
11197 *pmdp = pmd;
11198 + pax_close_kernel();
11199 }
11200
11201 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11202 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11203 index effff47..bbb8295 100644
11204 --- a/arch/x86/include/asm/pgtable-3level.h
11205 +++ b/arch/x86/include/asm/pgtable-3level.h
11206 @@ -31,6 +31,56 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
11207 ptep->pte_low = pte.pte_low;
11208 }
11209
11210 +#define __HAVE_ARCH_READ_PMD_ATOMIC
11211 +/*
11212 + * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
11213 + * a "*pmdp" dereference done by gcc. Problem is, in certain places
11214 + * where pte_offset_map_lock is called, concurrent page faults are
11215 + * allowed, if the mmap_sem is held for reading. An example is mincore
11216 + * vs page faults vs MADV_DONTNEED. On the page fault side
11217 + * pmd_populate rightfully does a set_64bit, but if we're reading the
11218 + * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
11219 + * because gcc will not read the 64 bits of the pmd atomically. To fix
11220 + * this, all places running pmd_offset_map_lock() while holding the
11221 + * mmap_sem in read mode shall read the pmdp pointer using this
11222 + * function to know if the pmd is null or not, and in turn to know if
11223 + * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
11224 + * operations.
11225 + *
11226 + * Without THP if the mmap_sem is held for reading, the
11227 + * pmd can only transition from null to not null while read_pmd_atomic runs.
11228 + * So there's no need of literally reading it atomically.
11229 + *
11230 + * With THP if the mmap_sem is held for reading, the pmd can become
11231 + * THP or null or point to a pte (and in turn become "stable") at any
11232 + * time under read_pmd_atomic, so it's mandatory to read it atomically
11233 + * with cmpxchg8b.
11234 + */
11235 +#ifndef CONFIG_TRANSPARENT_HUGEPAGE
11236 +static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
11237 +{
11238 + pmdval_t ret;
11239 + u32 *tmp = (u32 *)pmdp;
11240 +
11241 + ret = (pmdval_t) (*tmp);
11242 + if (ret) {
11243 + /*
11244 + * If the low part is null, we must not read the high part
11245 + * or we can end up with a partial pmd.
11246 + */
11247 + smp_rmb();
11248 + ret |= ((pmdval_t)*(tmp + 1)) << 32;
11249 + }
11250 +
11251 + return __pmd(ret);
11252 +}
11253 +#else /* CONFIG_TRANSPARENT_HUGEPAGE */
11254 +static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
11255 +{
11256 + return __pmd(atomic64_read((atomic64_t *)pmdp));
11257 +}
11258 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
11259 +
11260 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11261 {
11262 set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
11263 @@ -38,12 +88,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11264
11265 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11266 {
11267 + pax_open_kernel();
11268 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11269 + pax_close_kernel();
11270 }
11271
11272 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11273 {
11274 + pax_open_kernel();
11275 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11276 + pax_close_kernel();
11277 }
11278
11279 /*
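
A userspace rendering of the two read strategies read_pmd_atomic() chooses between may make the comment above more concrete: the non-THP path reads the low word, issues a read barrier, and only then reads the high word, while the THP path does a single atomic 64-bit load. The sketch assumes a little-endian layout and uses GCC __atomic builtins in place of smp_rmb() and atomic64_read():

#include <stdint.h>
#include <stdio.h>

/* Non-THP path: low word first, barrier, then high word only if low != 0. */
static uint64_t read_split(const volatile uint32_t *halves)
{
	uint64_t ret = halves[0];
	if (ret) {
		__atomic_thread_fence(__ATOMIC_ACQUIRE); /* stands in for smp_rmb() */
		ret |= (uint64_t)halves[1] << 32;
	}
	return ret;
}

/* THP path: one atomic 64-bit load; on 32-bit x86 this compiles to a
 * cmpxchg8b/movq-based sequence rather than two 32-bit reads. */
static uint64_t read_whole(const uint64_t *pmd)
{
	return __atomic_load_n(pmd, __ATOMIC_RELAXED);
}

int main(void)
{
	uint64_t pmd = 0x1234567890abcdefULL;

	printf("split: %#llx\n",
	       (unsigned long long)read_split((const volatile uint32_t *)&pmd));
	printf("whole: %#llx\n", (unsigned long long)read_whole(&pmd));
	return 0;
}
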
11280 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11281 index 49afb3f..91a8c63 100644
11282 --- a/arch/x86/include/asm/pgtable.h
11283 +++ b/arch/x86/include/asm/pgtable.h
11284 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11285
11286 #ifndef __PAGETABLE_PUD_FOLDED
11287 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11288 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11289 #define pgd_clear(pgd) native_pgd_clear(pgd)
11290 #endif
11291
11292 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11293
11294 #define arch_end_context_switch(prev) do {} while(0)
11295
11296 +#define pax_open_kernel() native_pax_open_kernel()
11297 +#define pax_close_kernel() native_pax_close_kernel()
11298 #endif /* CONFIG_PARAVIRT */
11299
11300 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
11301 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11302 +
11303 +#ifdef CONFIG_PAX_KERNEXEC
11304 +static inline unsigned long native_pax_open_kernel(void)
11305 +{
11306 + unsigned long cr0;
11307 +
11308 + preempt_disable();
11309 + barrier();
11310 + cr0 = read_cr0() ^ X86_CR0_WP;
11311 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
11312 + write_cr0(cr0);
11313 + return cr0 ^ X86_CR0_WP;
11314 +}
11315 +
11316 +static inline unsigned long native_pax_close_kernel(void)
11317 +{
11318 + unsigned long cr0;
11319 +
11320 + cr0 = read_cr0() ^ X86_CR0_WP;
11321 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11322 + write_cr0(cr0);
11323 + barrier();
11324 + preempt_enable_no_resched();
11325 + return cr0 ^ X86_CR0_WP;
11326 +}
11327 +#else
11328 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
11329 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
11330 +#endif
11331 +
11332 /*
11333 * The following only work if pte_present() is true.
11334 * Undefined behaviour if not..
11335 */
11336 +static inline int pte_user(pte_t pte)
11337 +{
11338 + return pte_val(pte) & _PAGE_USER;
11339 +}
11340 +
11341 static inline int pte_dirty(pte_t pte)
11342 {
11343 return pte_flags(pte) & _PAGE_DIRTY;
11344 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11345 return pte_clear_flags(pte, _PAGE_RW);
11346 }
11347
11348 +static inline pte_t pte_mkread(pte_t pte)
11349 +{
11350 + return __pte(pte_val(pte) | _PAGE_USER);
11351 +}
11352 +
11353 static inline pte_t pte_mkexec(pte_t pte)
11354 {
11355 - return pte_clear_flags(pte, _PAGE_NX);
11356 +#ifdef CONFIG_X86_PAE
11357 + if (__supported_pte_mask & _PAGE_NX)
11358 + return pte_clear_flags(pte, _PAGE_NX);
11359 + else
11360 +#endif
11361 + return pte_set_flags(pte, _PAGE_USER);
11362 +}
11363 +
11364 +static inline pte_t pte_exprotect(pte_t pte)
11365 +{
11366 +#ifdef CONFIG_X86_PAE
11367 + if (__supported_pte_mask & _PAGE_NX)
11368 + return pte_set_flags(pte, _PAGE_NX);
11369 + else
11370 +#endif
11371 + return pte_clear_flags(pte, _PAGE_USER);
11372 }
11373
11374 static inline pte_t pte_mkdirty(pte_t pte)
11375 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11376 #endif
11377
11378 #ifndef __ASSEMBLY__
11379 +
11380 +#ifdef CONFIG_PAX_PER_CPU_PGD
11381 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11382 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11383 +{
11384 + return cpu_pgd[cpu];
11385 +}
11386 +#endif
11387 +
11388 #include <linux/mm_types.h>
11389
11390 static inline int pte_none(pte_t pte)
11391 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11392
11393 static inline int pgd_bad(pgd_t pgd)
11394 {
11395 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11396 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11397 }
11398
11399 static inline int pgd_none(pgd_t pgd)
11400 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
11401 * pgd_offset() returns a (pgd_t *)
11402 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11403 */
11404 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11405 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11406 +
11407 +#ifdef CONFIG_PAX_PER_CPU_PGD
11408 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11409 +#endif
11410 +
11411 /*
11412 * a shortcut which implies the use of the kernel's pgd, instead
11413 * of a process's
11414 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
11415 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11416 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11417
11418 +#ifdef CONFIG_X86_32
11419 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11420 +#else
11421 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11422 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11423 +
11424 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11425 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11426 +#else
11427 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11428 +#endif
11429 +
11430 +#endif
11431 +
11432 #ifndef __ASSEMBLY__
11433
11434 extern int direct_gbpages;
11435 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
11436 * dst and src can be on the same page, but the range must not overlap,
11437 * and must not cross a page boundary.
11438 */
11439 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11440 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11441 {
11442 - memcpy(dst, src, count * sizeof(pgd_t));
11443 + pax_open_kernel();
11444 + while (count--)
11445 + *dst++ = *src++;
11446 + pax_close_kernel();
11447 }
11448
11449 +#ifdef CONFIG_PAX_PER_CPU_PGD
11450 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
11451 +#endif
11452 +
11453 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11454 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
11455 +#else
11456 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
11457 +#endif
11458
11459 #include <asm-generic/pgtable.h>
11460 #endif /* __ASSEMBLY__ */
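
native_pax_open_kernel()/native_pax_close_kernel() above briefly flip CR0.WP around writes to otherwise read-only kernel data, which is why the patch brackets descriptor and page-table stores with pax_open_kernel()/pax_close_kernel(). As a loose userspace analogy only, with mprotect() on a page standing in for the WP bit rather than any kernel mechanism:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	strcpy(p, "initial");
	mprotect(p, pagesz, PROT_READ);              /* "read-only" steady state */

	mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() analogue */
	strcpy(p, "updated between open and close"); /* the one permitted write */
	mprotect(p, pagesz, PROT_READ);              /* pax_close_kernel() analogue */

	printf("%s\n", p);
	munmap(p, pagesz);
	return 0;
}
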
11461 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11462 index 0c92113..34a77c6 100644
11463 --- a/arch/x86/include/asm/pgtable_32.h
11464 +++ b/arch/x86/include/asm/pgtable_32.h
11465 @@ -25,9 +25,6 @@
11466 struct mm_struct;
11467 struct vm_area_struct;
11468
11469 -extern pgd_t swapper_pg_dir[1024];
11470 -extern pgd_t initial_page_table[1024];
11471 -
11472 static inline void pgtable_cache_init(void) { }
11473 static inline void check_pgt_cache(void) { }
11474 void paging_init(void);
11475 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11476 # include <asm/pgtable-2level.h>
11477 #endif
11478
11479 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11480 +extern pgd_t initial_page_table[PTRS_PER_PGD];
11481 +#ifdef CONFIG_X86_PAE
11482 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11483 +#endif
11484 +
11485 #if defined(CONFIG_HIGHPTE)
11486 #define pte_offset_map(dir, address) \
11487 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11488 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11489 /* Clear a kernel PTE and flush it from the TLB */
11490 #define kpte_clear_flush(ptep, vaddr) \
11491 do { \
11492 + pax_open_kernel(); \
11493 pte_clear(&init_mm, (vaddr), (ptep)); \
11494 + pax_close_kernel(); \
11495 __flush_tlb_one((vaddr)); \
11496 } while (0)
11497
11498 @@ -74,6 +79,9 @@ do { \
11499
11500 #endif /* !__ASSEMBLY__ */
11501
11502 +#define HAVE_ARCH_UNMAPPED_AREA
11503 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11504 +
11505 /*
11506 * kern_addr_valid() is (1) for FLATMEM and (0) for
11507 * SPARSEMEM and DISCONTIGMEM
11508 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11509 index ed5903b..c7fe163 100644
11510 --- a/arch/x86/include/asm/pgtable_32_types.h
11511 +++ b/arch/x86/include/asm/pgtable_32_types.h
11512 @@ -8,7 +8,7 @@
11513 */
11514 #ifdef CONFIG_X86_PAE
11515 # include <asm/pgtable-3level_types.h>
11516 -# define PMD_SIZE (1UL << PMD_SHIFT)
11517 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11518 # define PMD_MASK (~(PMD_SIZE - 1))
11519 #else
11520 # include <asm/pgtable-2level_types.h>
11521 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11522 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11523 #endif
11524
11525 +#ifdef CONFIG_PAX_KERNEXEC
11526 +#ifndef __ASSEMBLY__
11527 +extern unsigned char MODULES_EXEC_VADDR[];
11528 +extern unsigned char MODULES_EXEC_END[];
11529 +#endif
11530 +#include <asm/boot.h>
11531 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11532 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11533 +#else
11534 +#define ktla_ktva(addr) (addr)
11535 +#define ktva_ktla(addr) (addr)
11536 +#endif
11537 +
11538 #define MODULES_VADDR VMALLOC_START
11539 #define MODULES_END VMALLOC_END
11540 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11541 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11542 index 975f709..9f779c9 100644
11543 --- a/arch/x86/include/asm/pgtable_64.h
11544 +++ b/arch/x86/include/asm/pgtable_64.h
11545 @@ -16,10 +16,14 @@
11546
11547 extern pud_t level3_kernel_pgt[512];
11548 extern pud_t level3_ident_pgt[512];
11549 +extern pud_t level3_vmalloc_start_pgt[512];
11550 +extern pud_t level3_vmalloc_end_pgt[512];
11551 +extern pud_t level3_vmemmap_pgt[512];
11552 +extern pud_t level2_vmemmap_pgt[512];
11553 extern pmd_t level2_kernel_pgt[512];
11554 extern pmd_t level2_fixmap_pgt[512];
11555 -extern pmd_t level2_ident_pgt[512];
11556 -extern pgd_t init_level4_pgt[];
11557 +extern pmd_t level2_ident_pgt[512*2];
11558 +extern pgd_t init_level4_pgt[512];
11559
11560 #define swapper_pg_dir init_level4_pgt
11561
11562 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11563
11564 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11565 {
11566 + pax_open_kernel();
11567 *pmdp = pmd;
11568 + pax_close_kernel();
11569 }
11570
11571 static inline void native_pmd_clear(pmd_t *pmd)
11572 @@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
11573
11574 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11575 {
11576 + pax_open_kernel();
11577 *pudp = pud;
11578 + pax_close_kernel();
11579 }
11580
11581 static inline void native_pud_clear(pud_t *pud)
11582 @@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
11583
11584 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11585 {
11586 + pax_open_kernel();
11587 + *pgdp = pgd;
11588 + pax_close_kernel();
11589 +}
11590 +
11591 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11592 +{
11593 *pgdp = pgd;
11594 }
11595
11596 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11597 index 766ea16..5b96cb3 100644
11598 --- a/arch/x86/include/asm/pgtable_64_types.h
11599 +++ b/arch/x86/include/asm/pgtable_64_types.h
11600 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11601 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11602 #define MODULES_END _AC(0xffffffffff000000, UL)
11603 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11604 +#define MODULES_EXEC_VADDR MODULES_VADDR
11605 +#define MODULES_EXEC_END MODULES_END
11606 +
11607 +#define ktla_ktva(addr) (addr)
11608 +#define ktva_ktla(addr) (addr)
11609
11610 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11611 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11612 index 013286a..8b42f4f 100644
11613 --- a/arch/x86/include/asm/pgtable_types.h
11614 +++ b/arch/x86/include/asm/pgtable_types.h
11615 @@ -16,13 +16,12 @@
11616 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11617 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11618 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11619 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11620 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11621 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11622 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11623 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11624 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11625 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11626 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11627 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11628 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11629 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11630
11631 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11632 @@ -40,7 +39,6 @@
11633 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11634 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11635 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11636 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11637 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11638 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11639 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11640 @@ -57,8 +55,10 @@
11641
11642 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11643 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11644 -#else
11645 +#elif defined(CONFIG_KMEMCHECK)
11646 #define _PAGE_NX (_AT(pteval_t, 0))
11647 +#else
11648 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11649 #endif
11650
11651 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11652 @@ -96,6 +96,9 @@
11653 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11654 _PAGE_ACCESSED)
11655
11656 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11657 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11658 +
11659 #define __PAGE_KERNEL_EXEC \
11660 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11661 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11662 @@ -106,7 +109,7 @@
11663 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11664 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11665 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11666 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11667 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11668 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11669 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11670 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11671 @@ -168,8 +171,8 @@
11672 * bits are combined, this will alow user to access the high address mapped
11673 * VDSO in the presence of CONFIG_COMPAT_VDSO
11674 */
11675 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11676 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11677 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11678 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11679 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11680 #endif
11681
11682 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11683 {
11684 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11685 }
11686 +#endif
11687
11688 +#if PAGETABLE_LEVELS == 3
11689 +#include <asm-generic/pgtable-nopud.h>
11690 +#endif
11691 +
11692 +#if PAGETABLE_LEVELS == 2
11693 +#include <asm-generic/pgtable-nopmd.h>
11694 +#endif
11695 +
11696 +#ifndef __ASSEMBLY__
11697 #if PAGETABLE_LEVELS > 3
11698 typedef struct { pudval_t pud; } pud_t;
11699
11700 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11701 return pud.pud;
11702 }
11703 #else
11704 -#include <asm-generic/pgtable-nopud.h>
11705 -
11706 static inline pudval_t native_pud_val(pud_t pud)
11707 {
11708 return native_pgd_val(pud.pgd);
11709 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11710 return pmd.pmd;
11711 }
11712 #else
11713 -#include <asm-generic/pgtable-nopmd.h>
11714 -
11715 static inline pmdval_t native_pmd_val(pmd_t pmd)
11716 {
11717 return native_pgd_val(pmd.pud.pgd);
11718 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11719
11720 extern pteval_t __supported_pte_mask;
11721 extern void set_nx(void);
11722 -extern int nx_enabled;
11723
11724 #define pgprot_writecombine pgprot_writecombine
11725 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11726 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11727 index 4fa7dcc..764e33a 100644
11728 --- a/arch/x86/include/asm/processor.h
11729 +++ b/arch/x86/include/asm/processor.h
11730 @@ -276,7 +276,7 @@ struct tss_struct {
11731
11732 } ____cacheline_aligned;
11733
11734 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11735 +extern struct tss_struct init_tss[NR_CPUS];
11736
11737 /*
11738 * Save the original ist values for checking stack pointers during debugging
11739 @@ -807,11 +807,18 @@ static inline void spin_lock_prefetch(const void *x)
11740 */
11741 #define TASK_SIZE PAGE_OFFSET
11742 #define TASK_SIZE_MAX TASK_SIZE
11743 +
11744 +#ifdef CONFIG_PAX_SEGMEXEC
11745 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11746 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11747 +#else
11748 #define STACK_TOP TASK_SIZE
11749 -#define STACK_TOP_MAX STACK_TOP
11750 +#endif
11751 +
11752 +#define STACK_TOP_MAX TASK_SIZE
11753
11754 #define INIT_THREAD { \
11755 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11756 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11757 .vm86_info = NULL, \
11758 .sysenter_cs = __KERNEL_CS, \
11759 .io_bitmap_ptr = NULL, \
11760 @@ -825,7 +832,7 @@ static inline void spin_lock_prefetch(const void *x)
11761 */
11762 #define INIT_TSS { \
11763 .x86_tss = { \
11764 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11765 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11766 .ss0 = __KERNEL_DS, \
11767 .ss1 = __KERNEL_CS, \
11768 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11769 @@ -836,11 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
11770 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11771
11772 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11773 -#define KSTK_TOP(info) \
11774 -({ \
11775 - unsigned long *__ptr = (unsigned long *)(info); \
11776 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11777 -})
11778 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11779
11780 /*
11781 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11782 @@ -855,7 +858,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11783 #define task_pt_regs(task) \
11784 ({ \
11785 struct pt_regs *__regs__; \
11786 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11787 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11788 __regs__ - 1; \
11789 })
11790
11791 @@ -865,13 +868,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11792 /*
11793 * User space process size. 47bits minus one guard page.
11794 */
11795 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11796 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11797
11798 /* This decides where the kernel will search for a free chunk of vm
11799 * space during mmap's.
11800 */
11801 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11802 - 0xc0000000 : 0xFFFFe000)
11803 + 0xc0000000 : 0xFFFFf000)
11804
11805 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
11806 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11807 @@ -882,11 +885,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11808 #define STACK_TOP_MAX TASK_SIZE_MAX
11809
11810 #define INIT_THREAD { \
11811 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11812 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11813 }
11814
11815 #define INIT_TSS { \
11816 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11817 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11818 }
11819
11820 /*
11821 @@ -914,6 +917,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11822 */
11823 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11824
11825 +#ifdef CONFIG_PAX_SEGMEXEC
11826 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11827 +#endif
11828 +
11829 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11830
11831 /* Get/set a process' ability to use the timestamp counter instruction */
11832 @@ -976,12 +983,12 @@ extern bool cpu_has_amd_erratum(const int *);
11833
11834 void cpu_idle_wait(void);
11835
11836 -extern unsigned long arch_align_stack(unsigned long sp);
11837 +#define arch_align_stack(x) ((x) & ~0xfUL)
11838 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11839
11840 void default_idle(void);
11841 bool set_pm_idle_to_default(void);
11842
11843 -void stop_this_cpu(void *dummy);
11844 +void stop_this_cpu(void *dummy) __noreturn;
11845
11846 #endif /* _ASM_X86_PROCESSOR_H */
11847 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11848 index dcfde52..dbfea06 100644
11849 --- a/arch/x86/include/asm/ptrace.h
11850 +++ b/arch/x86/include/asm/ptrace.h
11851 @@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11852 }
11853
11854 /*
11855 - * user_mode_vm(regs) determines whether a register set came from user mode.
11856 + * user_mode(regs) determines whether a register set came from user mode.
11857 * This is true if V8086 mode was enabled OR if the register set was from
11858 * protected mode with RPL-3 CS value. This tricky test checks that with
11859 * one comparison. Many places in the kernel can bypass this full check
11860 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11861 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11862 + * be used.
11863 */
11864 -static inline int user_mode(struct pt_regs *regs)
11865 +static inline int user_mode_novm(struct pt_regs *regs)
11866 {
11867 #ifdef CONFIG_X86_32
11868 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11869 #else
11870 - return !!(regs->cs & 3);
11871 + return !!(regs->cs & SEGMENT_RPL_MASK);
11872 #endif
11873 }
11874
11875 -static inline int user_mode_vm(struct pt_regs *regs)
11876 +static inline int user_mode(struct pt_regs *regs)
11877 {
11878 #ifdef CONFIG_X86_32
11879 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11880 USER_RPL;
11881 #else
11882 - return user_mode(regs);
11883 + return user_mode_novm(regs);
11884 #endif
11885 }
11886
11887 @@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11888 #ifdef CONFIG_X86_64
11889 static inline bool user_64bit_mode(struct pt_regs *regs)
11890 {
11891 + unsigned long cs = regs->cs & 0xffff;
11892 #ifndef CONFIG_PARAVIRT
11893 /*
11894 * On non-paravirt systems, this is the only long mode CPL 3
11895 * selector. We do not allow long mode selectors in the LDT.
11896 */
11897 - return regs->cs == __USER_CS;
11898 + return cs == __USER_CS;
11899 #else
11900 /* Headers are too twisted for this to go in paravirt.h. */
11901 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11902 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11903 #endif
11904 }
11905 #endif
11906 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11907 index 92f29706..a79cbbb 100644
11908 --- a/arch/x86/include/asm/reboot.h
11909 +++ b/arch/x86/include/asm/reboot.h
11910 @@ -6,19 +6,19 @@
11911 struct pt_regs;
11912
11913 struct machine_ops {
11914 - void (*restart)(char *cmd);
11915 - void (*halt)(void);
11916 - void (*power_off)(void);
11917 + void (* __noreturn restart)(char *cmd);
11918 + void (* __noreturn halt)(void);
11919 + void (* __noreturn power_off)(void);
11920 void (*shutdown)(void);
11921 void (*crash_shutdown)(struct pt_regs *);
11922 - void (*emergency_restart)(void);
11923 -};
11924 + void (* __noreturn emergency_restart)(void);
11925 +} __no_const;
11926
11927 extern struct machine_ops machine_ops;
11928
11929 void native_machine_crash_shutdown(struct pt_regs *regs);
11930 void native_machine_shutdown(void);
11931 -void machine_real_restart(unsigned int type);
11932 +void machine_real_restart(unsigned int type) __noreturn;
11933 /* These must match dispatch_table in reboot_32.S */
11934 #define MRR_BIOS 0
11935 #define MRR_APM 1
11936 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11937 index 2dbe4a7..ce1db00 100644
11938 --- a/arch/x86/include/asm/rwsem.h
11939 +++ b/arch/x86/include/asm/rwsem.h
11940 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11941 {
11942 asm volatile("# beginning down_read\n\t"
11943 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11944 +
11945 +#ifdef CONFIG_PAX_REFCOUNT
11946 + "jno 0f\n"
11947 + LOCK_PREFIX _ASM_DEC "(%1)\n"
11948 + "int $4\n0:\n"
11949 + _ASM_EXTABLE(0b, 0b)
11950 +#endif
11951 +
11952 /* adds 0x00000001 */
11953 " jns 1f\n"
11954 " call call_rwsem_down_read_failed\n"
11955 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11956 "1:\n\t"
11957 " mov %1,%2\n\t"
11958 " add %3,%2\n\t"
11959 +
11960 +#ifdef CONFIG_PAX_REFCOUNT
11961 + "jno 0f\n"
11962 + "sub %3,%2\n"
11963 + "int $4\n0:\n"
11964 + _ASM_EXTABLE(0b, 0b)
11965 +#endif
11966 +
11967 " jle 2f\n\t"
11968 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11969 " jnz 1b\n\t"
11970 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11971 long tmp;
11972 asm volatile("# beginning down_write\n\t"
11973 LOCK_PREFIX " xadd %1,(%2)\n\t"
11974 +
11975 +#ifdef CONFIG_PAX_REFCOUNT
11976 + "jno 0f\n"
11977 + "mov %1,(%2)\n"
11978 + "int $4\n0:\n"
11979 + _ASM_EXTABLE(0b, 0b)
11980 +#endif
11981 +
11982 /* adds 0xffff0001, returns the old value */
11983 " test %1,%1\n\t"
11984 /* was the count 0 before? */
11985 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11986 long tmp;
11987 asm volatile("# beginning __up_read\n\t"
11988 LOCK_PREFIX " xadd %1,(%2)\n\t"
11989 +
11990 +#ifdef CONFIG_PAX_REFCOUNT
11991 + "jno 0f\n"
11992 + "mov %1,(%2)\n"
11993 + "int $4\n0:\n"
11994 + _ASM_EXTABLE(0b, 0b)
11995 +#endif
11996 +
11997 /* subtracts 1, returns the old value */
11998 " jns 1f\n\t"
11999 " call call_rwsem_wake\n" /* expects old value in %edx */
12000 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12001 long tmp;
12002 asm volatile("# beginning __up_write\n\t"
12003 LOCK_PREFIX " xadd %1,(%2)\n\t"
12004 +
12005 +#ifdef CONFIG_PAX_REFCOUNT
12006 + "jno 0f\n"
12007 + "mov %1,(%2)\n"
12008 + "int $4\n0:\n"
12009 + _ASM_EXTABLE(0b, 0b)
12010 +#endif
12011 +
12012 /* subtracts 0xffff0001, returns the old value */
12013 " jns 1f\n\t"
12014 " call call_rwsem_wake\n" /* expects old value in %edx */
12015 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12016 {
12017 asm volatile("# beginning __downgrade_write\n\t"
12018 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12019 +
12020 +#ifdef CONFIG_PAX_REFCOUNT
12021 + "jno 0f\n"
12022 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12023 + "int $4\n0:\n"
12024 + _ASM_EXTABLE(0b, 0b)
12025 +#endif
12026 +
12027 /*
12028 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12029 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12030 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12031 */
12032 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12033 {
12034 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12035 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12036 +
12037 +#ifdef CONFIG_PAX_REFCOUNT
12038 + "jno 0f\n"
12039 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
12040 + "int $4\n0:\n"
12041 + _ASM_EXTABLE(0b, 0b)
12042 +#endif
12043 +
12044 : "+m" (sem->count)
12045 : "er" (delta));
12046 }
12047 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12048 */
12049 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
12050 {
12051 - return delta + xadd(&sem->count, delta);
12052 + return delta + xadd_check_overflow(&sem->count, delta);
12053 }
12054
12055 #endif /* __KERNEL__ */
12056 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12057 index 1654662..5af4157 100644
12058 --- a/arch/x86/include/asm/segment.h
12059 +++ b/arch/x86/include/asm/segment.h
12060 @@ -64,10 +64,15 @@
12061 * 26 - ESPFIX small SS
12062 * 27 - per-cpu [ offset to per-cpu data area ]
12063 * 28 - stack_canary-20 [ for stack protector ]
12064 - * 29 - unused
12065 - * 30 - unused
12066 + * 29 - PCI BIOS CS
12067 + * 30 - PCI BIOS DS
12068 * 31 - TSS for double fault handler
12069 */
12070 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12071 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12072 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12073 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12074 +
12075 #define GDT_ENTRY_TLS_MIN 6
12076 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12077
12078 @@ -79,6 +84,8 @@
12079
12080 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
12081
12082 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12083 +
12084 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
12085
12086 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
12087 @@ -104,6 +111,12 @@
12088 #define __KERNEL_STACK_CANARY 0
12089 #endif
12090
12091 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
12092 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12093 +
12094 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
12095 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12096 +
12097 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12098
12099 /*
12100 @@ -141,7 +154,7 @@
12101 */
12102
12103 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12104 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12105 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12106
12107
12108 #else
12109 @@ -165,6 +178,8 @@
12110 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
12111 #define __USER32_DS __USER_DS
12112
12113 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12114 +
12115 #define GDT_ENTRY_TSS 8 /* needs two entries */
12116 #define GDT_ENTRY_LDT 10 /* needs two entries */
12117 #define GDT_ENTRY_TLS_MIN 12
12118 @@ -185,6 +200,7 @@
12119 #endif
12120
12121 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
12122 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
12123 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
12124 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
12125 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
12126 @@ -263,7 +279,7 @@ static inline unsigned long get_limit(unsigned long segment)
12127 {
12128 unsigned long __limit;
12129 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12130 - return __limit + 1;
12131 + return __limit;
12132 }
12133
12134 #endif /* !__ASSEMBLY__ */
12135 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12136 index 0434c40..1714bf0 100644
12137 --- a/arch/x86/include/asm/smp.h
12138 +++ b/arch/x86/include/asm/smp.h
12139 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12140 /* cpus sharing the last level cache: */
12141 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
12142 DECLARE_PER_CPU(u16, cpu_llc_id);
12143 -DECLARE_PER_CPU(int, cpu_number);
12144 +DECLARE_PER_CPU(unsigned int, cpu_number);
12145
12146 static inline struct cpumask *cpu_sibling_mask(int cpu)
12147 {
12148 @@ -77,7 +77,7 @@ struct smp_ops {
12149
12150 void (*send_call_func_ipi)(const struct cpumask *mask);
12151 void (*send_call_func_single_ipi)(int cpu);
12152 -};
12153 +} __no_const;
12154
12155 /* Globals due to paravirt */
12156 extern void set_cpu_sibling_map(int cpu);
12157 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12158 extern int safe_smp_processor_id(void);
12159
12160 #elif defined(CONFIG_X86_64_SMP)
12161 -#define raw_smp_processor_id() (percpu_read(cpu_number))
12162 -
12163 -#define stack_smp_processor_id() \
12164 -({ \
12165 - struct thread_info *ti; \
12166 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12167 - ti->cpu; \
12168 -})
12169 +#define raw_smp_processor_id() (percpu_read(cpu_number))
12170 +#define stack_smp_processor_id() raw_smp_processor_id()
12171 #define safe_smp_processor_id() smp_processor_id()
12172
12173 #endif
12174 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12175 index 76bfa2c..12d3fe7 100644
12176 --- a/arch/x86/include/asm/spinlock.h
12177 +++ b/arch/x86/include/asm/spinlock.h
12178 @@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
12179 static inline void arch_read_lock(arch_rwlock_t *rw)
12180 {
12181 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
12182 +
12183 +#ifdef CONFIG_PAX_REFCOUNT
12184 + "jno 0f\n"
12185 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
12186 + "int $4\n0:\n"
12187 + _ASM_EXTABLE(0b, 0b)
12188 +#endif
12189 +
12190 "jns 1f\n"
12191 "call __read_lock_failed\n\t"
12192 "1:\n"
12193 @@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
12194 static inline void arch_write_lock(arch_rwlock_t *rw)
12195 {
12196 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
12197 +
12198 +#ifdef CONFIG_PAX_REFCOUNT
12199 + "jno 0f\n"
12200 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
12201 + "int $4\n0:\n"
12202 + _ASM_EXTABLE(0b, 0b)
12203 +#endif
12204 +
12205 "jz 1f\n"
12206 "call __write_lock_failed\n\t"
12207 "1:\n"
12208 @@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
12209
12210 static inline void arch_read_unlock(arch_rwlock_t *rw)
12211 {
12212 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
12213 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
12214 +
12215 +#ifdef CONFIG_PAX_REFCOUNT
12216 + "jno 0f\n"
12217 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
12218 + "int $4\n0:\n"
12219 + _ASM_EXTABLE(0b, 0b)
12220 +#endif
12221 +
12222 :"+m" (rw->lock) : : "memory");
12223 }
12224
12225 static inline void arch_write_unlock(arch_rwlock_t *rw)
12226 {
12227 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
12228 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
12229 +
12230 +#ifdef CONFIG_PAX_REFCOUNT
12231 + "jno 0f\n"
12232 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
12233 + "int $4\n0:\n"
12234 + _ASM_EXTABLE(0b, 0b)
12235 +#endif
12236 +
12237 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
12238 }
12239
12240 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12241 index b5d9533..41655fa 100644
12242 --- a/arch/x86/include/asm/stackprotector.h
12243 +++ b/arch/x86/include/asm/stackprotector.h
12244 @@ -47,7 +47,7 @@
12245 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12246 */
12247 #define GDT_STACK_CANARY_INIT \
12248 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12249 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12250
12251 /*
12252 * Initialize the stackprotector canary value.
12253 @@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
12254
12255 static inline void load_stack_canary_segment(void)
12256 {
12257 -#ifdef CONFIG_X86_32
12258 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12259 asm volatile ("mov %0, %%gs" : : "r" (0));
12260 #endif
12261 }
12262 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
12263 index 70bbe39..4ae2bd4 100644
12264 --- a/arch/x86/include/asm/stacktrace.h
12265 +++ b/arch/x86/include/asm/stacktrace.h
12266 @@ -11,28 +11,20 @@
12267
12268 extern int kstack_depth_to_print;
12269
12270 -struct thread_info;
12271 +struct task_struct;
12272 struct stacktrace_ops;
12273
12274 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
12275 - unsigned long *stack,
12276 - unsigned long bp,
12277 - const struct stacktrace_ops *ops,
12278 - void *data,
12279 - unsigned long *end,
12280 - int *graph);
12281 +typedef unsigned long walk_stack_t(struct task_struct *task,
12282 + void *stack_start,
12283 + unsigned long *stack,
12284 + unsigned long bp,
12285 + const struct stacktrace_ops *ops,
12286 + void *data,
12287 + unsigned long *end,
12288 + int *graph);
12289
12290 -extern unsigned long
12291 -print_context_stack(struct thread_info *tinfo,
12292 - unsigned long *stack, unsigned long bp,
12293 - const struct stacktrace_ops *ops, void *data,
12294 - unsigned long *end, int *graph);
12295 -
12296 -extern unsigned long
12297 -print_context_stack_bp(struct thread_info *tinfo,
12298 - unsigned long *stack, unsigned long bp,
12299 - const struct stacktrace_ops *ops, void *data,
12300 - unsigned long *end, int *graph);
12301 +extern walk_stack_t print_context_stack;
12302 +extern walk_stack_t print_context_stack_bp;
12303
12304 /* Generic stack tracer with callbacks */
12305
12306 @@ -40,7 +32,7 @@ struct stacktrace_ops {
12307 void (*address)(void *data, unsigned long address, int reliable);
12308 /* On negative return stop dumping */
12309 int (*stack)(void *data, char *name);
12310 - walk_stack_t walk_stack;
12311 + walk_stack_t *walk_stack;
12312 };
12313
12314 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
12315 diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
12316 index 4ec45b3..a4f0a8a 100644
12317 --- a/arch/x86/include/asm/switch_to.h
12318 +++ b/arch/x86/include/asm/switch_to.h
12319 @@ -108,7 +108,7 @@ do { \
12320 "call __switch_to\n\t" \
12321 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12322 __switch_canary \
12323 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
12324 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12325 "movq %%rax,%%rdi\n\t" \
12326 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12327 "jnz ret_from_fork\n\t" \
12328 @@ -119,7 +119,7 @@ do { \
12329 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12330 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12331 [_tif_fork] "i" (_TIF_FORK), \
12332 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
12333 + [thread_info] "m" (current_tinfo), \
12334 [current_task] "m" (current_task) \
12335 __switch_canary_iparam \
12336 : "memory", "cc" __EXTRA_CLOBBER)
12337 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
12338 index 3fda9db4..4ca1c61 100644
12339 --- a/arch/x86/include/asm/sys_ia32.h
12340 +++ b/arch/x86/include/asm/sys_ia32.h
12341 @@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
12342 struct old_sigaction32 __user *);
12343 asmlinkage long sys32_alarm(unsigned int);
12344
12345 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
12346 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
12347 asmlinkage long sys32_sysfs(int, u32, u32);
12348
12349 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
12350 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12351 index ad6df8c..5e0cf6e 100644
12352 --- a/arch/x86/include/asm/thread_info.h
12353 +++ b/arch/x86/include/asm/thread_info.h
12354 @@ -10,6 +10,7 @@
12355 #include <linux/compiler.h>
12356 #include <asm/page.h>
12357 #include <asm/types.h>
12358 +#include <asm/percpu.h>
12359
12360 /*
12361 * low level task data that entry.S needs immediate access to
12362 @@ -24,7 +25,6 @@ struct exec_domain;
12363 #include <linux/atomic.h>
12364
12365 struct thread_info {
12366 - struct task_struct *task; /* main task structure */
12367 struct exec_domain *exec_domain; /* execution domain */
12368 __u32 flags; /* low level flags */
12369 __u32 status; /* thread synchronous flags */
12370 @@ -34,19 +34,13 @@ struct thread_info {
12371 mm_segment_t addr_limit;
12372 struct restart_block restart_block;
12373 void __user *sysenter_return;
12374 -#ifdef CONFIG_X86_32
12375 - unsigned long previous_esp; /* ESP of the previous stack in
12376 - case of nested (IRQ) stacks
12377 - */
12378 - __u8 supervisor_stack[0];
12379 -#endif
12380 + unsigned long lowest_stack;
12381 unsigned int sig_on_uaccess_error:1;
12382 unsigned int uaccess_err:1; /* uaccess failed */
12383 };
12384
12385 -#define INIT_THREAD_INFO(tsk) \
12386 +#define INIT_THREAD_INFO \
12387 { \
12388 - .task = &tsk, \
12389 .exec_domain = &default_exec_domain, \
12390 .flags = 0, \
12391 .cpu = 0, \
12392 @@ -57,7 +51,7 @@ struct thread_info {
12393 }, \
12394 }
12395
12396 -#define init_thread_info (init_thread_union.thread_info)
12397 +#define init_thread_info (init_thread_union.stack)
12398 #define init_stack (init_thread_union.stack)
12399
12400 #else /* !__ASSEMBLY__ */
12401 @@ -97,6 +91,7 @@ struct thread_info {
12402 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
12403 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
12404 #define TIF_X32 30 /* 32-bit native x86-64 binary */
12405 +#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
12406
12407 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
12408 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
12409 @@ -120,16 +115,18 @@ struct thread_info {
12410 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
12411 #define _TIF_ADDR32 (1 << TIF_ADDR32)
12412 #define _TIF_X32 (1 << TIF_X32)
12413 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
12414
12415 /* work to do in syscall_trace_enter() */
12416 #define _TIF_WORK_SYSCALL_ENTRY \
12417 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
12418 - _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
12419 + _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
12420 + _TIF_GRSEC_SETXID)
12421
12422 /* work to do in syscall_trace_leave() */
12423 #define _TIF_WORK_SYSCALL_EXIT \
12424 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
12425 - _TIF_SYSCALL_TRACEPOINT)
12426 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12427
12428 /* work to do on interrupt/exception return */
12429 #define _TIF_WORK_MASK \
12430 @@ -139,7 +136,8 @@ struct thread_info {
12431
12432 /* work to do on any return to user space */
12433 #define _TIF_ALLWORK_MASK \
12434 - ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
12435 + ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
12436 + _TIF_GRSEC_SETXID)
12437
12438 /* Only used for 64 bit */
12439 #define _TIF_DO_NOTIFY_MASK \
12440 @@ -173,45 +171,40 @@ struct thread_info {
12441 ret; \
12442 })
12443
12444 -#ifdef CONFIG_X86_32
12445 -
12446 -#define STACK_WARN (THREAD_SIZE/8)
12447 -/*
12448 - * macros/functions for gaining access to the thread information structure
12449 - *
12450 - * preempt_count needs to be 1 initially, until the scheduler is functional.
12451 - */
12452 -#ifndef __ASSEMBLY__
12453 -
12454 -
12455 -/* how to get the current stack pointer from C */
12456 -register unsigned long current_stack_pointer asm("esp") __used;
12457 -
12458 -/* how to get the thread information struct from C */
12459 -static inline struct thread_info *current_thread_info(void)
12460 -{
12461 - return (struct thread_info *)
12462 - (current_stack_pointer & ~(THREAD_SIZE - 1));
12463 -}
12464 -
12465 -#else /* !__ASSEMBLY__ */
12466 -
12467 +#ifdef __ASSEMBLY__
12468 /* how to get the thread information struct from ASM */
12469 #define GET_THREAD_INFO(reg) \
12470 - movl $-THREAD_SIZE, reg; \
12471 - andl %esp, reg
12472 + mov PER_CPU_VAR(current_tinfo), reg
12473
12474 /* use this one if reg already contains %esp */
12475 -#define GET_THREAD_INFO_WITH_ESP(reg) \
12476 - andl $-THREAD_SIZE, reg
12477 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12478 +#else
12479 +/* how to get the thread information struct from C */
12480 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12481 +
12482 +static __always_inline struct thread_info *current_thread_info(void)
12483 +{
12484 + return percpu_read_stable(current_tinfo);
12485 +}
12486 +#endif
12487 +
12488 +#ifdef CONFIG_X86_32
12489 +
12490 +#define STACK_WARN (THREAD_SIZE/8)
12491 +/*
12492 + * macros/functions for gaining access to the thread information structure
12493 + *
12494 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12495 + */
12496 +#ifndef __ASSEMBLY__
12497 +
12498 +/* how to get the current stack pointer from C */
12499 +register unsigned long current_stack_pointer asm("esp") __used;
12500
12501 #endif
12502
12503 #else /* X86_32 */
12504
12505 -#include <asm/percpu.h>
12506 -#define KERNEL_STACK_OFFSET (5*8)
12507 -
12508 /*
12509 * macros/functions for gaining access to the thread information structure
12510 * preempt_count needs to be 1 initially, until the scheduler is functional.
12511 @@ -219,27 +212,8 @@ static inline struct thread_info *current_thread_info(void)
12512 #ifndef __ASSEMBLY__
12513 DECLARE_PER_CPU(unsigned long, kernel_stack);
12514
12515 -static inline struct thread_info *current_thread_info(void)
12516 -{
12517 - struct thread_info *ti;
12518 - ti = (void *)(percpu_read_stable(kernel_stack) +
12519 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12520 - return ti;
12521 -}
12522 -
12523 -#else /* !__ASSEMBLY__ */
12524 -
12525 -/* how to get the thread information struct from ASM */
12526 -#define GET_THREAD_INFO(reg) \
12527 - movq PER_CPU_VAR(kernel_stack),reg ; \
12528 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12529 -
12530 -/*
12531 - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12532 - * a certain register (to be used in assembler memory operands).
12533 - */
12534 -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12535 -
12536 +/* how to get the current stack pointer from C */
12537 +register unsigned long current_stack_pointer asm("rsp") __used;
12538 #endif
12539
12540 #endif /* !X86_32 */
12541 @@ -285,5 +259,16 @@ extern void arch_task_cache_init(void);
12542 extern void free_thread_info(struct thread_info *ti);
12543 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12544 #define arch_task_cache_init arch_task_cache_init
12545 +
12546 +#define __HAVE_THREAD_FUNCTIONS
12547 +#define task_thread_info(task) (&(task)->tinfo)
12548 +#define task_stack_page(task) ((task)->stack)
12549 +#define setup_thread_stack(p, org) do {} while (0)
12550 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12551 +
12552 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12553 +extern struct task_struct *alloc_task_struct_node(int node);
12554 +extern void free_task_struct(struct task_struct *);
12555 +
12556 #endif
12557 #endif /* _ASM_X86_THREAD_INFO_H */
12558 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12559 index e054459..14bc8a7 100644
12560 --- a/arch/x86/include/asm/uaccess.h
12561 +++ b/arch/x86/include/asm/uaccess.h
12562 @@ -7,12 +7,15 @@
12563 #include <linux/compiler.h>
12564 #include <linux/thread_info.h>
12565 #include <linux/string.h>
12566 +#include <linux/sched.h>
12567 #include <asm/asm.h>
12568 #include <asm/page.h>
12569
12570 #define VERIFY_READ 0
12571 #define VERIFY_WRITE 1
12572
12573 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12574 +
12575 /*
12576 * The fs value determines whether argument validity checking should be
12577 * performed or not. If get_fs() == USER_DS, checking is performed, with
12578 @@ -28,7 +31,12 @@
12579
12580 #define get_ds() (KERNEL_DS)
12581 #define get_fs() (current_thread_info()->addr_limit)
12582 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12583 +void __set_fs(mm_segment_t x);
12584 +void set_fs(mm_segment_t x);
12585 +#else
12586 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12587 +#endif
12588
12589 #define segment_eq(a, b) ((a).seg == (b).seg)
12590
12591 @@ -76,7 +84,33 @@
12592 * checks that the pointer is in the user space range - after calling
12593 * this function, memory access functions may still return -EFAULT.
12594 */
12595 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12596 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12597 +#define access_ok(type, addr, size) \
12598 +({ \
12599 + long __size = size; \
12600 + unsigned long __addr = (unsigned long)addr; \
12601 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12602 + unsigned long __end_ao = __addr + __size - 1; \
12603 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12604 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12605 + while(__addr_ao <= __end_ao) { \
12606 + char __c_ao; \
12607 + __addr_ao += PAGE_SIZE; \
12608 + if (__size > PAGE_SIZE) \
12609 + cond_resched(); \
12610 + if (__get_user(__c_ao, (char __user *)__addr)) \
12611 + break; \
12612 + if (type != VERIFY_WRITE) { \
12613 + __addr = __addr_ao; \
12614 + continue; \
12615 + } \
12616 + if (__put_user(__c_ao, (char __user *)__addr)) \
12617 + break; \
12618 + __addr = __addr_ao; \
12619 + } \
12620 + } \
12621 + __ret_ao; \
12622 +})
12623
12624 /*
12625 * The exception table consists of pairs of addresses: the first is the
12626 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12627 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12628 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12629
12630 -
12631 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12632 +#define __copyuser_seg "gs;"
12633 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12634 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12635 +#else
12636 +#define __copyuser_seg
12637 +#define __COPYUSER_SET_ES
12638 +#define __COPYUSER_RESTORE_ES
12639 +#endif
12640
12641 #ifdef CONFIG_X86_32
12642 #define __put_user_asm_u64(x, addr, err, errret) \
12643 - asm volatile("1: movl %%eax,0(%2)\n" \
12644 - "2: movl %%edx,4(%2)\n" \
12645 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12646 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12647 "3:\n" \
12648 ".section .fixup,\"ax\"\n" \
12649 "4: movl %3,%0\n" \
12650 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12651 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12652
12653 #define __put_user_asm_ex_u64(x, addr) \
12654 - asm volatile("1: movl %%eax,0(%1)\n" \
12655 - "2: movl %%edx,4(%1)\n" \
12656 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12657 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12658 "3:\n" \
12659 _ASM_EXTABLE(1b, 2b - 1b) \
12660 _ASM_EXTABLE(2b, 3b - 2b) \
12661 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
12662 __typeof__(*(ptr)) __pu_val; \
12663 __chk_user_ptr(ptr); \
12664 might_fault(); \
12665 - __pu_val = x; \
12666 + __pu_val = (x); \
12667 switch (sizeof(*(ptr))) { \
12668 case 1: \
12669 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12670 @@ -373,7 +415,7 @@ do { \
12671 } while (0)
12672
12673 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12674 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12675 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12676 "2:\n" \
12677 ".section .fixup,\"ax\"\n" \
12678 "3: mov %3,%0\n" \
12679 @@ -381,7 +423,7 @@ do { \
12680 " jmp 2b\n" \
12681 ".previous\n" \
12682 _ASM_EXTABLE(1b, 3b) \
12683 - : "=r" (err), ltype(x) \
12684 + : "=r" (err), ltype (x) \
12685 : "m" (__m(addr)), "i" (errret), "0" (err))
12686
12687 #define __get_user_size_ex(x, ptr, size) \
12688 @@ -406,7 +448,7 @@ do { \
12689 } while (0)
12690
12691 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12692 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12693 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12694 "2:\n" \
12695 _ASM_EXTABLE(1b, 2b - 1b) \
12696 : ltype(x) : "m" (__m(addr)))
12697 @@ -423,13 +465,24 @@ do { \
12698 int __gu_err; \
12699 unsigned long __gu_val; \
12700 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12701 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12702 + (x) = (__typeof__(*(ptr)))__gu_val; \
12703 __gu_err; \
12704 })
12705
12706 /* FIXME: this hack is definitely wrong -AK */
12707 struct __large_struct { unsigned long buf[100]; };
12708 -#define __m(x) (*(struct __large_struct __user *)(x))
12709 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12710 +#define ____m(x) \
12711 +({ \
12712 + unsigned long ____x = (unsigned long)(x); \
12713 + if (____x < PAX_USER_SHADOW_BASE) \
12714 + ____x += PAX_USER_SHADOW_BASE; \
12715 + (void __user *)____x; \
12716 +})
12717 +#else
12718 +#define ____m(x) (x)
12719 +#endif
12720 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12721
12722 /*
12723 * Tell gcc we read from memory instead of writing: this is because
12724 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12725 * aliasing issues.
12726 */
12727 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12728 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12729 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12730 "2:\n" \
12731 ".section .fixup,\"ax\"\n" \
12732 "3: mov %3,%0\n" \
12733 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12734 ".previous\n" \
12735 _ASM_EXTABLE(1b, 3b) \
12736 : "=r"(err) \
12737 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12738 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12739
12740 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12741 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12742 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12743 "2:\n" \
12744 _ASM_EXTABLE(1b, 2b - 1b) \
12745 : : ltype(x), "m" (__m(addr)))
12746 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12747 * On error, the variable @x is set to zero.
12748 */
12749
12750 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12751 +#define __get_user(x, ptr) get_user((x), (ptr))
12752 +#else
12753 #define __get_user(x, ptr) \
12754 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12755 +#endif
12756
12757 /**
12758 * __put_user: - Write a simple value into user space, with less checking.
12759 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12760 * Returns zero on success, or -EFAULT on error.
12761 */
12762
12763 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12764 +#define __put_user(x, ptr) put_user((x), (ptr))
12765 +#else
12766 #define __put_user(x, ptr) \
12767 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12768 +#endif
12769
12770 #define __get_user_unaligned __get_user
12771 #define __put_user_unaligned __put_user
12772 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12773 #define get_user_ex(x, ptr) do { \
12774 unsigned long __gue_val; \
12775 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12776 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12777 + (x) = (__typeof__(*(ptr)))__gue_val; \
12778 } while (0)
12779
12780 #ifdef CONFIG_X86_WP_WORKS_OK
12781 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12782 index 8084bc7..cc139cb 100644
12783 --- a/arch/x86/include/asm/uaccess_32.h
12784 +++ b/arch/x86/include/asm/uaccess_32.h
12785 @@ -11,15 +11,15 @@
12786 #include <asm/page.h>
12787
12788 unsigned long __must_check __copy_to_user_ll
12789 - (void __user *to, const void *from, unsigned long n);
12790 + (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12791 unsigned long __must_check __copy_from_user_ll
12792 - (void *to, const void __user *from, unsigned long n);
12793 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12794 unsigned long __must_check __copy_from_user_ll_nozero
12795 - (void *to, const void __user *from, unsigned long n);
12796 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12797 unsigned long __must_check __copy_from_user_ll_nocache
12798 - (void *to, const void __user *from, unsigned long n);
12799 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12800 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12801 - (void *to, const void __user *from, unsigned long n);
12802 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12803
12804 /**
12805 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
12806 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12807 static __always_inline unsigned long __must_check
12808 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12809 {
12810 + if ((long)n < 0)
12811 + return n;
12812 +
12813 if (__builtin_constant_p(n)) {
12814 unsigned long ret;
12815
12816 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12817 return ret;
12818 }
12819 }
12820 + if (!__builtin_constant_p(n))
12821 + check_object_size(from, n, true);
12822 return __copy_to_user_ll(to, from, n);
12823 }
12824
12825 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
12826 __copy_to_user(void __user *to, const void *from, unsigned long n)
12827 {
12828 might_fault();
12829 +
12830 return __copy_to_user_inatomic(to, from, n);
12831 }
12832
12833 static __always_inline unsigned long
12834 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12835 {
12836 + if ((long)n < 0)
12837 + return n;
12838 +
12839 /* Avoid zeroing the tail if the copy fails..
12840 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12841 * but as the zeroing behaviour is only significant when n is not
12842 @@ -137,6 +146,10 @@ static __always_inline unsigned long
12843 __copy_from_user(void *to, const void __user *from, unsigned long n)
12844 {
12845 might_fault();
12846 +
12847 + if ((long)n < 0)
12848 + return n;
12849 +
12850 if (__builtin_constant_p(n)) {
12851 unsigned long ret;
12852
12853 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12854 return ret;
12855 }
12856 }
12857 + if (!__builtin_constant_p(n))
12858 + check_object_size(to, n, false);
12859 return __copy_from_user_ll(to, from, n);
12860 }
12861
12862 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12863 const void __user *from, unsigned long n)
12864 {
12865 might_fault();
12866 +
12867 + if ((long)n < 0)
12868 + return n;
12869 +
12870 if (__builtin_constant_p(n)) {
12871 unsigned long ret;
12872
12873 @@ -181,15 +200,19 @@ static __always_inline unsigned long
12874 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12875 unsigned long n)
12876 {
12877 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12878 + if ((long)n < 0)
12879 + return n;
12880 +
12881 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12882 }
12883
12884 -unsigned long __must_check copy_to_user(void __user *to,
12885 - const void *from, unsigned long n);
12886 -unsigned long __must_check _copy_from_user(void *to,
12887 - const void __user *from,
12888 - unsigned long n);
12889 -
12890 +extern void copy_to_user_overflow(void)
12891 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12892 + __compiletime_error("copy_to_user() buffer size is not provably correct")
12893 +#else
12894 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
12895 +#endif
12896 +;
12897
12898 extern void copy_from_user_overflow(void)
12899 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12900 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
12901 #endif
12902 ;
12903
12904 -static inline unsigned long __must_check copy_from_user(void *to,
12905 - const void __user *from,
12906 - unsigned long n)
12907 +/**
12908 + * copy_to_user: - Copy a block of data into user space.
12909 + * @to: Destination address, in user space.
12910 + * @from: Source address, in kernel space.
12911 + * @n: Number of bytes to copy.
12912 + *
12913 + * Context: User context only. This function may sleep.
12914 + *
12915 + * Copy data from kernel space to user space.
12916 + *
12917 + * Returns number of bytes that could not be copied.
12918 + * On success, this will be zero.
12919 + */
12920 +static inline unsigned long __must_check
12921 +copy_to_user(void __user *to, const void *from, unsigned long n)
12922 +{
12923 + int sz = __compiletime_object_size(from);
12924 +
12925 + if (unlikely(sz != -1 && sz < n))
12926 + copy_to_user_overflow();
12927 + else if (access_ok(VERIFY_WRITE, to, n))
12928 + n = __copy_to_user(to, from, n);
12929 + return n;
12930 +}
12931 +
12932 +/**
12933 + * copy_from_user: - Copy a block of data from user space.
12934 + * @to: Destination address, in kernel space.
12935 + * @from: Source address, in user space.
12936 + * @n: Number of bytes to copy.
12937 + *
12938 + * Context: User context only. This function may sleep.
12939 + *
12940 + * Copy data from user space to kernel space.
12941 + *
12942 + * Returns number of bytes that could not be copied.
12943 + * On success, this will be zero.
12944 + *
12945 + * If some data could not be copied, this function will pad the copied
12946 + * data to the requested size using zero bytes.
12947 + */
12948 +static inline unsigned long __must_check
12949 +copy_from_user(void *to, const void __user *from, unsigned long n)
12950 {
12951 int sz = __compiletime_object_size(to);
12952
12953 - if (likely(sz == -1 || sz >= n))
12954 - n = _copy_from_user(to, from, n);
12955 - else
12956 + if (unlikely(sz != -1 && sz < n))
12957 copy_from_user_overflow();
12958 -
12959 + else if (access_ok(VERIFY_READ, from, n))
12960 + n = __copy_from_user(to, from, n);
12961 + else if ((long)n > 0) {
12962 + if (!__builtin_constant_p(n))
12963 + check_object_size(to, n, false);
12964 + memset(to, 0, n);
12965 + }
12966 return n;
12967 }
12968
12969 @@ -230,7 +297,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
12970 #define strlen_user(str) strnlen_user(str, LONG_MAX)
12971
12972 long strnlen_user(const char __user *str, long n);
12973 -unsigned long __must_check clear_user(void __user *mem, unsigned long len);
12974 -unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
12975 +unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
12976 +unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
12977
12978 #endif /* _ASM_X86_UACCESS_32_H */
12979 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12980 index fcd4b6f..f4631a0 100644
12981 --- a/arch/x86/include/asm/uaccess_64.h
12982 +++ b/arch/x86/include/asm/uaccess_64.h
12983 @@ -10,6 +10,9 @@
12984 #include <asm/alternative.h>
12985 #include <asm/cpufeature.h>
12986 #include <asm/page.h>
12987 +#include <asm/pgtable.h>
12988 +
12989 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
12990
12991 /*
12992 * Copy To/From Userspace
12993 @@ -17,12 +20,14 @@
12994
12995 /* Handles exceptions in both to and from, but doesn't do access_ok */
12996 __must_check unsigned long
12997 -copy_user_generic_string(void *to, const void *from, unsigned len);
12998 +copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
12999 __must_check unsigned long
13000 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
13001 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
13002
13003 static __always_inline __must_check unsigned long
13004 -copy_user_generic(void *to, const void *from, unsigned len)
13005 +copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
13006 +static __always_inline __must_check unsigned long
13007 +copy_user_generic(void *to, const void *from, unsigned long len)
13008 {
13009 unsigned ret;
13010
13011 @@ -32,142 +37,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
13012 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
13013 "=d" (len)),
13014 "1" (to), "2" (from), "3" (len)
13015 - : "memory", "rcx", "r8", "r9", "r10", "r11");
13016 + : "memory", "rcx", "r8", "r9", "r11");
13017 return ret;
13018 }
13019
13020 +static __always_inline __must_check unsigned long
13021 +__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
13022 +static __always_inline __must_check unsigned long
13023 +__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
13024 __must_check unsigned long
13025 -_copy_to_user(void __user *to, const void *from, unsigned len);
13026 -__must_check unsigned long
13027 -_copy_from_user(void *to, const void __user *from, unsigned len);
13028 -__must_check unsigned long
13029 -copy_in_user(void __user *to, const void __user *from, unsigned len);
13030 +copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
13031
13032 static inline unsigned long __must_check copy_from_user(void *to,
13033 const void __user *from,
13034 unsigned long n)
13035 {
13036 - int sz = __compiletime_object_size(to);
13037 -
13038 might_fault();
13039 - if (likely(sz == -1 || sz >= n))
13040 - n = _copy_from_user(to, from, n);
13041 -#ifdef CONFIG_DEBUG_VM
13042 - else
13043 - WARN(1, "Buffer overflow detected!\n");
13044 -#endif
13045 +
13046 + if (access_ok(VERIFY_READ, from, n))
13047 + n = __copy_from_user(to, from, n);
13048 + else if (n < INT_MAX) {
13049 + if (!__builtin_constant_p(n))
13050 + check_object_size(to, n, false);
13051 + memset(to, 0, n);
13052 + }
13053 return n;
13054 }
13055
13056 static __always_inline __must_check
13057 -int copy_to_user(void __user *dst, const void *src, unsigned size)
13058 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
13059 {
13060 might_fault();
13061
13062 - return _copy_to_user(dst, src, size);
13063 + if (access_ok(VERIFY_WRITE, dst, size))
13064 + size = __copy_to_user(dst, src, size);
13065 + return size;
13066 }
13067
13068 static __always_inline __must_check
13069 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
13070 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13071 {
13072 - int ret = 0;
13073 + int sz = __compiletime_object_size(dst);
13074 + unsigned ret = 0;
13075
13076 might_fault();
13077 - if (!__builtin_constant_p(size))
13078 - return copy_user_generic(dst, (__force void *)src, size);
13079 +
13080 + if (size > INT_MAX)
13081 + return size;
13082 +
13083 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13084 + if (!__access_ok(VERIFY_READ, src, size))
13085 + return size;
13086 +#endif
13087 +
13088 + if (unlikely(sz != -1 && sz < size)) {
13089 +#ifdef CONFIG_DEBUG_VM
13090 + WARN(1, "Buffer overflow detected!\n");
13091 +#endif
13092 + return size;
13093 + }
13094 +
13095 + if (!__builtin_constant_p(size)) {
13096 + check_object_size(dst, size, false);
13097 +
13098 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13099 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13100 + src += PAX_USER_SHADOW_BASE;
13101 +#endif
13102 +
13103 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13104 + }
13105 switch (size) {
13106 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13107 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13108 ret, "b", "b", "=q", 1);
13109 return ret;
13110 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13111 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13112 ret, "w", "w", "=r", 2);
13113 return ret;
13114 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13115 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13116 ret, "l", "k", "=r", 4);
13117 return ret;
13118 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13119 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13120 ret, "q", "", "=r", 8);
13121 return ret;
13122 case 10:
13123 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13124 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13125 ret, "q", "", "=r", 10);
13126 if (unlikely(ret))
13127 return ret;
13128 __get_user_asm(*(u16 *)(8 + (char *)dst),
13129 - (u16 __user *)(8 + (char __user *)src),
13130 + (const u16 __user *)(8 + (const char __user *)src),
13131 ret, "w", "w", "=r", 2);
13132 return ret;
13133 case 16:
13134 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13135 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13136 ret, "q", "", "=r", 16);
13137 if (unlikely(ret))
13138 return ret;
13139 __get_user_asm(*(u64 *)(8 + (char *)dst),
13140 - (u64 __user *)(8 + (char __user *)src),
13141 + (const u64 __user *)(8 + (const char __user *)src),
13142 ret, "q", "", "=r", 8);
13143 return ret;
13144 default:
13145 - return copy_user_generic(dst, (__force void *)src, size);
13146 +
13147 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13148 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13149 + src += PAX_USER_SHADOW_BASE;
13150 +#endif
13151 +
13152 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13153 }
13154 }
13155
13156 static __always_inline __must_check
13157 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
13158 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13159 {
13160 - int ret = 0;
13161 + int sz = __compiletime_object_size(src);
13162 + unsigned ret = 0;
13163
13164 might_fault();
13165 - if (!__builtin_constant_p(size))
13166 - return copy_user_generic((__force void *)dst, src, size);
13167 +
13168 + if (size > INT_MAX)
13169 + return size;
13170 +
13171 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13172 + if (!__access_ok(VERIFY_WRITE, dst, size))
13173 + return size;
13174 +#endif
13175 +
13176 + if (unlikely(sz != -1 && sz < size)) {
13177 +#ifdef CONFIG_DEBUG_VM
13178 + WARN(1, "Buffer overflow detected!\n");
13179 +#endif
13180 + return size;
13181 + }
13182 +
13183 + if (!__builtin_constant_p(size)) {
13184 + check_object_size(src, size, true);
13185 +
13186 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13187 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13188 + dst += PAX_USER_SHADOW_BASE;
13189 +#endif
13190 +
13191 + return copy_user_generic((__force_kernel void *)dst, src, size);
13192 + }
13193 switch (size) {
13194 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13195 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13196 ret, "b", "b", "iq", 1);
13197 return ret;
13198 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13199 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13200 ret, "w", "w", "ir", 2);
13201 return ret;
13202 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13203 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13204 ret, "l", "k", "ir", 4);
13205 return ret;
13206 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13207 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13208 ret, "q", "", "er", 8);
13209 return ret;
13210 case 10:
13211 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13212 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13213 ret, "q", "", "er", 10);
13214 if (unlikely(ret))
13215 return ret;
13216 asm("":::"memory");
13217 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13218 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13219 ret, "w", "w", "ir", 2);
13220 return ret;
13221 case 16:
13222 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13223 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13224 ret, "q", "", "er", 16);
13225 if (unlikely(ret))
13226 return ret;
13227 asm("":::"memory");
13228 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13229 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13230 ret, "q", "", "er", 8);
13231 return ret;
13232 default:
13233 - return copy_user_generic((__force void *)dst, src, size);
13234 +
13235 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13236 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13237 + dst += PAX_USER_SHADOW_BASE;
13238 +#endif
13239 +
13240 + return copy_user_generic((__force_kernel void *)dst, src, size);
13241 }
13242 }
13243
13244 static __always_inline __must_check
13245 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13246 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13247 {
13248 - int ret = 0;
13249 + unsigned ret = 0;
13250
13251 might_fault();
13252 - if (!__builtin_constant_p(size))
13253 - return copy_user_generic((__force void *)dst,
13254 - (__force void *)src, size);
13255 +
13256 + if (size > INT_MAX)
13257 + return size;
13258 +
13259 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13260 + if (!__access_ok(VERIFY_READ, src, size))
13261 + return size;
13262 + if (!__access_ok(VERIFY_WRITE, dst, size))
13263 + return size;
13264 +#endif
13265 +
13266 + if (!__builtin_constant_p(size)) {
13267 +
13268 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13269 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13270 + src += PAX_USER_SHADOW_BASE;
13271 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13272 + dst += PAX_USER_SHADOW_BASE;
13273 +#endif
13274 +
13275 + return copy_user_generic((__force_kernel void *)dst,
13276 + (__force_kernel const void *)src, size);
13277 + }
13278 switch (size) {
13279 case 1: {
13280 u8 tmp;
13281 - __get_user_asm(tmp, (u8 __user *)src,
13282 + __get_user_asm(tmp, (const u8 __user *)src,
13283 ret, "b", "b", "=q", 1);
13284 if (likely(!ret))
13285 __put_user_asm(tmp, (u8 __user *)dst,
13286 @@ -176,7 +265,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13287 }
13288 case 2: {
13289 u16 tmp;
13290 - __get_user_asm(tmp, (u16 __user *)src,
13291 + __get_user_asm(tmp, (const u16 __user *)src,
13292 ret, "w", "w", "=r", 2);
13293 if (likely(!ret))
13294 __put_user_asm(tmp, (u16 __user *)dst,
13295 @@ -186,7 +275,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13296
13297 case 4: {
13298 u32 tmp;
13299 - __get_user_asm(tmp, (u32 __user *)src,
13300 + __get_user_asm(tmp, (const u32 __user *)src,
13301 ret, "l", "k", "=r", 4);
13302 if (likely(!ret))
13303 __put_user_asm(tmp, (u32 __user *)dst,
13304 @@ -195,7 +284,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13305 }
13306 case 8: {
13307 u64 tmp;
13308 - __get_user_asm(tmp, (u64 __user *)src,
13309 + __get_user_asm(tmp, (const u64 __user *)src,
13310 ret, "q", "", "=r", 8);
13311 if (likely(!ret))
13312 __put_user_asm(tmp, (u64 __user *)dst,
13313 @@ -203,47 +292,92 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13314 return ret;
13315 }
13316 default:
13317 - return copy_user_generic((__force void *)dst,
13318 - (__force void *)src, size);
13319 +
13320 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13321 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13322 + src += PAX_USER_SHADOW_BASE;
13323 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13324 + dst += PAX_USER_SHADOW_BASE;
13325 +#endif
13326 +
13327 + return copy_user_generic((__force_kernel void *)dst,
13328 + (__force_kernel const void *)src, size);
13329 }
13330 }
13331
13332 __must_check long strnlen_user(const char __user *str, long n);
13333 __must_check long __strnlen_user(const char __user *str, long n);
13334 __must_check long strlen_user(const char __user *str);
13335 -__must_check unsigned long clear_user(void __user *mem, unsigned long len);
13336 -__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13337 +__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13338 +__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13339
13340 static __must_check __always_inline int
13341 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
13342 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13343 {
13344 - return copy_user_generic(dst, (__force const void *)src, size);
13345 + if (size > INT_MAX)
13346 + return size;
13347 +
13348 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13349 + if (!__access_ok(VERIFY_READ, src, size))
13350 + return size;
13351 +
13352 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13353 + src += PAX_USER_SHADOW_BASE;
13354 +#endif
13355 +
13356 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13357 }
13358
13359 -static __must_check __always_inline int
13360 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13361 +static __must_check __always_inline unsigned long
13362 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13363 {
13364 - return copy_user_generic((__force void *)dst, src, size);
13365 + if (size > INT_MAX)
13366 + return size;
13367 +
13368 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13369 + if (!__access_ok(VERIFY_WRITE, dst, size))
13370 + return size;
13371 +
13372 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13373 + dst += PAX_USER_SHADOW_BASE;
13374 +#endif
13375 +
13376 + return copy_user_generic((__force_kernel void *)dst, src, size);
13377 }
13378
13379 -extern long __copy_user_nocache(void *dst, const void __user *src,
13380 - unsigned size, int zerorest);
13381 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13382 + unsigned long size, int zerorest) __size_overflow(3);
13383
13384 -static inline int
13385 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13386 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13387 {
13388 might_sleep();
13389 +
13390 + if (size > INT_MAX)
13391 + return size;
13392 +
13393 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13394 + if (!__access_ok(VERIFY_READ, src, size))
13395 + return size;
13396 +#endif
13397 +
13398 return __copy_user_nocache(dst, src, size, 1);
13399 }
13400
13401 -static inline int
13402 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13403 - unsigned size)
13404 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13405 + unsigned long size)
13406 {
13407 + if (size > INT_MAX)
13408 + return size;
13409 +
13410 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13411 + if (!__access_ok(VERIFY_READ, src, size))
13412 + return size;
13413 +#endif
13414 +
13415 return __copy_user_nocache(dst, src, size, 0);
13416 }
13417
13418 -unsigned long
13419 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13420 +extern unsigned long
13421 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
13422
13423 #endif /* _ASM_X86_UACCESS_64_H */
13424 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13425 index bb05228..d763d5b 100644
13426 --- a/arch/x86/include/asm/vdso.h
13427 +++ b/arch/x86/include/asm/vdso.h
13428 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
13429 #define VDSO32_SYMBOL(base, name) \
13430 ({ \
13431 extern const char VDSO32_##name[]; \
13432 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13433 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13434 })
13435 #endif
13436
13437 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13438 index 764b66a..ad3cfc8 100644
13439 --- a/arch/x86/include/asm/x86_init.h
13440 +++ b/arch/x86/include/asm/x86_init.h
13441 @@ -29,7 +29,7 @@ struct x86_init_mpparse {
13442 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13443 void (*find_smp_config)(void);
13444 void (*get_smp_config)(unsigned int early);
13445 -};
13446 +} __no_const;
13447
13448 /**
13449 * struct x86_init_resources - platform specific resource related ops
13450 @@ -43,7 +43,7 @@ struct x86_init_resources {
13451 void (*probe_roms)(void);
13452 void (*reserve_resources)(void);
13453 char *(*memory_setup)(void);
13454 -};
13455 +} __no_const;
13456
13457 /**
13458 * struct x86_init_irqs - platform specific interrupt setup
13459 @@ -56,7 +56,7 @@ struct x86_init_irqs {
13460 void (*pre_vector_init)(void);
13461 void (*intr_init)(void);
13462 void (*trap_init)(void);
13463 -};
13464 +} __no_const;
13465
13466 /**
13467 * struct x86_init_oem - oem platform specific customizing functions
13468 @@ -66,7 +66,7 @@ struct x86_init_irqs {
13469 struct x86_init_oem {
13470 void (*arch_setup)(void);
13471 void (*banner)(void);
13472 -};
13473 +} __no_const;
13474
13475 /**
13476 * struct x86_init_mapping - platform specific initial kernel pagetable setup
13477 @@ -77,7 +77,7 @@ struct x86_init_oem {
13478 */
13479 struct x86_init_mapping {
13480 void (*pagetable_reserve)(u64 start, u64 end);
13481 -};
13482 +} __no_const;
13483
13484 /**
13485 * struct x86_init_paging - platform specific paging functions
13486 @@ -87,7 +87,7 @@ struct x86_init_mapping {
13487 struct x86_init_paging {
13488 void (*pagetable_setup_start)(pgd_t *base);
13489 void (*pagetable_setup_done)(pgd_t *base);
13490 -};
13491 +} __no_const;
13492
13493 /**
13494 * struct x86_init_timers - platform specific timer setup
13495 @@ -102,7 +102,7 @@ struct x86_init_timers {
13496 void (*tsc_pre_init)(void);
13497 void (*timer_init)(void);
13498 void (*wallclock_init)(void);
13499 -};
13500 +} __no_const;
13501
13502 /**
13503 * struct x86_init_iommu - platform specific iommu setup
13504 @@ -110,7 +110,7 @@ struct x86_init_timers {
13505 */
13506 struct x86_init_iommu {
13507 int (*iommu_init)(void);
13508 -};
13509 +} __no_const;
13510
13511 /**
13512 * struct x86_init_pci - platform specific pci init functions
13513 @@ -124,7 +124,7 @@ struct x86_init_pci {
13514 int (*init)(void);
13515 void (*init_irq)(void);
13516 void (*fixup_irqs)(void);
13517 -};
13518 +} __no_const;
13519
13520 /**
13521 * struct x86_init_ops - functions for platform specific setup
13522 @@ -140,7 +140,7 @@ struct x86_init_ops {
13523 struct x86_init_timers timers;
13524 struct x86_init_iommu iommu;
13525 struct x86_init_pci pci;
13526 -};
13527 +} __no_const;
13528
13529 /**
13530 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13531 @@ -151,7 +151,7 @@ struct x86_cpuinit_ops {
13532 void (*setup_percpu_clockev)(void);
13533 void (*early_percpu_clock_init)(void);
13534 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
13535 -};
13536 +} __no_const;
13537
13538 /**
13539 * struct x86_platform_ops - platform specific runtime functions
13540 @@ -177,7 +177,7 @@ struct x86_platform_ops {
13541 int (*i8042_detect)(void);
13542 void (*save_sched_clock_state)(void);
13543 void (*restore_sched_clock_state)(void);
13544 -};
13545 +} __no_const;
13546
13547 struct pci_dev;
13548
13549 @@ -186,7 +186,7 @@ struct x86_msi_ops {
13550 void (*teardown_msi_irq)(unsigned int irq);
13551 void (*teardown_msi_irqs)(struct pci_dev *dev);
13552 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
13553 -};
13554 +} __no_const;
13555
13556 extern struct x86_init_ops x86_init;
13557 extern struct x86_cpuinit_ops x86_cpuinit;
13558 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13559 index c6ce245..ffbdab7 100644
13560 --- a/arch/x86/include/asm/xsave.h
13561 +++ b/arch/x86/include/asm/xsave.h
13562 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13563 {
13564 int err;
13565
13566 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13567 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13568 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13569 +#endif
13570 +
13571 /*
13572 * Clear the xsave header first, so that reserved fields are
13573 * initialized to zero.
13574 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13575 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13576 {
13577 int err;
13578 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13579 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13580 u32 lmask = mask;
13581 u32 hmask = mask >> 32;
13582
13583 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13584 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13585 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13586 +#endif
13587 +
13588 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13589 "2:\n"
13590 ".section .fixup,\"ax\"\n"
13591 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13592 index 6a564ac..9b1340c 100644
13593 --- a/arch/x86/kernel/acpi/realmode/Makefile
13594 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13595 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13596 $(call cc-option, -fno-stack-protector) \
13597 $(call cc-option, -mpreferred-stack-boundary=2)
13598 KBUILD_CFLAGS += $(call cc-option, -m32)
13599 +ifdef CONSTIFY_PLUGIN
13600 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13601 +endif
13602 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13603 GCOV_PROFILE := n
13604
13605 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13606 index b4fd836..4358fe3 100644
13607 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13608 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13609 @@ -108,6 +108,9 @@ wakeup_code:
13610 /* Do any other stuff... */
13611
13612 #ifndef CONFIG_64BIT
13613 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
13614 + call verify_cpu
13615 +
13616 /* This could also be done in C code... */
13617 movl pmode_cr3, %eax
13618 movl %eax, %cr3
13619 @@ -131,6 +134,7 @@ wakeup_code:
13620 movl pmode_cr0, %eax
13621 movl %eax, %cr0
13622 jmp pmode_return
13623 +# include "../../verify_cpu.S"
13624 #else
13625 pushw $0
13626 pushw trampoline_segment
13627 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13628 index 146a49c..1b5338b 100644
13629 --- a/arch/x86/kernel/acpi/sleep.c
13630 +++ b/arch/x86/kernel/acpi/sleep.c
13631 @@ -98,8 +98,12 @@ int acpi_suspend_lowlevel(void)
13632 header->trampoline_segment = trampoline_address() >> 4;
13633 #ifdef CONFIG_SMP
13634 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13635 +
13636 + pax_open_kernel();
13637 early_gdt_descr.address =
13638 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13639 + pax_close_kernel();
13640 +
13641 initial_gs = per_cpu_offset(smp_processor_id());
13642 #endif
13643 initial_code = (unsigned long)wakeup_long64;
13644 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13645 index 7261083..5c12053 100644
13646 --- a/arch/x86/kernel/acpi/wakeup_32.S
13647 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13648 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13649 # and restore the stack ... but you need gdt for this to work
13650 movl saved_context_esp, %esp
13651
13652 - movl %cs:saved_magic, %eax
13653 - cmpl $0x12345678, %eax
13654 + cmpl $0x12345678, saved_magic
13655 jne bogus_magic
13656
13657 # jump to place where we left off
13658 - movl saved_eip, %eax
13659 - jmp *%eax
13660 + jmp *(saved_eip)
13661
13662 bogus_magic:
13663 jmp bogus_magic
13664 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13665 index 1f84794..e23f862 100644
13666 --- a/arch/x86/kernel/alternative.c
13667 +++ b/arch/x86/kernel/alternative.c
13668 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13669 */
13670 for (a = start; a < end; a++) {
13671 instr = (u8 *)&a->instr_offset + a->instr_offset;
13672 +
13673 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13674 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13675 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13676 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13677 +#endif
13678 +
13679 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13680 BUG_ON(a->replacementlen > a->instrlen);
13681 BUG_ON(a->instrlen > sizeof(insnbuf));
13682 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13683 for (poff = start; poff < end; poff++) {
13684 u8 *ptr = (u8 *)poff + *poff;
13685
13686 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13687 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13688 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13689 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13690 +#endif
13691 +
13692 if (!*poff || ptr < text || ptr >= text_end)
13693 continue;
13694 /* turn DS segment override prefix into lock prefix */
13695 - if (*ptr == 0x3e)
13696 + if (*ktla_ktva(ptr) == 0x3e)
13697 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13698 };
13699 mutex_unlock(&text_mutex);
13700 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13701 for (poff = start; poff < end; poff++) {
13702 u8 *ptr = (u8 *)poff + *poff;
13703
13704 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13705 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13706 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13707 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13708 +#endif
13709 +
13710 if (!*poff || ptr < text || ptr >= text_end)
13711 continue;
13712 /* turn lock prefix into DS segment override prefix */
13713 - if (*ptr == 0xf0)
13714 + if (*ktla_ktva(ptr) == 0xf0)
13715 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13716 };
13717 mutex_unlock(&text_mutex);
13718 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13719
13720 BUG_ON(p->len > MAX_PATCH_LEN);
13721 /* prep the buffer with the original instructions */
13722 - memcpy(insnbuf, p->instr, p->len);
13723 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13724 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13725 (unsigned long)p->instr, p->len);
13726
13727 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13728 if (smp_alt_once)
13729 free_init_pages("SMP alternatives",
13730 (unsigned long)__smp_locks,
13731 - (unsigned long)__smp_locks_end);
13732 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13733
13734 restart_nmi();
13735 }
13736 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13737 * instructions. And on the local CPU you need to be protected again NMI or MCE
13738 * handlers seeing an inconsistent instruction while you patch.
13739 */
13740 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
13741 +void *__kprobes text_poke_early(void *addr, const void *opcode,
13742 size_t len)
13743 {
13744 unsigned long flags;
13745 local_irq_save(flags);
13746 - memcpy(addr, opcode, len);
13747 +
13748 + pax_open_kernel();
13749 + memcpy(ktla_ktva(addr), opcode, len);
13750 sync_core();
13751 + pax_close_kernel();
13752 +
13753 local_irq_restore(flags);
13754 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13755 that causes hangs on some VIA CPUs. */
13756 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13757 */
13758 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13759 {
13760 - unsigned long flags;
13761 - char *vaddr;
13762 + unsigned char *vaddr = ktla_ktva(addr);
13763 struct page *pages[2];
13764 - int i;
13765 + size_t i;
13766
13767 if (!core_kernel_text((unsigned long)addr)) {
13768 - pages[0] = vmalloc_to_page(addr);
13769 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13770 + pages[0] = vmalloc_to_page(vaddr);
13771 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13772 } else {
13773 - pages[0] = virt_to_page(addr);
13774 + pages[0] = virt_to_page(vaddr);
13775 WARN_ON(!PageReserved(pages[0]));
13776 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13777 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13778 }
13779 BUG_ON(!pages[0]);
13780 - local_irq_save(flags);
13781 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13782 - if (pages[1])
13783 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13784 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13785 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13786 - clear_fixmap(FIX_TEXT_POKE0);
13787 - if (pages[1])
13788 - clear_fixmap(FIX_TEXT_POKE1);
13789 - local_flush_tlb();
13790 - sync_core();
13791 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13792 - that causes hangs on some VIA CPUs. */
13793 + text_poke_early(addr, opcode, len);
13794 for (i = 0; i < len; i++)
13795 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13796 - local_irq_restore(flags);
13797 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13798 return addr;
13799 }
13800
13801 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13802 index edc2448..553e7c5 100644
13803 --- a/arch/x86/kernel/apic/apic.c
13804 +++ b/arch/x86/kernel/apic/apic.c
13805 @@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
13806 /*
13807 * Debug level, exported for io_apic.c
13808 */
13809 -unsigned int apic_verbosity;
13810 +int apic_verbosity;
13811
13812 int pic_mode;
13813
13814 @@ -1917,7 +1917,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13815 apic_write(APIC_ESR, 0);
13816 v1 = apic_read(APIC_ESR);
13817 ack_APIC_irq();
13818 - atomic_inc(&irq_err_count);
13819 + atomic_inc_unchecked(&irq_err_count);
13820
13821 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13822 smp_processor_id(), v0 , v1);
13823 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13824 index e88300d..cd5a87a 100644
13825 --- a/arch/x86/kernel/apic/io_apic.c
13826 +++ b/arch/x86/kernel/apic/io_apic.c
13827 @@ -83,7 +83,9 @@ static struct io_apic_ops io_apic_ops = {
13828
13829 void __init set_io_apic_ops(const struct io_apic_ops *ops)
13830 {
13831 - io_apic_ops = *ops;
13832 + pax_open_kernel();
13833 + memcpy((void*)&io_apic_ops, ops, sizeof io_apic_ops);
13834 + pax_close_kernel();
13835 }
13836
13837 /*
13838 @@ -1135,7 +1137,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13839 }
13840 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13841
13842 -void lock_vector_lock(void)
13843 +void lock_vector_lock(void) __acquires(vector_lock)
13844 {
13845 /* Used to the online set of cpus does not change
13846 * during assign_irq_vector.
13847 @@ -1143,7 +1145,7 @@ void lock_vector_lock(void)
13848 raw_spin_lock(&vector_lock);
13849 }
13850
13851 -void unlock_vector_lock(void)
13852 +void unlock_vector_lock(void) __releases(vector_lock)
13853 {
13854 raw_spin_unlock(&vector_lock);
13855 }
13856 @@ -2549,7 +2551,7 @@ static void ack_apic_edge(struct irq_data *data)
13857 ack_APIC_irq();
13858 }
13859
13860 -atomic_t irq_mis_count;
13861 +atomic_unchecked_t irq_mis_count;
13862
13863 #ifdef CONFIG_GENERIC_PENDING_IRQ
13864 static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
13865 @@ -2667,7 +2669,7 @@ static void ack_apic_level(struct irq_data *data)
13866 * at the cpu.
13867 */
13868 if (!(v & (1 << (i & 0x1f)))) {
13869 - atomic_inc(&irq_mis_count);
13870 + atomic_inc_unchecked(&irq_mis_count);
13871
13872 eoi_ioapic_irq(irq, cfg);
13873 }
13874 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13875 index 459e78c..f037006 100644
13876 --- a/arch/x86/kernel/apm_32.c
13877 +++ b/arch/x86/kernel/apm_32.c
13878 @@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
13879 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13880 * even though they are called in protected mode.
13881 */
13882 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13883 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13884 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13885
13886 static const char driver_version[] = "1.16ac"; /* no spaces */
13887 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13888 BUG_ON(cpu != 0);
13889 gdt = get_cpu_gdt_table(cpu);
13890 save_desc_40 = gdt[0x40 / 8];
13891 +
13892 + pax_open_kernel();
13893 gdt[0x40 / 8] = bad_bios_desc;
13894 + pax_close_kernel();
13895
13896 apm_irq_save(flags);
13897 APM_DO_SAVE_SEGS;
13898 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13899 &call->esi);
13900 APM_DO_RESTORE_SEGS;
13901 apm_irq_restore(flags);
13902 +
13903 + pax_open_kernel();
13904 gdt[0x40 / 8] = save_desc_40;
13905 + pax_close_kernel();
13906 +
13907 put_cpu();
13908
13909 return call->eax & 0xff;
13910 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13911 BUG_ON(cpu != 0);
13912 gdt = get_cpu_gdt_table(cpu);
13913 save_desc_40 = gdt[0x40 / 8];
13914 +
13915 + pax_open_kernel();
13916 gdt[0x40 / 8] = bad_bios_desc;
13917 + pax_close_kernel();
13918
13919 apm_irq_save(flags);
13920 APM_DO_SAVE_SEGS;
13921 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13922 &call->eax);
13923 APM_DO_RESTORE_SEGS;
13924 apm_irq_restore(flags);
13925 +
13926 + pax_open_kernel();
13927 gdt[0x40 / 8] = save_desc_40;
13928 + pax_close_kernel();
13929 +
13930 put_cpu();
13931 return error;
13932 }
13933 @@ -2345,12 +2359,15 @@ static int __init apm_init(void)
13934 * code to that CPU.
13935 */
13936 gdt = get_cpu_gdt_table(0);
13937 +
13938 + pax_open_kernel();
13939 set_desc_base(&gdt[APM_CS >> 3],
13940 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13941 set_desc_base(&gdt[APM_CS_16 >> 3],
13942 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13943 set_desc_base(&gdt[APM_DS >> 3],
13944 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13945 + pax_close_kernel();
13946
13947 proc_create("apm", 0, NULL, &apm_file_ops);
13948
13949 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
13950 index 68de2dc..1f3c720 100644
13951 --- a/arch/x86/kernel/asm-offsets.c
13952 +++ b/arch/x86/kernel/asm-offsets.c
13953 @@ -33,6 +33,8 @@ void common(void) {
13954 OFFSET(TI_status, thread_info, status);
13955 OFFSET(TI_addr_limit, thread_info, addr_limit);
13956 OFFSET(TI_preempt_count, thread_info, preempt_count);
13957 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13958 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13959
13960 BLANK();
13961 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
13962 @@ -53,8 +55,26 @@ void common(void) {
13963 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13964 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13965 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13966 +
13967 +#ifdef CONFIG_PAX_KERNEXEC
13968 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13969 #endif
13970
13971 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13972 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13973 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13974 +#ifdef CONFIG_X86_64
13975 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13976 +#endif
13977 +#endif
13978 +
13979 +#endif
13980 +
13981 + BLANK();
13982 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13983 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13984 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13985 +
13986 #ifdef CONFIG_XEN
13987 BLANK();
13988 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13989 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13990 index 1b4754f..fbb4227 100644
13991 --- a/arch/x86/kernel/asm-offsets_64.c
13992 +++ b/arch/x86/kernel/asm-offsets_64.c
13993 @@ -76,6 +76,7 @@ int main(void)
13994 BLANK();
13995 #undef ENTRY
13996
13997 + DEFINE(TSS_size, sizeof(struct tss_struct));
13998 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
13999 BLANK();
14000
14001 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14002 index 6ab6aa2..8f71507 100644
14003 --- a/arch/x86/kernel/cpu/Makefile
14004 +++ b/arch/x86/kernel/cpu/Makefile
14005 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
14006 CFLAGS_REMOVE_perf_event.o = -pg
14007 endif
14008
14009 -# Make sure load_percpu_segment has no stackprotector
14010 -nostackp := $(call cc-option, -fno-stack-protector)
14011 -CFLAGS_common.o := $(nostackp)
14012 -
14013 obj-y := intel_cacheinfo.o scattered.o topology.o
14014 obj-y += proc.o capflags.o powerflags.o common.o
14015 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
14016 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14017 index 146bb62..ac9c74a 100644
14018 --- a/arch/x86/kernel/cpu/amd.c
14019 +++ b/arch/x86/kernel/cpu/amd.c
14020 @@ -691,7 +691,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14021 unsigned int size)
14022 {
14023 /* AMD errata T13 (order #21922) */
14024 - if ((c->x86 == 6)) {
14025 + if (c->x86 == 6) {
14026 /* Duron Rev A0 */
14027 if (c->x86_model == 3 && c->x86_mask == 0)
14028 size = 64;
14029 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14030 index cf79302..b1b28ae 100644
14031 --- a/arch/x86/kernel/cpu/common.c
14032 +++ b/arch/x86/kernel/cpu/common.c
14033 @@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14034
14035 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14036
14037 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14038 -#ifdef CONFIG_X86_64
14039 - /*
14040 - * We need valid kernel segments for data and code in long mode too
14041 - * IRET will check the segment types kkeil 2000/10/28
14042 - * Also sysret mandates a special GDT layout
14043 - *
14044 - * TLS descriptors are currently at a different place compared to i386.
14045 - * Hopefully nobody expects them at a fixed place (Wine?)
14046 - */
14047 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14048 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14049 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14050 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14051 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14052 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14053 -#else
14054 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14055 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14056 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14057 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14058 - /*
14059 - * Segments used for calling PnP BIOS have byte granularity.
14060 - * They code segments and data segments have fixed 64k limits,
14061 - * the transfer segment sizes are set at run time.
14062 - */
14063 - /* 32-bit code */
14064 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14065 - /* 16-bit code */
14066 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14067 - /* 16-bit data */
14068 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14069 - /* 16-bit data */
14070 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14071 - /* 16-bit data */
14072 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14073 - /*
14074 - * The APM segments have byte granularity and their bases
14075 - * are set at run time. All have 64k limits.
14076 - */
14077 - /* 32-bit code */
14078 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14079 - /* 16-bit code */
14080 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14081 - /* data */
14082 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14083 -
14084 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14085 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14086 - GDT_STACK_CANARY_INIT
14087 -#endif
14088 -} };
14089 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14090 -
14091 static int __init x86_xsave_setup(char *s)
14092 {
14093 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14094 @@ -374,7 +320,7 @@ void switch_to_new_gdt(int cpu)
14095 {
14096 struct desc_ptr gdt_descr;
14097
14098 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14099 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14100 gdt_descr.size = GDT_SIZE - 1;
14101 load_gdt(&gdt_descr);
14102 /* Reload the per-cpu base */
14103 @@ -841,6 +787,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14104 /* Filter out anything that depends on CPUID levels we don't have */
14105 filter_cpuid_features(c, true);
14106
14107 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14108 + setup_clear_cpu_cap(X86_FEATURE_SEP);
14109 +#endif
14110 +
14111 /* If the model name is still unset, do table lookup. */
14112 if (!c->x86_model_id[0]) {
14113 const char *p;
14114 @@ -1021,10 +971,12 @@ static __init int setup_disablecpuid(char *arg)
14115 }
14116 __setup("clearcpuid=", setup_disablecpuid);
14117
14118 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14119 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
14120 +
14121 #ifdef CONFIG_X86_64
14122 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14123 -struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
14124 - (unsigned long) nmi_idt_table };
14125 +struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
14126
14127 DEFINE_PER_CPU_FIRST(union irq_stack_union,
14128 irq_stack_union) __aligned(PAGE_SIZE);
14129 @@ -1038,7 +990,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14130 EXPORT_PER_CPU_SYMBOL(current_task);
14131
14132 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14133 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14134 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14135 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14136
14137 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14138 @@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14139 {
14140 memset(regs, 0, sizeof(struct pt_regs));
14141 regs->fs = __KERNEL_PERCPU;
14142 - regs->gs = __KERNEL_STACK_CANARY;
14143 + savesegment(gs, regs->gs);
14144
14145 return regs;
14146 }
14147 @@ -1181,7 +1133,7 @@ void __cpuinit cpu_init(void)
14148 int i;
14149
14150 cpu = stack_smp_processor_id();
14151 - t = &per_cpu(init_tss, cpu);
14152 + t = init_tss + cpu;
14153 oist = &per_cpu(orig_ist, cpu);
14154
14155 #ifdef CONFIG_NUMA
14156 @@ -1207,7 +1159,7 @@ void __cpuinit cpu_init(void)
14157 switch_to_new_gdt(cpu);
14158 loadsegment(fs, 0);
14159
14160 - load_idt((const struct desc_ptr *)&idt_descr);
14161 + load_idt(&idt_descr);
14162
14163 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14164 syscall_init();
14165 @@ -1216,7 +1168,6 @@ void __cpuinit cpu_init(void)
14166 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14167 barrier();
14168
14169 - x86_configure_nx();
14170 if (cpu != 0)
14171 enable_x2apic();
14172
14173 @@ -1272,7 +1223,7 @@ void __cpuinit cpu_init(void)
14174 {
14175 int cpu = smp_processor_id();
14176 struct task_struct *curr = current;
14177 - struct tss_struct *t = &per_cpu(init_tss, cpu);
14178 + struct tss_struct *t = init_tss + cpu;
14179 struct thread_struct *thread = &curr->thread;
14180
14181 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
14182 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
14183 index 3e6ff6c..54b4992 100644
14184 --- a/arch/x86/kernel/cpu/intel.c
14185 +++ b/arch/x86/kernel/cpu/intel.c
14186 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14187 * Update the IDT descriptor and reload the IDT so that
14188 * it uses the read-only mapped virtual address.
14189 */
14190 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14191 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14192 load_idt(&idt_descr);
14193 }
14194 #endif
14195 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14196 index 61604ae..98250a5 100644
14197 --- a/arch/x86/kernel/cpu/mcheck/mce.c
14198 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
14199 @@ -42,6 +42,7 @@
14200 #include <asm/processor.h>
14201 #include <asm/mce.h>
14202 #include <asm/msr.h>
14203 +#include <asm/local.h>
14204
14205 #include "mce-internal.h"
14206
14207 @@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
14208 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14209 m->cs, m->ip);
14210
14211 - if (m->cs == __KERNEL_CS)
14212 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14213 print_symbol("{%s}", m->ip);
14214 pr_cont("\n");
14215 }
14216 @@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
14217
14218 #define PANIC_TIMEOUT 5 /* 5 seconds */
14219
14220 -static atomic_t mce_paniced;
14221 +static atomic_unchecked_t mce_paniced;
14222
14223 static int fake_panic;
14224 -static atomic_t mce_fake_paniced;
14225 +static atomic_unchecked_t mce_fake_paniced;
14226
14227 /* Panic in progress. Enable interrupts and wait for final IPI */
14228 static void wait_for_panic(void)
14229 @@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14230 /*
14231 * Make sure only one CPU runs in machine check panic
14232 */
14233 - if (atomic_inc_return(&mce_paniced) > 1)
14234 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14235 wait_for_panic();
14236 barrier();
14237
14238 @@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14239 console_verbose();
14240 } else {
14241 /* Don't log too much for fake panic */
14242 - if (atomic_inc_return(&mce_fake_paniced) > 1)
14243 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14244 return;
14245 }
14246 /* First print corrected ones that are still unlogged */
14247 @@ -684,7 +685,7 @@ static int mce_timed_out(u64 *t)
14248 * might have been modified by someone else.
14249 */
14250 rmb();
14251 - if (atomic_read(&mce_paniced))
14252 + if (atomic_read_unchecked(&mce_paniced))
14253 wait_for_panic();
14254 if (!monarch_timeout)
14255 goto out;
14256 @@ -1535,7 +1536,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14257 }
14258
14259 /* Call the installed machine check handler for this CPU setup. */
14260 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
14261 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14262 unexpected_machine_check;
14263
14264 /*
14265 @@ -1558,7 +1559,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14266 return;
14267 }
14268
14269 + pax_open_kernel();
14270 machine_check_vector = do_machine_check;
14271 + pax_close_kernel();
14272
14273 __mcheck_cpu_init_generic();
14274 __mcheck_cpu_init_vendor(c);
14275 @@ -1572,7 +1575,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14276 */
14277
14278 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
14279 -static int mce_chrdev_open_count; /* #times opened */
14280 +static local_t mce_chrdev_open_count; /* #times opened */
14281 static int mce_chrdev_open_exclu; /* already open exclusive? */
14282
14283 static int mce_chrdev_open(struct inode *inode, struct file *file)
14284 @@ -1580,7 +1583,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14285 spin_lock(&mce_chrdev_state_lock);
14286
14287 if (mce_chrdev_open_exclu ||
14288 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
14289 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
14290 spin_unlock(&mce_chrdev_state_lock);
14291
14292 return -EBUSY;
14293 @@ -1588,7 +1591,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14294
14295 if (file->f_flags & O_EXCL)
14296 mce_chrdev_open_exclu = 1;
14297 - mce_chrdev_open_count++;
14298 + local_inc(&mce_chrdev_open_count);
14299
14300 spin_unlock(&mce_chrdev_state_lock);
14301
14302 @@ -1599,7 +1602,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
14303 {
14304 spin_lock(&mce_chrdev_state_lock);
14305
14306 - mce_chrdev_open_count--;
14307 + local_dec(&mce_chrdev_open_count);
14308 mce_chrdev_open_exclu = 0;
14309
14310 spin_unlock(&mce_chrdev_state_lock);
14311 @@ -2324,7 +2327,7 @@ struct dentry *mce_get_debugfs_dir(void)
14312 static void mce_reset(void)
14313 {
14314 cpu_missing = 0;
14315 - atomic_set(&mce_fake_paniced, 0);
14316 + atomic_set_unchecked(&mce_fake_paniced, 0);
14317 atomic_set(&mce_executing, 0);
14318 atomic_set(&mce_callin, 0);
14319 atomic_set(&global_nwo, 0);
14320 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14321 index 2d5454c..51987eb 100644
14322 --- a/arch/x86/kernel/cpu/mcheck/p5.c
14323 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
14324 @@ -11,6 +11,7 @@
14325 #include <asm/processor.h>
14326 #include <asm/mce.h>
14327 #include <asm/msr.h>
14328 +#include <asm/pgtable.h>
14329
14330 /* By default disabled */
14331 int mce_p5_enabled __read_mostly;
14332 @@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14333 if (!cpu_has(c, X86_FEATURE_MCE))
14334 return;
14335
14336 + pax_open_kernel();
14337 machine_check_vector = pentium_machine_check;
14338 + pax_close_kernel();
14339 /* Make sure the vector pointer is visible before we enable MCEs: */
14340 wmb();
14341
14342 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14343 index 2d7998f..17c9de1 100644
14344 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
14345 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14346 @@ -10,6 +10,7 @@
14347 #include <asm/processor.h>
14348 #include <asm/mce.h>
14349 #include <asm/msr.h>
14350 +#include <asm/pgtable.h>
14351
14352 /* Machine check handler for WinChip C6: */
14353 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14354 @@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14355 {
14356 u32 lo, hi;
14357
14358 + pax_open_kernel();
14359 machine_check_vector = winchip_machine_check;
14360 + pax_close_kernel();
14361 /* Make sure the vector pointer is visible before we enable MCEs: */
14362 wmb();
14363
14364 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14365 index 6b96110..0da73eb 100644
14366 --- a/arch/x86/kernel/cpu/mtrr/main.c
14367 +++ b/arch/x86/kernel/cpu/mtrr/main.c
14368 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
14369 u64 size_or_mask, size_and_mask;
14370 static bool mtrr_aps_delayed_init;
14371
14372 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14373 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14374
14375 const struct mtrr_ops *mtrr_if;
14376
14377 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14378 index df5e41f..816c719 100644
14379 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14380 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14381 @@ -25,7 +25,7 @@ struct mtrr_ops {
14382 int (*validate_add_page)(unsigned long base, unsigned long size,
14383 unsigned int type);
14384 int (*have_wrcomb)(void);
14385 -};
14386 +} __do_const;
14387
14388 extern int generic_get_free_region(unsigned long base, unsigned long size,
14389 int replace_reg);
14390 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14391 index bb8e034..fb9020b 100644
14392 --- a/arch/x86/kernel/cpu/perf_event.c
14393 +++ b/arch/x86/kernel/cpu/perf_event.c
14394 @@ -1835,7 +1835,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
14395 break;
14396
14397 perf_callchain_store(entry, frame.return_address);
14398 - fp = frame.next_frame;
14399 + fp = (const void __force_user *)frame.next_frame;
14400 }
14401 }
14402
14403 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14404 index 13ad899..f642b9a 100644
14405 --- a/arch/x86/kernel/crash.c
14406 +++ b/arch/x86/kernel/crash.c
14407 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
14408 {
14409 #ifdef CONFIG_X86_32
14410 struct pt_regs fixed_regs;
14411 -#endif
14412
14413 -#ifdef CONFIG_X86_32
14414 - if (!user_mode_vm(regs)) {
14415 + if (!user_mode(regs)) {
14416 crash_fixup_ss_esp(&fixed_regs, regs);
14417 regs = &fixed_regs;
14418 }
14419 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14420 index 37250fe..bf2ec74 100644
14421 --- a/arch/x86/kernel/doublefault_32.c
14422 +++ b/arch/x86/kernel/doublefault_32.c
14423 @@ -11,7 +11,7 @@
14424
14425 #define DOUBLEFAULT_STACKSIZE (1024)
14426 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14427 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14428 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14429
14430 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14431
14432 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14433 unsigned long gdt, tss;
14434
14435 store_gdt(&gdt_desc);
14436 - gdt = gdt_desc.address;
14437 + gdt = (unsigned long)gdt_desc.address;
14438
14439 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14440
14441 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14442 /* 0x2 bit is always set */
14443 .flags = X86_EFLAGS_SF | 0x2,
14444 .sp = STACK_START,
14445 - .es = __USER_DS,
14446 + .es = __KERNEL_DS,
14447 .cs = __KERNEL_CS,
14448 .ss = __KERNEL_DS,
14449 - .ds = __USER_DS,
14450 + .ds = __KERNEL_DS,
14451 .fs = __KERNEL_PERCPU,
14452
14453 .__cr3 = __pa_nodebug(swapper_pg_dir),
14454 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14455 index 1b81839..0b4e7b0 100644
14456 --- a/arch/x86/kernel/dumpstack.c
14457 +++ b/arch/x86/kernel/dumpstack.c
14458 @@ -2,6 +2,9 @@
14459 * Copyright (C) 1991, 1992 Linus Torvalds
14460 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14461 */
14462 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14463 +#define __INCLUDED_BY_HIDESYM 1
14464 +#endif
14465 #include <linux/kallsyms.h>
14466 #include <linux/kprobes.h>
14467 #include <linux/uaccess.h>
14468 @@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
14469 static void
14470 print_ftrace_graph_addr(unsigned long addr, void *data,
14471 const struct stacktrace_ops *ops,
14472 - struct thread_info *tinfo, int *graph)
14473 + struct task_struct *task, int *graph)
14474 {
14475 - struct task_struct *task;
14476 unsigned long ret_addr;
14477 int index;
14478
14479 if (addr != (unsigned long)return_to_handler)
14480 return;
14481
14482 - task = tinfo->task;
14483 index = task->curr_ret_stack;
14484
14485 if (!task->ret_stack || index < *graph)
14486 @@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14487 static inline void
14488 print_ftrace_graph_addr(unsigned long addr, void *data,
14489 const struct stacktrace_ops *ops,
14490 - struct thread_info *tinfo, int *graph)
14491 + struct task_struct *task, int *graph)
14492 { }
14493 #endif
14494
14495 @@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14496 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14497 */
14498
14499 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14500 - void *p, unsigned int size, void *end)
14501 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14502 {
14503 - void *t = tinfo;
14504 if (end) {
14505 if (p < end && p >= (end-THREAD_SIZE))
14506 return 1;
14507 @@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14508 }
14509
14510 unsigned long
14511 -print_context_stack(struct thread_info *tinfo,
14512 +print_context_stack(struct task_struct *task, void *stack_start,
14513 unsigned long *stack, unsigned long bp,
14514 const struct stacktrace_ops *ops, void *data,
14515 unsigned long *end, int *graph)
14516 {
14517 struct stack_frame *frame = (struct stack_frame *)bp;
14518
14519 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14520 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14521 unsigned long addr;
14522
14523 addr = *stack;
14524 @@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
14525 } else {
14526 ops->address(data, addr, 0);
14527 }
14528 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14529 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14530 }
14531 stack++;
14532 }
14533 @@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
14534 EXPORT_SYMBOL_GPL(print_context_stack);
14535
14536 unsigned long
14537 -print_context_stack_bp(struct thread_info *tinfo,
14538 +print_context_stack_bp(struct task_struct *task, void *stack_start,
14539 unsigned long *stack, unsigned long bp,
14540 const struct stacktrace_ops *ops, void *data,
14541 unsigned long *end, int *graph)
14542 @@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14543 struct stack_frame *frame = (struct stack_frame *)bp;
14544 unsigned long *ret_addr = &frame->return_address;
14545
14546 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14547 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14548 unsigned long addr = *ret_addr;
14549
14550 if (!__kernel_text_address(addr))
14551 @@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14552 ops->address(data, addr, 1);
14553 frame = frame->next_frame;
14554 ret_addr = &frame->return_address;
14555 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14556 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14557 }
14558
14559 return (unsigned long)frame;
14560 @@ -189,7 +188,7 @@ void dump_stack(void)
14561
14562 bp = stack_frame(current, NULL);
14563 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14564 - current->pid, current->comm, print_tainted(),
14565 + task_pid_nr(current), current->comm, print_tainted(),
14566 init_utsname()->release,
14567 (int)strcspn(init_utsname()->version, " "),
14568 init_utsname()->version);
14569 @@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
14570 }
14571 EXPORT_SYMBOL_GPL(oops_begin);
14572
14573 +extern void gr_handle_kernel_exploit(void);
14574 +
14575 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14576 {
14577 if (regs && kexec_should_crash(current))
14578 @@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14579 panic("Fatal exception in interrupt");
14580 if (panic_on_oops)
14581 panic("Fatal exception");
14582 - do_exit(signr);
14583 +
14584 + gr_handle_kernel_exploit();
14585 +
14586 + do_group_exit(signr);
14587 }
14588
14589 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14590 @@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14591
14592 show_registers(regs);
14593 #ifdef CONFIG_X86_32
14594 - if (user_mode_vm(regs)) {
14595 + if (user_mode(regs)) {
14596 sp = regs->sp;
14597 ss = regs->ss & 0xffff;
14598 } else {
14599 @@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14600 unsigned long flags = oops_begin();
14601 int sig = SIGSEGV;
14602
14603 - if (!user_mode_vm(regs))
14604 + if (!user_mode(regs))
14605 report_bug(regs->ip, regs);
14606
14607 if (__die(str, regs, err))
14608 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14609 index 88ec912..e95e935 100644
14610 --- a/arch/x86/kernel/dumpstack_32.c
14611 +++ b/arch/x86/kernel/dumpstack_32.c
14612 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14613 bp = stack_frame(task, regs);
14614
14615 for (;;) {
14616 - struct thread_info *context;
14617 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14618
14619 - context = (struct thread_info *)
14620 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14621 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14622 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14623
14624 - stack = (unsigned long *)context->previous_esp;
14625 - if (!stack)
14626 + if (stack_start == task_stack_page(task))
14627 break;
14628 + stack = *(unsigned long **)stack_start;
14629 if (ops->stack(data, "IRQ") < 0)
14630 break;
14631 touch_nmi_watchdog();
14632 @@ -87,7 +85,7 @@ void show_registers(struct pt_regs *regs)
14633 int i;
14634
14635 print_modules();
14636 - __show_regs(regs, !user_mode_vm(regs));
14637 + __show_regs(regs, !user_mode(regs));
14638
14639 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
14640 TASK_COMM_LEN, current->comm, task_pid_nr(current),
14641 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14642 * When in-kernel, we also print out the stack and code at the
14643 * time of the fault..
14644 */
14645 - if (!user_mode_vm(regs)) {
14646 + if (!user_mode(regs)) {
14647 unsigned int code_prologue = code_bytes * 43 / 64;
14648 unsigned int code_len = code_bytes;
14649 unsigned char c;
14650 u8 *ip;
14651 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14652
14653 printk(KERN_EMERG "Stack:\n");
14654 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14655
14656 printk(KERN_EMERG "Code: ");
14657
14658 - ip = (u8 *)regs->ip - code_prologue;
14659 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14660 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14661 /* try starting at IP */
14662 - ip = (u8 *)regs->ip;
14663 + ip = (u8 *)regs->ip + cs_base;
14664 code_len = code_len - code_prologue + 1;
14665 }
14666 for (i = 0; i < code_len; i++, ip++) {
14667 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14668 printk(KERN_CONT " Bad EIP value.");
14669 break;
14670 }
14671 - if (ip == (u8 *)regs->ip)
14672 + if (ip == (u8 *)regs->ip + cs_base)
14673 printk(KERN_CONT "<%02x> ", c);
14674 else
14675 printk(KERN_CONT "%02x ", c);
14676 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14677 {
14678 unsigned short ud2;
14679
14680 + ip = ktla_ktva(ip);
14681 if (ip < PAGE_OFFSET)
14682 return 0;
14683 if (probe_kernel_address((unsigned short *)ip, ud2))
14684 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14685
14686 return ud2 == 0x0b0f;
14687 }
14688 +
14689 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14690 +void pax_check_alloca(unsigned long size)
14691 +{
14692 + unsigned long sp = (unsigned long)&sp, stack_left;
14693 +
14694 + /* all kernel stacks are of the same size */
14695 + stack_left = sp & (THREAD_SIZE - 1);
14696 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14697 +}
14698 +EXPORT_SYMBOL(pax_check_alloca);
14699 +#endif
14700 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14701 index 17107bd..9623722 100644
14702 --- a/arch/x86/kernel/dumpstack_64.c
14703 +++ b/arch/x86/kernel/dumpstack_64.c
14704 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14705 unsigned long *irq_stack_end =
14706 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14707 unsigned used = 0;
14708 - struct thread_info *tinfo;
14709 int graph = 0;
14710 unsigned long dummy;
14711 + void *stack_start;
14712
14713 if (!task)
14714 task = current;
14715 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14716 * current stack address. If the stacks consist of nested
14717 * exceptions
14718 */
14719 - tinfo = task_thread_info(task);
14720 for (;;) {
14721 char *id;
14722 unsigned long *estack_end;
14723 +
14724 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14725 &used, &id);
14726
14727 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14728 if (ops->stack(data, id) < 0)
14729 break;
14730
14731 - bp = ops->walk_stack(tinfo, stack, bp, ops,
14732 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14733 data, estack_end, &graph);
14734 ops->stack(data, "<EOE>");
14735 /*
14736 @@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14737 * second-to-last pointer (index -2 to end) in the
14738 * exception stack:
14739 */
14740 + if ((u16)estack_end[-1] != __KERNEL_DS)
14741 + goto out;
14742 stack = (unsigned long *) estack_end[-2];
14743 continue;
14744 }
14745 @@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14746 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14747 if (ops->stack(data, "IRQ") < 0)
14748 break;
14749 - bp = ops->walk_stack(tinfo, stack, bp,
14750 + bp = ops->walk_stack(task, irq_stack, stack, bp,
14751 ops, data, irq_stack_end, &graph);
14752 /*
14753 * We link to the next stack (which would be
14754 @@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14755 /*
14756 * This handles the process stack:
14757 */
14758 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14759 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14760 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14761 +out:
14762 put_cpu();
14763 }
14764 EXPORT_SYMBOL(dump_trace);
14765 @@ -305,3 +309,50 @@ int is_valid_bugaddr(unsigned long ip)
14766
14767 return ud2 == 0x0b0f;
14768 }
14769 +
14770 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14771 +void pax_check_alloca(unsigned long size)
14772 +{
14773 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14774 + unsigned cpu, used;
14775 + char *id;
14776 +
14777 + /* check the process stack first */
14778 + stack_start = (unsigned long)task_stack_page(current);
14779 + stack_end = stack_start + THREAD_SIZE;
14780 + if (likely(stack_start <= sp && sp < stack_end)) {
14781 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14782 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14783 + return;
14784 + }
14785 +
14786 + cpu = get_cpu();
14787 +
14788 + /* check the irq stacks */
14789 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14790 + stack_start = stack_end - IRQ_STACK_SIZE;
14791 + if (stack_start <= sp && sp < stack_end) {
14792 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14793 + put_cpu();
14794 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14795 + return;
14796 + }
14797 +
14798 + /* check the exception stacks */
14799 + used = 0;
14800 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14801 + stack_start = stack_end - EXCEPTION_STKSZ;
14802 + if (stack_end && stack_start <= sp && sp < stack_end) {
14803 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14804 + put_cpu();
14805 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14806 + return;
14807 + }
14808 +
14809 + put_cpu();
14810 +
14811 + /* unknown stack */
14812 + BUG();
14813 +}
14814 +EXPORT_SYMBOL(pax_check_alloca);
14815 +#endif
14816 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14817 index 9b9f18b..9fcaa04 100644
14818 --- a/arch/x86/kernel/early_printk.c
14819 +++ b/arch/x86/kernel/early_printk.c
14820 @@ -7,6 +7,7 @@
14821 #include <linux/pci_regs.h>
14822 #include <linux/pci_ids.h>
14823 #include <linux/errno.h>
14824 +#include <linux/sched.h>
14825 #include <asm/io.h>
14826 #include <asm/processor.h>
14827 #include <asm/fcntl.h>
14828 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14829 index 7b784f4..db6b628 100644
14830 --- a/arch/x86/kernel/entry_32.S
14831 +++ b/arch/x86/kernel/entry_32.S
14832 @@ -179,13 +179,146 @@
14833 /*CFI_REL_OFFSET gs, PT_GS*/
14834 .endm
14835 .macro SET_KERNEL_GS reg
14836 +
14837 +#ifdef CONFIG_CC_STACKPROTECTOR
14838 movl $(__KERNEL_STACK_CANARY), \reg
14839 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14840 + movl $(__USER_DS), \reg
14841 +#else
14842 + xorl \reg, \reg
14843 +#endif
14844 +
14845 movl \reg, %gs
14846 .endm
14847
14848 #endif /* CONFIG_X86_32_LAZY_GS */
14849
14850 -.macro SAVE_ALL
14851 +.macro pax_enter_kernel
14852 +#ifdef CONFIG_PAX_KERNEXEC
14853 + call pax_enter_kernel
14854 +#endif
14855 +.endm
14856 +
14857 +.macro pax_exit_kernel
14858 +#ifdef CONFIG_PAX_KERNEXEC
14859 + call pax_exit_kernel
14860 +#endif
14861 +.endm
14862 +
14863 +#ifdef CONFIG_PAX_KERNEXEC
14864 +ENTRY(pax_enter_kernel)
14865 +#ifdef CONFIG_PARAVIRT
14866 + pushl %eax
14867 + pushl %ecx
14868 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14869 + mov %eax, %esi
14870 +#else
14871 + mov %cr0, %esi
14872 +#endif
14873 + bts $16, %esi
14874 + jnc 1f
14875 + mov %cs, %esi
14876 + cmp $__KERNEL_CS, %esi
14877 + jz 3f
14878 + ljmp $__KERNEL_CS, $3f
14879 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14880 +2:
14881 +#ifdef CONFIG_PARAVIRT
14882 + mov %esi, %eax
14883 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14884 +#else
14885 + mov %esi, %cr0
14886 +#endif
14887 +3:
14888 +#ifdef CONFIG_PARAVIRT
14889 + popl %ecx
14890 + popl %eax
14891 +#endif
14892 + ret
14893 +ENDPROC(pax_enter_kernel)
14894 +
14895 +ENTRY(pax_exit_kernel)
14896 +#ifdef CONFIG_PARAVIRT
14897 + pushl %eax
14898 + pushl %ecx
14899 +#endif
14900 + mov %cs, %esi
14901 + cmp $__KERNEXEC_KERNEL_CS, %esi
14902 + jnz 2f
14903 +#ifdef CONFIG_PARAVIRT
14904 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14905 + mov %eax, %esi
14906 +#else
14907 + mov %cr0, %esi
14908 +#endif
14909 + btr $16, %esi
14910 + ljmp $__KERNEL_CS, $1f
14911 +1:
14912 +#ifdef CONFIG_PARAVIRT
14913 + mov %esi, %eax
14914 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14915 +#else
14916 + mov %esi, %cr0
14917 +#endif
14918 +2:
14919 +#ifdef CONFIG_PARAVIRT
14920 + popl %ecx
14921 + popl %eax
14922 +#endif
14923 + ret
14924 +ENDPROC(pax_exit_kernel)
14925 +#endif
14926 +
14927 +.macro pax_erase_kstack
14928 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14929 + call pax_erase_kstack
14930 +#endif
14931 +.endm
14932 +
14933 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14934 +/*
14935 + * ebp: thread_info
14936 + * ecx, edx: can be clobbered
14937 + */
14938 +ENTRY(pax_erase_kstack)
14939 + pushl %edi
14940 + pushl %eax
14941 +
14942 + mov TI_lowest_stack(%ebp), %edi
14943 + mov $-0xBEEF, %eax
14944 + std
14945 +
14946 +1: mov %edi, %ecx
14947 + and $THREAD_SIZE_asm - 1, %ecx
14948 + shr $2, %ecx
14949 + repne scasl
14950 + jecxz 2f
14951 +
14952 + cmp $2*16, %ecx
14953 + jc 2f
14954 +
14955 + mov $2*16, %ecx
14956 + repe scasl
14957 + jecxz 2f
14958 + jne 1b
14959 +
14960 +2: cld
14961 + mov %esp, %ecx
14962 + sub %edi, %ecx
14963 + shr $2, %ecx
14964 + rep stosl
14965 +
14966 + mov TI_task_thread_sp0(%ebp), %edi
14967 + sub $128, %edi
14968 + mov %edi, TI_lowest_stack(%ebp)
14969 +
14970 + popl %eax
14971 + popl %edi
14972 + ret
14973 +ENDPROC(pax_erase_kstack)
14974 +#endif
14975 +
14976 +.macro __SAVE_ALL _DS
14977 cld
14978 PUSH_GS
14979 pushl_cfi %fs
14980 @@ -208,7 +341,7 @@
14981 CFI_REL_OFFSET ecx, 0
14982 pushl_cfi %ebx
14983 CFI_REL_OFFSET ebx, 0
14984 - movl $(__USER_DS), %edx
14985 + movl $\_DS, %edx
14986 movl %edx, %ds
14987 movl %edx, %es
14988 movl $(__KERNEL_PERCPU), %edx
14989 @@ -216,6 +349,15 @@
14990 SET_KERNEL_GS %edx
14991 .endm
14992
14993 +.macro SAVE_ALL
14994 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
14995 + __SAVE_ALL __KERNEL_DS
14996 + pax_enter_kernel
14997 +#else
14998 + __SAVE_ALL __USER_DS
14999 +#endif
15000 +.endm
15001 +
15002 .macro RESTORE_INT_REGS
15003 popl_cfi %ebx
15004 CFI_RESTORE ebx
15005 @@ -301,7 +443,7 @@ ENTRY(ret_from_fork)
15006 popfl_cfi
15007 jmp syscall_exit
15008 CFI_ENDPROC
15009 -END(ret_from_fork)
15010 +ENDPROC(ret_from_fork)
15011
15012 /*
15013 * Interrupt exit functions should be protected against kprobes
15014 @@ -335,7 +477,15 @@ resume_userspace_sig:
15015 andl $SEGMENT_RPL_MASK, %eax
15016 #endif
15017 cmpl $USER_RPL, %eax
15018 +
15019 +#ifdef CONFIG_PAX_KERNEXEC
15020 + jae resume_userspace
15021 +
15022 + pax_exit_kernel
15023 + jmp resume_kernel
15024 +#else
15025 jb resume_kernel # not returning to v8086 or userspace
15026 +#endif
15027
15028 ENTRY(resume_userspace)
15029 LOCKDEP_SYS_EXIT
15030 @@ -347,8 +497,8 @@ ENTRY(resume_userspace)
15031 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15032 # int/exception return?
15033 jne work_pending
15034 - jmp restore_all
15035 -END(ret_from_exception)
15036 + jmp restore_all_pax
15037 +ENDPROC(ret_from_exception)
15038
15039 #ifdef CONFIG_PREEMPT
15040 ENTRY(resume_kernel)
15041 @@ -363,7 +513,7 @@ need_resched:
15042 jz restore_all
15043 call preempt_schedule_irq
15044 jmp need_resched
15045 -END(resume_kernel)
15046 +ENDPROC(resume_kernel)
15047 #endif
15048 CFI_ENDPROC
15049 /*
15050 @@ -397,23 +547,34 @@ sysenter_past_esp:
15051 /*CFI_REL_OFFSET cs, 0*/
15052 /*
15053 * Push current_thread_info()->sysenter_return to the stack.
15054 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15055 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
15056 */
15057 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
15058 + pushl_cfi $0
15059 CFI_REL_OFFSET eip, 0
15060
15061 pushl_cfi %eax
15062 SAVE_ALL
15063 + GET_THREAD_INFO(%ebp)
15064 + movl TI_sysenter_return(%ebp),%ebp
15065 + movl %ebp,PT_EIP(%esp)
15066 ENABLE_INTERRUPTS(CLBR_NONE)
15067
15068 /*
15069 * Load the potential sixth argument from user stack.
15070 * Careful about security.
15071 */
15072 + movl PT_OLDESP(%esp),%ebp
15073 +
15074 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15075 + mov PT_OLDSS(%esp),%ds
15076 +1: movl %ds:(%ebp),%ebp
15077 + push %ss
15078 + pop %ds
15079 +#else
15080 cmpl $__PAGE_OFFSET-3,%ebp
15081 jae syscall_fault
15082 1: movl (%ebp),%ebp
15083 +#endif
15084 +
15085 movl %ebp,PT_EBP(%esp)
15086 .section __ex_table,"a"
15087 .align 4
15088 @@ -436,12 +597,24 @@ sysenter_do_call:
15089 testl $_TIF_ALLWORK_MASK, %ecx
15090 jne sysexit_audit
15091 sysenter_exit:
15092 +
15093 +#ifdef CONFIG_PAX_RANDKSTACK
15094 + pushl_cfi %eax
15095 + movl %esp, %eax
15096 + call pax_randomize_kstack
15097 + popl_cfi %eax
15098 +#endif
15099 +
15100 + pax_erase_kstack
15101 +
15102 /* if something modifies registers it must also disable sysexit */
15103 movl PT_EIP(%esp), %edx
15104 movl PT_OLDESP(%esp), %ecx
15105 xorl %ebp,%ebp
15106 TRACE_IRQS_ON
15107 1: mov PT_FS(%esp), %fs
15108 +2: mov PT_DS(%esp), %ds
15109 +3: mov PT_ES(%esp), %es
15110 PTGS_TO_GS
15111 ENABLE_INTERRUPTS_SYSEXIT
15112
15113 @@ -458,6 +631,9 @@ sysenter_audit:
15114 movl %eax,%edx /* 2nd arg: syscall number */
15115 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15116 call __audit_syscall_entry
15117 +
15118 + pax_erase_kstack
15119 +
15120 pushl_cfi %ebx
15121 movl PT_EAX(%esp),%eax /* reload syscall number */
15122 jmp sysenter_do_call
15123 @@ -483,11 +659,17 @@ sysexit_audit:
15124
15125 CFI_ENDPROC
15126 .pushsection .fixup,"ax"
15127 -2: movl $0,PT_FS(%esp)
15128 +4: movl $0,PT_FS(%esp)
15129 + jmp 1b
15130 +5: movl $0,PT_DS(%esp)
15131 + jmp 1b
15132 +6: movl $0,PT_ES(%esp)
15133 jmp 1b
15134 .section __ex_table,"a"
15135 .align 4
15136 - .long 1b,2b
15137 + .long 1b,4b
15138 + .long 2b,5b
15139 + .long 3b,6b
15140 .popsection
15141 PTGS_TO_GS_EX
15142 ENDPROC(ia32_sysenter_target)
15143 @@ -520,6 +702,15 @@ syscall_exit:
15144 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15145 jne syscall_exit_work
15146
15147 +restore_all_pax:
15148 +
15149 +#ifdef CONFIG_PAX_RANDKSTACK
15150 + movl %esp, %eax
15151 + call pax_randomize_kstack
15152 +#endif
15153 +
15154 + pax_erase_kstack
15155 +
15156 restore_all:
15157 TRACE_IRQS_IRET
15158 restore_all_notrace:
15159 @@ -579,14 +770,34 @@ ldt_ss:
15160 * compensating for the offset by changing to the ESPFIX segment with
15161 * a base address that matches for the difference.
15162 */
15163 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
15164 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
15165 mov %esp, %edx /* load kernel esp */
15166 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15167 mov %dx, %ax /* eax: new kernel esp */
15168 sub %eax, %edx /* offset (low word is 0) */
15169 +#ifdef CONFIG_SMP
15170 + movl PER_CPU_VAR(cpu_number), %ebx
15171 + shll $PAGE_SHIFT_asm, %ebx
15172 + addl $cpu_gdt_table, %ebx
15173 +#else
15174 + movl $cpu_gdt_table, %ebx
15175 +#endif
15176 shr $16, %edx
15177 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
15178 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
15179 +
15180 +#ifdef CONFIG_PAX_KERNEXEC
15181 + mov %cr0, %esi
15182 + btr $16, %esi
15183 + mov %esi, %cr0
15184 +#endif
15185 +
15186 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
15187 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
15188 +
15189 +#ifdef CONFIG_PAX_KERNEXEC
15190 + bts $16, %esi
15191 + mov %esi, %cr0
15192 +#endif
15193 +
15194 pushl_cfi $__ESPFIX_SS
15195 pushl_cfi %eax /* new kernel esp */
15196 /* Disable interrupts, but do not irqtrace this section: we
15197 @@ -615,38 +826,30 @@ work_resched:
15198 movl TI_flags(%ebp), %ecx
15199 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15200 # than syscall tracing?
15201 - jz restore_all
15202 + jz restore_all_pax
15203 testb $_TIF_NEED_RESCHED, %cl
15204 jnz work_resched
15205
15206 work_notifysig: # deal with pending signals and
15207 # notify-resume requests
15208 + movl %esp, %eax
15209 #ifdef CONFIG_VM86
15210 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15211 - movl %esp, %eax
15212 - jne work_notifysig_v86 # returning to kernel-space or
15213 + jz 1f # returning to kernel-space or
15214 # vm86-space
15215 - TRACE_IRQS_ON
15216 - ENABLE_INTERRUPTS(CLBR_NONE)
15217 - xorl %edx, %edx
15218 - call do_notify_resume
15219 - jmp resume_userspace_sig
15220
15221 - ALIGN
15222 -work_notifysig_v86:
15223 pushl_cfi %ecx # save ti_flags for do_notify_resume
15224 call save_v86_state # %eax contains pt_regs pointer
15225 popl_cfi %ecx
15226 movl %eax, %esp
15227 -#else
15228 - movl %esp, %eax
15229 +1:
15230 #endif
15231 TRACE_IRQS_ON
15232 ENABLE_INTERRUPTS(CLBR_NONE)
15233 xorl %edx, %edx
15234 call do_notify_resume
15235 jmp resume_userspace_sig
15236 -END(work_pending)
15237 +ENDPROC(work_pending)
15238
15239 # perform syscall exit tracing
15240 ALIGN
15241 @@ -654,11 +857,14 @@ syscall_trace_entry:
15242 movl $-ENOSYS,PT_EAX(%esp)
15243 movl %esp, %eax
15244 call syscall_trace_enter
15245 +
15246 + pax_erase_kstack
15247 +
15248 /* What it returned is what we'll actually use. */
15249 cmpl $(NR_syscalls), %eax
15250 jnae syscall_call
15251 jmp syscall_exit
15252 -END(syscall_trace_entry)
15253 +ENDPROC(syscall_trace_entry)
15254
15255 # perform syscall exit tracing
15256 ALIGN
15257 @@ -671,20 +877,24 @@ syscall_exit_work:
15258 movl %esp, %eax
15259 call syscall_trace_leave
15260 jmp resume_userspace
15261 -END(syscall_exit_work)
15262 +ENDPROC(syscall_exit_work)
15263 CFI_ENDPROC
15264
15265 RING0_INT_FRAME # can't unwind into user space anyway
15266 syscall_fault:
15267 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15268 + push %ss
15269 + pop %ds
15270 +#endif
15271 GET_THREAD_INFO(%ebp)
15272 movl $-EFAULT,PT_EAX(%esp)
15273 jmp resume_userspace
15274 -END(syscall_fault)
15275 +ENDPROC(syscall_fault)
15276
15277 syscall_badsys:
15278 movl $-ENOSYS,PT_EAX(%esp)
15279 jmp resume_userspace
15280 -END(syscall_badsys)
15281 +ENDPROC(syscall_badsys)
15282 CFI_ENDPROC
15283 /*
15284 * End of kprobes section
15285 @@ -756,6 +966,36 @@ ENTRY(ptregs_clone)
15286 CFI_ENDPROC
15287 ENDPROC(ptregs_clone)
15288
15289 + ALIGN;
15290 +ENTRY(kernel_execve)
15291 + CFI_STARTPROC
15292 + pushl_cfi %ebp
15293 + sub $PT_OLDSS+4,%esp
15294 + pushl_cfi %edi
15295 + pushl_cfi %ecx
15296 + pushl_cfi %eax
15297 + lea 3*4(%esp),%edi
15298 + mov $PT_OLDSS/4+1,%ecx
15299 + xorl %eax,%eax
15300 + rep stosl
15301 + popl_cfi %eax
15302 + popl_cfi %ecx
15303 + popl_cfi %edi
15304 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15305 + pushl_cfi %esp
15306 + call sys_execve
15307 + add $4,%esp
15308 + CFI_ADJUST_CFA_OFFSET -4
15309 + GET_THREAD_INFO(%ebp)
15310 + test %eax,%eax
15311 + jz syscall_exit
15312 + add $PT_OLDSS+4,%esp
15313 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
15314 + popl_cfi %ebp
15315 + ret
15316 + CFI_ENDPROC
15317 +ENDPROC(kernel_execve)
15318 +
15319 .macro FIXUP_ESPFIX_STACK
15320 /*
15321 * Switch back for ESPFIX stack to the normal zerobased stack
15322 @@ -765,8 +1005,15 @@ ENDPROC(ptregs_clone)
15323 * normal stack and adjusts ESP with the matching offset.
15324 */
15325 /* fixup the stack */
15326 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
15327 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
15328 +#ifdef CONFIG_SMP
15329 + movl PER_CPU_VAR(cpu_number), %ebx
15330 + shll $PAGE_SHIFT_asm, %ebx
15331 + addl $cpu_gdt_table, %ebx
15332 +#else
15333 + movl $cpu_gdt_table, %ebx
15334 +#endif
15335 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
15336 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
15337 shl $16, %eax
15338 addl %esp, %eax /* the adjusted stack pointer */
15339 pushl_cfi $__KERNEL_DS
15340 @@ -819,7 +1066,7 @@ vector=vector+1
15341 .endr
15342 2: jmp common_interrupt
15343 .endr
15344 -END(irq_entries_start)
15345 +ENDPROC(irq_entries_start)
15346
15347 .previous
15348 END(interrupt)
15349 @@ -867,7 +1114,7 @@ ENTRY(coprocessor_error)
15350 pushl_cfi $do_coprocessor_error
15351 jmp error_code
15352 CFI_ENDPROC
15353 -END(coprocessor_error)
15354 +ENDPROC(coprocessor_error)
15355
15356 ENTRY(simd_coprocessor_error)
15357 RING0_INT_FRAME
15358 @@ -888,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
15359 #endif
15360 jmp error_code
15361 CFI_ENDPROC
15362 -END(simd_coprocessor_error)
15363 +ENDPROC(simd_coprocessor_error)
15364
15365 ENTRY(device_not_available)
15366 RING0_INT_FRAME
15367 @@ -896,7 +1143,7 @@ ENTRY(device_not_available)
15368 pushl_cfi $do_device_not_available
15369 jmp error_code
15370 CFI_ENDPROC
15371 -END(device_not_available)
15372 +ENDPROC(device_not_available)
15373
15374 #ifdef CONFIG_PARAVIRT
15375 ENTRY(native_iret)
15376 @@ -905,12 +1152,12 @@ ENTRY(native_iret)
15377 .align 4
15378 .long native_iret, iret_exc
15379 .previous
15380 -END(native_iret)
15381 +ENDPROC(native_iret)
15382
15383 ENTRY(native_irq_enable_sysexit)
15384 sti
15385 sysexit
15386 -END(native_irq_enable_sysexit)
15387 +ENDPROC(native_irq_enable_sysexit)
15388 #endif
15389
15390 ENTRY(overflow)
15391 @@ -919,7 +1166,7 @@ ENTRY(overflow)
15392 pushl_cfi $do_overflow
15393 jmp error_code
15394 CFI_ENDPROC
15395 -END(overflow)
15396 +ENDPROC(overflow)
15397
15398 ENTRY(bounds)
15399 RING0_INT_FRAME
15400 @@ -927,7 +1174,7 @@ ENTRY(bounds)
15401 pushl_cfi $do_bounds
15402 jmp error_code
15403 CFI_ENDPROC
15404 -END(bounds)
15405 +ENDPROC(bounds)
15406
15407 ENTRY(invalid_op)
15408 RING0_INT_FRAME
15409 @@ -935,7 +1182,7 @@ ENTRY(invalid_op)
15410 pushl_cfi $do_invalid_op
15411 jmp error_code
15412 CFI_ENDPROC
15413 -END(invalid_op)
15414 +ENDPROC(invalid_op)
15415
15416 ENTRY(coprocessor_segment_overrun)
15417 RING0_INT_FRAME
15418 @@ -943,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
15419 pushl_cfi $do_coprocessor_segment_overrun
15420 jmp error_code
15421 CFI_ENDPROC
15422 -END(coprocessor_segment_overrun)
15423 +ENDPROC(coprocessor_segment_overrun)
15424
15425 ENTRY(invalid_TSS)
15426 RING0_EC_FRAME
15427 pushl_cfi $do_invalid_TSS
15428 jmp error_code
15429 CFI_ENDPROC
15430 -END(invalid_TSS)
15431 +ENDPROC(invalid_TSS)
15432
15433 ENTRY(segment_not_present)
15434 RING0_EC_FRAME
15435 pushl_cfi $do_segment_not_present
15436 jmp error_code
15437 CFI_ENDPROC
15438 -END(segment_not_present)
15439 +ENDPROC(segment_not_present)
15440
15441 ENTRY(stack_segment)
15442 RING0_EC_FRAME
15443 pushl_cfi $do_stack_segment
15444 jmp error_code
15445 CFI_ENDPROC
15446 -END(stack_segment)
15447 +ENDPROC(stack_segment)
15448
15449 ENTRY(alignment_check)
15450 RING0_EC_FRAME
15451 pushl_cfi $do_alignment_check
15452 jmp error_code
15453 CFI_ENDPROC
15454 -END(alignment_check)
15455 +ENDPROC(alignment_check)
15456
15457 ENTRY(divide_error)
15458 RING0_INT_FRAME
15459 @@ -979,7 +1226,7 @@ ENTRY(divide_error)
15460 pushl_cfi $do_divide_error
15461 jmp error_code
15462 CFI_ENDPROC
15463 -END(divide_error)
15464 +ENDPROC(divide_error)
15465
15466 #ifdef CONFIG_X86_MCE
15467 ENTRY(machine_check)
15468 @@ -988,7 +1235,7 @@ ENTRY(machine_check)
15469 pushl_cfi machine_check_vector
15470 jmp error_code
15471 CFI_ENDPROC
15472 -END(machine_check)
15473 +ENDPROC(machine_check)
15474 #endif
15475
15476 ENTRY(spurious_interrupt_bug)
15477 @@ -997,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
15478 pushl_cfi $do_spurious_interrupt_bug
15479 jmp error_code
15480 CFI_ENDPROC
15481 -END(spurious_interrupt_bug)
15482 +ENDPROC(spurious_interrupt_bug)
15483 /*
15484 * End of kprobes section
15485 */
15486 @@ -1112,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
15487
15488 ENTRY(mcount)
15489 ret
15490 -END(mcount)
15491 +ENDPROC(mcount)
15492
15493 ENTRY(ftrace_caller)
15494 cmpl $0, function_trace_stop
15495 @@ -1141,7 +1388,7 @@ ftrace_graph_call:
15496 .globl ftrace_stub
15497 ftrace_stub:
15498 ret
15499 -END(ftrace_caller)
15500 +ENDPROC(ftrace_caller)
15501
15502 #else /* ! CONFIG_DYNAMIC_FTRACE */
15503
15504 @@ -1177,7 +1424,7 @@ trace:
15505 popl %ecx
15506 popl %eax
15507 jmp ftrace_stub
15508 -END(mcount)
15509 +ENDPROC(mcount)
15510 #endif /* CONFIG_DYNAMIC_FTRACE */
15511 #endif /* CONFIG_FUNCTION_TRACER */
15512
15513 @@ -1198,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
15514 popl %ecx
15515 popl %eax
15516 ret
15517 -END(ftrace_graph_caller)
15518 +ENDPROC(ftrace_graph_caller)
15519
15520 .globl return_to_handler
15521 return_to_handler:
15522 @@ -1253,15 +1500,18 @@ error_code:
15523 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15524 REG_TO_PTGS %ecx
15525 SET_KERNEL_GS %ecx
15526 - movl $(__USER_DS), %ecx
15527 + movl $(__KERNEL_DS), %ecx
15528 movl %ecx, %ds
15529 movl %ecx, %es
15530 +
15531 + pax_enter_kernel
15532 +
15533 TRACE_IRQS_OFF
15534 movl %esp,%eax # pt_regs pointer
15535 call *%edi
15536 jmp ret_from_exception
15537 CFI_ENDPROC
15538 -END(page_fault)
15539 +ENDPROC(page_fault)
15540
15541 /*
15542 * Debug traps and NMI can happen at the one SYSENTER instruction
15543 @@ -1303,7 +1553,7 @@ debug_stack_correct:
15544 call do_debug
15545 jmp ret_from_exception
15546 CFI_ENDPROC
15547 -END(debug)
15548 +ENDPROC(debug)
15549
15550 /*
15551 * NMI is doubly nasty. It can happen _while_ we're handling
15552 @@ -1340,6 +1590,9 @@ nmi_stack_correct:
15553 xorl %edx,%edx # zero error code
15554 movl %esp,%eax # pt_regs pointer
15555 call do_nmi
15556 +
15557 + pax_exit_kernel
15558 +
15559 jmp restore_all_notrace
15560 CFI_ENDPROC
15561
15562 @@ -1376,12 +1629,15 @@ nmi_espfix_stack:
15563 FIXUP_ESPFIX_STACK # %eax == %esp
15564 xorl %edx,%edx # zero error code
15565 call do_nmi
15566 +
15567 + pax_exit_kernel
15568 +
15569 RESTORE_REGS
15570 lss 12+4(%esp), %esp # back to espfix stack
15571 CFI_ADJUST_CFA_OFFSET -24
15572 jmp irq_return
15573 CFI_ENDPROC
15574 -END(nmi)
15575 +ENDPROC(nmi)
15576
15577 ENTRY(int3)
15578 RING0_INT_FRAME
15579 @@ -1393,14 +1649,14 @@ ENTRY(int3)
15580 call do_int3
15581 jmp ret_from_exception
15582 CFI_ENDPROC
15583 -END(int3)
15584 +ENDPROC(int3)
15585
15586 ENTRY(general_protection)
15587 RING0_EC_FRAME
15588 pushl_cfi $do_general_protection
15589 jmp error_code
15590 CFI_ENDPROC
15591 -END(general_protection)
15592 +ENDPROC(general_protection)
15593
15594 #ifdef CONFIG_KVM_GUEST
15595 ENTRY(async_page_fault)
15596 @@ -1408,7 +1664,7 @@ ENTRY(async_page_fault)
15597 pushl_cfi $do_async_page_fault
15598 jmp error_code
15599 CFI_ENDPROC
15600 -END(async_page_fault)
15601 +ENDPROC(async_page_fault)
15602 #endif
15603
15604 /*
15605 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15606 index cdc79b5..4710a75 100644
15607 --- a/arch/x86/kernel/entry_64.S
15608 +++ b/arch/x86/kernel/entry_64.S
15609 @@ -56,6 +56,8 @@
15610 #include <asm/ftrace.h>
15611 #include <asm/percpu.h>
15612 #include <linux/err.h>
15613 +#include <asm/pgtable.h>
15614 +#include <asm/alternative-asm.h>
15615
15616 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15617 #include <linux/elf-em.h>
15618 @@ -69,8 +71,9 @@
15619 #ifdef CONFIG_FUNCTION_TRACER
15620 #ifdef CONFIG_DYNAMIC_FTRACE
15621 ENTRY(mcount)
15622 + pax_force_retaddr
15623 retq
15624 -END(mcount)
15625 +ENDPROC(mcount)
15626
15627 ENTRY(ftrace_caller)
15628 cmpl $0, function_trace_stop
15629 @@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
15630 #endif
15631
15632 GLOBAL(ftrace_stub)
15633 + pax_force_retaddr
15634 retq
15635 -END(ftrace_caller)
15636 +ENDPROC(ftrace_caller)
15637
15638 #else /* ! CONFIG_DYNAMIC_FTRACE */
15639 ENTRY(mcount)
15640 @@ -113,6 +117,7 @@ ENTRY(mcount)
15641 #endif
15642
15643 GLOBAL(ftrace_stub)
15644 + pax_force_retaddr
15645 retq
15646
15647 trace:
15648 @@ -122,12 +127,13 @@ trace:
15649 movq 8(%rbp), %rsi
15650 subq $MCOUNT_INSN_SIZE, %rdi
15651
15652 + pax_force_fptr ftrace_trace_function
15653 call *ftrace_trace_function
15654
15655 MCOUNT_RESTORE_FRAME
15656
15657 jmp ftrace_stub
15658 -END(mcount)
15659 +ENDPROC(mcount)
15660 #endif /* CONFIG_DYNAMIC_FTRACE */
15661 #endif /* CONFIG_FUNCTION_TRACER */
15662
15663 @@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
15664
15665 MCOUNT_RESTORE_FRAME
15666
15667 + pax_force_retaddr
15668 retq
15669 -END(ftrace_graph_caller)
15670 +ENDPROC(ftrace_graph_caller)
15671
15672 GLOBAL(return_to_handler)
15673 subq $24, %rsp
15674 @@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
15675 movq 8(%rsp), %rdx
15676 movq (%rsp), %rax
15677 addq $24, %rsp
15678 + pax_force_fptr %rdi
15679 jmp *%rdi
15680 #endif
15681
15682 @@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
15683 ENDPROC(native_usergs_sysret64)
15684 #endif /* CONFIG_PARAVIRT */
15685
15686 + .macro ljmpq sel, off
15687 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15688 + .byte 0x48; ljmp *1234f(%rip)
15689 + .pushsection .rodata
15690 + .align 16
15691 + 1234: .quad \off; .word \sel
15692 + .popsection
15693 +#else
15694 + pushq $\sel
15695 + pushq $\off
15696 + lretq
15697 +#endif
15698 + .endm
15699 +
15700 + .macro pax_enter_kernel
15701 + pax_set_fptr_mask
15702 +#ifdef CONFIG_PAX_KERNEXEC
15703 + call pax_enter_kernel
15704 +#endif
15705 + .endm
15706 +
15707 + .macro pax_exit_kernel
15708 +#ifdef CONFIG_PAX_KERNEXEC
15709 + call pax_exit_kernel
15710 +#endif
15711 + .endm
15712 +
15713 +#ifdef CONFIG_PAX_KERNEXEC
15714 +ENTRY(pax_enter_kernel)
15715 + pushq %rdi
15716 +
15717 +#ifdef CONFIG_PARAVIRT
15718 + PV_SAVE_REGS(CLBR_RDI)
15719 +#endif
15720 +
15721 + GET_CR0_INTO_RDI
15722 + bts $16,%rdi
15723 + jnc 3f
15724 + mov %cs,%edi
15725 + cmp $__KERNEL_CS,%edi
15726 + jnz 2f
15727 +1:
15728 +
15729 +#ifdef CONFIG_PARAVIRT
15730 + PV_RESTORE_REGS(CLBR_RDI)
15731 +#endif
15732 +
15733 + popq %rdi
15734 + pax_force_retaddr
15735 + retq
15736 +
15737 +2: ljmpq __KERNEL_CS,1f
15738 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15739 +4: SET_RDI_INTO_CR0
15740 + jmp 1b
15741 +ENDPROC(pax_enter_kernel)
15742 +
15743 +ENTRY(pax_exit_kernel)
15744 + pushq %rdi
15745 +
15746 +#ifdef CONFIG_PARAVIRT
15747 + PV_SAVE_REGS(CLBR_RDI)
15748 +#endif
15749 +
15750 + mov %cs,%rdi
15751 + cmp $__KERNEXEC_KERNEL_CS,%edi
15752 + jz 2f
15753 +1:
15754 +
15755 +#ifdef CONFIG_PARAVIRT
15756 + PV_RESTORE_REGS(CLBR_RDI);
15757 +#endif
15758 +
15759 + popq %rdi
15760 + pax_force_retaddr
15761 + retq
15762 +
15763 +2: GET_CR0_INTO_RDI
15764 + btr $16,%rdi
15765 + ljmpq __KERNEL_CS,3f
15766 +3: SET_RDI_INTO_CR0
15767 + jmp 1b
15768 +#ifdef CONFIG_PARAVIRT
15769 + PV_RESTORE_REGS(CLBR_RDI);
15770 +#endif
15771 +
15772 + popq %rdi
15773 + pax_force_retaddr
15774 + retq
15775 +ENDPROC(pax_exit_kernel)
15776 +#endif
15777 +
15778 + .macro pax_enter_kernel_user
15779 + pax_set_fptr_mask
15780 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15781 + call pax_enter_kernel_user
15782 +#endif
15783 + .endm
15784 +
15785 + .macro pax_exit_kernel_user
15786 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15787 + call pax_exit_kernel_user
15788 +#endif
15789 +#ifdef CONFIG_PAX_RANDKSTACK
15790 + pushq %rax
15791 + call pax_randomize_kstack
15792 + popq %rax
15793 +#endif
15794 + .endm
15795 +
15796 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15797 +ENTRY(pax_enter_kernel_user)
15798 + pushq %rdi
15799 + pushq %rbx
15800 +
15801 +#ifdef CONFIG_PARAVIRT
15802 + PV_SAVE_REGS(CLBR_RDI)
15803 +#endif
15804 +
15805 + GET_CR3_INTO_RDI
15806 + mov %rdi,%rbx
15807 + add $__START_KERNEL_map,%rbx
15808 + sub phys_base(%rip),%rbx
15809 +
15810 +#ifdef CONFIG_PARAVIRT
15811 + pushq %rdi
15812 + cmpl $0, pv_info+PARAVIRT_enabled
15813 + jz 1f
15814 + i = 0
15815 + .rept USER_PGD_PTRS
15816 + mov i*8(%rbx),%rsi
15817 + mov $0,%sil
15818 + lea i*8(%rbx),%rdi
15819 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15820 + i = i + 1
15821 + .endr
15822 + jmp 2f
15823 +1:
15824 +#endif
15825 +
15826 + i = 0
15827 + .rept USER_PGD_PTRS
15828 + movb $0,i*8(%rbx)
15829 + i = i + 1
15830 + .endr
15831 +
15832 +#ifdef CONFIG_PARAVIRT
15833 +2: popq %rdi
15834 +#endif
15835 + SET_RDI_INTO_CR3
15836 +
15837 +#ifdef CONFIG_PAX_KERNEXEC
15838 + GET_CR0_INTO_RDI
15839 + bts $16,%rdi
15840 + SET_RDI_INTO_CR0
15841 +#endif
15842 +
15843 +#ifdef CONFIG_PARAVIRT
15844 + PV_RESTORE_REGS(CLBR_RDI)
15845 +#endif
15846 +
15847 + popq %rbx
15848 + popq %rdi
15849 + pax_force_retaddr
15850 + retq
15851 +ENDPROC(pax_enter_kernel_user)
15852 +
15853 +ENTRY(pax_exit_kernel_user)
15854 + push %rdi
15855 +
15856 +#ifdef CONFIG_PARAVIRT
15857 + pushq %rbx
15858 + PV_SAVE_REGS(CLBR_RDI)
15859 +#endif
15860 +
15861 +#ifdef CONFIG_PAX_KERNEXEC
15862 + GET_CR0_INTO_RDI
15863 + btr $16,%rdi
15864 + SET_RDI_INTO_CR0
15865 +#endif
15866 +
15867 + GET_CR3_INTO_RDI
15868 + add $__START_KERNEL_map,%rdi
15869 + sub phys_base(%rip),%rdi
15870 +
15871 +#ifdef CONFIG_PARAVIRT
15872 + cmpl $0, pv_info+PARAVIRT_enabled
15873 + jz 1f
15874 + mov %rdi,%rbx
15875 + i = 0
15876 + .rept USER_PGD_PTRS
15877 + mov i*8(%rbx),%rsi
15878 + mov $0x67,%sil
15879 + lea i*8(%rbx),%rdi
15880 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15881 + i = i + 1
15882 + .endr
15883 + jmp 2f
15884 +1:
15885 +#endif
15886 +
15887 + i = 0
15888 + .rept USER_PGD_PTRS
15889 + movb $0x67,i*8(%rdi)
15890 + i = i + 1
15891 + .endr
15892 +
15893 +#ifdef CONFIG_PARAVIRT
15894 +2: PV_RESTORE_REGS(CLBR_RDI)
15895 + popq %rbx
15896 +#endif
15897 +
15898 + popq %rdi
15899 + pax_force_retaddr
15900 + retq
15901 +ENDPROC(pax_exit_kernel_user)
15902 +#endif
15903 +
15904 +.macro pax_erase_kstack
15905 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15906 + call pax_erase_kstack
15907 +#endif
15908 +.endm
15909 +
15910 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15911 +/*
15912 + * r11: thread_info
15913 + * rcx, rdx: can be clobbered
15914 + */
15915 +ENTRY(pax_erase_kstack)
15916 + pushq %rdi
15917 + pushq %rax
15918 + pushq %r11
15919 +
15920 + GET_THREAD_INFO(%r11)
15921 + mov TI_lowest_stack(%r11), %rdi
15922 + mov $-0xBEEF, %rax
15923 + std
15924 +
15925 +1: mov %edi, %ecx
15926 + and $THREAD_SIZE_asm - 1, %ecx
15927 + shr $3, %ecx
15928 + repne scasq
15929 + jecxz 2f
15930 +
15931 + cmp $2*8, %ecx
15932 + jc 2f
15933 +
15934 + mov $2*8, %ecx
15935 + repe scasq
15936 + jecxz 2f
15937 + jne 1b
15938 +
15939 +2: cld
15940 + mov %esp, %ecx
15941 + sub %edi, %ecx
15942 +
15943 + cmp $THREAD_SIZE_asm, %rcx
15944 + jb 3f
15945 + ud2
15946 +3:
15947 +
15948 + shr $3, %ecx
15949 + rep stosq
15950 +
15951 + mov TI_task_thread_sp0(%r11), %rdi
15952 + sub $256, %rdi
15953 + mov %rdi, TI_lowest_stack(%r11)
15954 +
15955 + popq %r11
15956 + popq %rax
15957 + popq %rdi
15958 + pax_force_retaddr
15959 + ret
15960 +ENDPROC(pax_erase_kstack)
15961 +#endif
15962
15963 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15964 #ifdef CONFIG_TRACE_IRQFLAGS
15965 @@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
15966 .endm
15967
15968 .macro UNFAKE_STACK_FRAME
15969 - addq $8*6, %rsp
15970 - CFI_ADJUST_CFA_OFFSET -(6*8)
15971 + addq $8*6 + ARG_SKIP, %rsp
15972 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
15973 .endm
15974
15975 /*
15976 @@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
15977 movq %rsp, %rsi
15978
15979 leaq -RBP(%rsp),%rdi /* arg1 for handler */
15980 - testl $3, CS-RBP(%rsi)
15981 + testb $3, CS-RBP(%rsi)
15982 je 1f
15983 SWAPGS
15984 /*
15985 @@ -355,9 +639,10 @@ ENTRY(save_rest)
15986 movq_cfi r15, R15+16
15987 movq %r11, 8(%rsp) /* return address */
15988 FIXUP_TOP_OF_STACK %r11, 16
15989 + pax_force_retaddr
15990 ret
15991 CFI_ENDPROC
15992 -END(save_rest)
15993 +ENDPROC(save_rest)
15994
15995 /* save complete stack frame */
15996 .pushsection .kprobes.text, "ax"
15997 @@ -386,9 +671,10 @@ ENTRY(save_paranoid)
15998 js 1f /* negative -> in kernel */
15999 SWAPGS
16000 xorl %ebx,%ebx
16001 -1: ret
16002 +1: pax_force_retaddr_bts
16003 + ret
16004 CFI_ENDPROC
16005 -END(save_paranoid)
16006 +ENDPROC(save_paranoid)
16007 .popsection
16008
16009 /*
16010 @@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
16011
16012 RESTORE_REST
16013
16014 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16015 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16016 jz retint_restore_args
16017
16018 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16019 @@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
16020 jmp ret_from_sys_call # go to the SYSRET fastpath
16021
16022 CFI_ENDPROC
16023 -END(ret_from_fork)
16024 +ENDPROC(ret_from_fork)
16025
16026 /*
16027 * System call entry. Up to 6 arguments in registers are supported.
16028 @@ -456,7 +742,7 @@ END(ret_from_fork)
16029 ENTRY(system_call)
16030 CFI_STARTPROC simple
16031 CFI_SIGNAL_FRAME
16032 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16033 + CFI_DEF_CFA rsp,0
16034 CFI_REGISTER rip,rcx
16035 /*CFI_REGISTER rflags,r11*/
16036 SWAPGS_UNSAFE_STACK
16037 @@ -469,16 +755,18 @@ GLOBAL(system_call_after_swapgs)
16038
16039 movq %rsp,PER_CPU_VAR(old_rsp)
16040 movq PER_CPU_VAR(kernel_stack),%rsp
16041 + SAVE_ARGS 8*6,0
16042 + pax_enter_kernel_user
16043 /*
16044 * No need to follow this irqs off/on section - it's straight
16045 * and short:
16046 */
16047 ENABLE_INTERRUPTS(CLBR_NONE)
16048 - SAVE_ARGS 8,0
16049 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16050 movq %rcx,RIP-ARGOFFSET(%rsp)
16051 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16052 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16053 + GET_THREAD_INFO(%rcx)
16054 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
16055 jnz tracesys
16056 system_call_fastpath:
16057 #if __SYSCALL_MASK == ~0
16058 @@ -488,7 +776,7 @@ system_call_fastpath:
16059 cmpl $__NR_syscall_max,%eax
16060 #endif
16061 ja badsys
16062 - movq %r10,%rcx
16063 + movq R10-ARGOFFSET(%rsp),%rcx
16064 call *sys_call_table(,%rax,8) # XXX: rip relative
16065 movq %rax,RAX-ARGOFFSET(%rsp)
16066 /*
16067 @@ -502,10 +790,13 @@ sysret_check:
16068 LOCKDEP_SYS_EXIT
16069 DISABLE_INTERRUPTS(CLBR_NONE)
16070 TRACE_IRQS_OFF
16071 - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
16072 + GET_THREAD_INFO(%rcx)
16073 + movl TI_flags(%rcx),%edx
16074 andl %edi,%edx
16075 jnz sysret_careful
16076 CFI_REMEMBER_STATE
16077 + pax_exit_kernel_user
16078 + pax_erase_kstack
16079 /*
16080 * sysretq will re-enable interrupts:
16081 */
16082 @@ -557,14 +848,18 @@ badsys:
16083 * jump back to the normal fast path.
16084 */
16085 auditsys:
16086 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
16087 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16088 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16089 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16090 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16091 movq %rax,%rsi /* 2nd arg: syscall number */
16092 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16093 call __audit_syscall_entry
16094 +
16095 + pax_erase_kstack
16096 +
16097 LOAD_ARGS 0 /* reload call-clobbered registers */
16098 + pax_set_fptr_mask
16099 jmp system_call_fastpath
16100
16101 /*
16102 @@ -585,7 +880,7 @@ sysret_audit:
16103 /* Do syscall tracing */
16104 tracesys:
16105 #ifdef CONFIG_AUDITSYSCALL
16106 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16107 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
16108 jz auditsys
16109 #endif
16110 SAVE_REST
16111 @@ -593,12 +888,16 @@ tracesys:
16112 FIXUP_TOP_OF_STACK %rdi
16113 movq %rsp,%rdi
16114 call syscall_trace_enter
16115 +
16116 + pax_erase_kstack
16117 +
16118 /*
16119 * Reload arg registers from stack in case ptrace changed them.
16120 * We don't reload %rax because syscall_trace_enter() returned
16121 * the value it wants us to use in the table lookup.
16122 */
16123 LOAD_ARGS ARGOFFSET, 1
16124 + pax_set_fptr_mask
16125 RESTORE_REST
16126 #if __SYSCALL_MASK == ~0
16127 cmpq $__NR_syscall_max,%rax
16128 @@ -607,7 +906,7 @@ tracesys:
16129 cmpl $__NR_syscall_max,%eax
16130 #endif
16131 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16132 - movq %r10,%rcx /* fixup for C */
16133 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16134 call *sys_call_table(,%rax,8)
16135 movq %rax,RAX-ARGOFFSET(%rsp)
16136 /* Use IRET because user could have changed frame */
16137 @@ -628,6 +927,7 @@ GLOBAL(int_with_check)
16138 andl %edi,%edx
16139 jnz int_careful
16140 andl $~TS_COMPAT,TI_status(%rcx)
16141 + pax_erase_kstack
16142 jmp retint_swapgs
16143
16144 /* Either reschedule or signal or syscall exit tracking needed. */
16145 @@ -674,7 +974,7 @@ int_restore_rest:
16146 TRACE_IRQS_OFF
16147 jmp int_with_check
16148 CFI_ENDPROC
16149 -END(system_call)
16150 +ENDPROC(system_call)
16151
16152 /*
16153 * Certain special system calls that need to save a complete full stack frame.
16154 @@ -690,7 +990,7 @@ ENTRY(\label)
16155 call \func
16156 jmp ptregscall_common
16157 CFI_ENDPROC
16158 -END(\label)
16159 +ENDPROC(\label)
16160 .endm
16161
16162 PTREGSCALL stub_clone, sys_clone, %r8
16163 @@ -708,9 +1008,10 @@ ENTRY(ptregscall_common)
16164 movq_cfi_restore R12+8, r12
16165 movq_cfi_restore RBP+8, rbp
16166 movq_cfi_restore RBX+8, rbx
16167 + pax_force_retaddr
16168 ret $REST_SKIP /* pop extended registers */
16169 CFI_ENDPROC
16170 -END(ptregscall_common)
16171 +ENDPROC(ptregscall_common)
16172
16173 ENTRY(stub_execve)
16174 CFI_STARTPROC
16175 @@ -725,7 +1026,7 @@ ENTRY(stub_execve)
16176 RESTORE_REST
16177 jmp int_ret_from_sys_call
16178 CFI_ENDPROC
16179 -END(stub_execve)
16180 +ENDPROC(stub_execve)
16181
16182 /*
16183 * sigreturn is special because it needs to restore all registers on return.
16184 @@ -743,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
16185 RESTORE_REST
16186 jmp int_ret_from_sys_call
16187 CFI_ENDPROC
16188 -END(stub_rt_sigreturn)
16189 +ENDPROC(stub_rt_sigreturn)
16190
16191 #ifdef CONFIG_X86_X32_ABI
16192 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
16193 @@ -812,7 +1113,7 @@ vector=vector+1
16194 2: jmp common_interrupt
16195 .endr
16196 CFI_ENDPROC
16197 -END(irq_entries_start)
16198 +ENDPROC(irq_entries_start)
16199
16200 .previous
16201 END(interrupt)
16202 @@ -832,6 +1133,16 @@ END(interrupt)
16203 subq $ORIG_RAX-RBP, %rsp
16204 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
16205 SAVE_ARGS_IRQ
16206 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16207 + testb $3, CS(%rdi)
16208 + jnz 1f
16209 + pax_enter_kernel
16210 + jmp 2f
16211 +1: pax_enter_kernel_user
16212 +2:
16213 +#else
16214 + pax_enter_kernel
16215 +#endif
16216 call \func
16217 .endm
16218
16219 @@ -863,7 +1174,7 @@ ret_from_intr:
16220
16221 exit_intr:
16222 GET_THREAD_INFO(%rcx)
16223 - testl $3,CS-ARGOFFSET(%rsp)
16224 + testb $3,CS-ARGOFFSET(%rsp)
16225 je retint_kernel
16226
16227 /* Interrupt came from user space */
16228 @@ -885,12 +1196,15 @@ retint_swapgs: /* return to user-space */
16229 * The iretq could re-enable interrupts:
16230 */
16231 DISABLE_INTERRUPTS(CLBR_ANY)
16232 + pax_exit_kernel_user
16233 TRACE_IRQS_IRETQ
16234 SWAPGS
16235 jmp restore_args
16236
16237 retint_restore_args: /* return to kernel space */
16238 DISABLE_INTERRUPTS(CLBR_ANY)
16239 + pax_exit_kernel
16240 + pax_force_retaddr RIP-ARGOFFSET
16241 /*
16242 * The iretq could re-enable interrupts:
16243 */
16244 @@ -979,7 +1293,7 @@ ENTRY(retint_kernel)
16245 #endif
16246
16247 CFI_ENDPROC
16248 -END(common_interrupt)
16249 +ENDPROC(common_interrupt)
16250 /*
16251 * End of kprobes section
16252 */
16253 @@ -996,7 +1310,7 @@ ENTRY(\sym)
16254 interrupt \do_sym
16255 jmp ret_from_intr
16256 CFI_ENDPROC
16257 -END(\sym)
16258 +ENDPROC(\sym)
16259 .endm
16260
16261 #ifdef CONFIG_SMP
16262 @@ -1069,12 +1383,22 @@ ENTRY(\sym)
16263 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16264 call error_entry
16265 DEFAULT_FRAME 0
16266 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16267 + testb $3, CS(%rsp)
16268 + jnz 1f
16269 + pax_enter_kernel
16270 + jmp 2f
16271 +1: pax_enter_kernel_user
16272 +2:
16273 +#else
16274 + pax_enter_kernel
16275 +#endif
16276 movq %rsp,%rdi /* pt_regs pointer */
16277 xorl %esi,%esi /* no error code */
16278 call \do_sym
16279 jmp error_exit /* %ebx: no swapgs flag */
16280 CFI_ENDPROC
16281 -END(\sym)
16282 +ENDPROC(\sym)
16283 .endm
16284
16285 .macro paranoidzeroentry sym do_sym
16286 @@ -1086,15 +1410,25 @@ ENTRY(\sym)
16287 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16288 call save_paranoid
16289 TRACE_IRQS_OFF
16290 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16291 + testb $3, CS(%rsp)
16292 + jnz 1f
16293 + pax_enter_kernel
16294 + jmp 2f
16295 +1: pax_enter_kernel_user
16296 +2:
16297 +#else
16298 + pax_enter_kernel
16299 +#endif
16300 movq %rsp,%rdi /* pt_regs pointer */
16301 xorl %esi,%esi /* no error code */
16302 call \do_sym
16303 jmp paranoid_exit /* %ebx: no swapgs flag */
16304 CFI_ENDPROC
16305 -END(\sym)
16306 +ENDPROC(\sym)
16307 .endm
16308
16309 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
16310 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
16311 .macro paranoidzeroentry_ist sym do_sym ist
16312 ENTRY(\sym)
16313 INTR_FRAME
16314 @@ -1104,14 +1438,30 @@ ENTRY(\sym)
16315 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16316 call save_paranoid
16317 TRACE_IRQS_OFF
16318 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16319 + testb $3, CS(%rsp)
16320 + jnz 1f
16321 + pax_enter_kernel
16322 + jmp 2f
16323 +1: pax_enter_kernel_user
16324 +2:
16325 +#else
16326 + pax_enter_kernel
16327 +#endif
16328 movq %rsp,%rdi /* pt_regs pointer */
16329 xorl %esi,%esi /* no error code */
16330 +#ifdef CONFIG_SMP
16331 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
16332 + lea init_tss(%r12), %r12
16333 +#else
16334 + lea init_tss(%rip), %r12
16335 +#endif
16336 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16337 call \do_sym
16338 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16339 jmp paranoid_exit /* %ebx: no swapgs flag */
16340 CFI_ENDPROC
16341 -END(\sym)
16342 +ENDPROC(\sym)
16343 .endm
16344
16345 .macro errorentry sym do_sym
16346 @@ -1122,13 +1472,23 @@ ENTRY(\sym)
16347 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16348 call error_entry
16349 DEFAULT_FRAME 0
16350 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16351 + testb $3, CS(%rsp)
16352 + jnz 1f
16353 + pax_enter_kernel
16354 + jmp 2f
16355 +1: pax_enter_kernel_user
16356 +2:
16357 +#else
16358 + pax_enter_kernel
16359 +#endif
16360 movq %rsp,%rdi /* pt_regs pointer */
16361 movq ORIG_RAX(%rsp),%rsi /* get error code */
16362 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16363 call \do_sym
16364 jmp error_exit /* %ebx: no swapgs flag */
16365 CFI_ENDPROC
16366 -END(\sym)
16367 +ENDPROC(\sym)
16368 .endm
16369
16370 /* error code is on the stack already */
16371 @@ -1141,13 +1501,23 @@ ENTRY(\sym)
16372 call save_paranoid
16373 DEFAULT_FRAME 0
16374 TRACE_IRQS_OFF
16375 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16376 + testb $3, CS(%rsp)
16377 + jnz 1f
16378 + pax_enter_kernel
16379 + jmp 2f
16380 +1: pax_enter_kernel_user
16381 +2:
16382 +#else
16383 + pax_enter_kernel
16384 +#endif
16385 movq %rsp,%rdi /* pt_regs pointer */
16386 movq ORIG_RAX(%rsp),%rsi /* get error code */
16387 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16388 call \do_sym
16389 jmp paranoid_exit /* %ebx: no swapgs flag */
16390 CFI_ENDPROC
16391 -END(\sym)
16392 +ENDPROC(\sym)
16393 .endm
16394
16395 zeroentry divide_error do_divide_error
16396 @@ -1177,9 +1547,10 @@ gs_change:
16397 2: mfence /* workaround */
16398 SWAPGS
16399 popfq_cfi
16400 + pax_force_retaddr
16401 ret
16402 CFI_ENDPROC
16403 -END(native_load_gs_index)
16404 +ENDPROC(native_load_gs_index)
16405
16406 .section __ex_table,"a"
16407 .align 8
16408 @@ -1201,13 +1572,14 @@ ENTRY(kernel_thread_helper)
16409 * Here we are in the child and the registers are set as they were
16410 * at kernel_thread() invocation in the parent.
16411 */
16412 + pax_force_fptr %rsi
16413 call *%rsi
16414 # exit
16415 mov %eax, %edi
16416 call do_exit
16417 ud2 # padding for call trace
16418 CFI_ENDPROC
16419 -END(kernel_thread_helper)
16420 +ENDPROC(kernel_thread_helper)
16421
16422 /*
16423 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16424 @@ -1234,11 +1606,11 @@ ENTRY(kernel_execve)
16425 RESTORE_REST
16426 testq %rax,%rax
16427 je int_ret_from_sys_call
16428 - RESTORE_ARGS
16429 UNFAKE_STACK_FRAME
16430 + pax_force_retaddr
16431 ret
16432 CFI_ENDPROC
16433 -END(kernel_execve)
16434 +ENDPROC(kernel_execve)
16435
16436 /* Call softirq on interrupt stack. Interrupts are off. */
16437 ENTRY(call_softirq)
16438 @@ -1256,9 +1628,10 @@ ENTRY(call_softirq)
16439 CFI_DEF_CFA_REGISTER rsp
16440 CFI_ADJUST_CFA_OFFSET -8
16441 decl PER_CPU_VAR(irq_count)
16442 + pax_force_retaddr
16443 ret
16444 CFI_ENDPROC
16445 -END(call_softirq)
16446 +ENDPROC(call_softirq)
16447
16448 #ifdef CONFIG_XEN
16449 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16450 @@ -1296,7 +1669,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16451 decl PER_CPU_VAR(irq_count)
16452 jmp error_exit
16453 CFI_ENDPROC
16454 -END(xen_do_hypervisor_callback)
16455 +ENDPROC(xen_do_hypervisor_callback)
16456
16457 /*
16458 * Hypervisor uses this for application faults while it executes.
16459 @@ -1355,7 +1728,7 @@ ENTRY(xen_failsafe_callback)
16460 SAVE_ALL
16461 jmp error_exit
16462 CFI_ENDPROC
16463 -END(xen_failsafe_callback)
16464 +ENDPROC(xen_failsafe_callback)
16465
16466 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16467 xen_hvm_callback_vector xen_evtchn_do_upcall
16468 @@ -1404,16 +1777,31 @@ ENTRY(paranoid_exit)
16469 TRACE_IRQS_OFF
16470 testl %ebx,%ebx /* swapgs needed? */
16471 jnz paranoid_restore
16472 - testl $3,CS(%rsp)
16473 + testb $3,CS(%rsp)
16474 jnz paranoid_userspace
16475 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16476 + pax_exit_kernel
16477 + TRACE_IRQS_IRETQ 0
16478 + SWAPGS_UNSAFE_STACK
16479 + RESTORE_ALL 8
16480 + pax_force_retaddr_bts
16481 + jmp irq_return
16482 +#endif
16483 paranoid_swapgs:
16484 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16485 + pax_exit_kernel_user
16486 +#else
16487 + pax_exit_kernel
16488 +#endif
16489 TRACE_IRQS_IRETQ 0
16490 SWAPGS_UNSAFE_STACK
16491 RESTORE_ALL 8
16492 jmp irq_return
16493 paranoid_restore:
16494 + pax_exit_kernel
16495 TRACE_IRQS_IRETQ 0
16496 RESTORE_ALL 8
16497 + pax_force_retaddr_bts
16498 jmp irq_return
16499 paranoid_userspace:
16500 GET_THREAD_INFO(%rcx)
16501 @@ -1442,7 +1830,7 @@ paranoid_schedule:
16502 TRACE_IRQS_OFF
16503 jmp paranoid_userspace
16504 CFI_ENDPROC
16505 -END(paranoid_exit)
16506 +ENDPROC(paranoid_exit)
16507
16508 /*
16509 * Exception entry point. This expects an error code/orig_rax on the stack.
16510 @@ -1469,12 +1857,13 @@ ENTRY(error_entry)
16511 movq_cfi r14, R14+8
16512 movq_cfi r15, R15+8
16513 xorl %ebx,%ebx
16514 - testl $3,CS+8(%rsp)
16515 + testb $3,CS+8(%rsp)
16516 je error_kernelspace
16517 error_swapgs:
16518 SWAPGS
16519 error_sti:
16520 TRACE_IRQS_OFF
16521 + pax_force_retaddr_bts
16522 ret
16523
16524 /*
16525 @@ -1501,7 +1890,7 @@ bstep_iret:
16526 movq %rcx,RIP+8(%rsp)
16527 jmp error_swapgs
16528 CFI_ENDPROC
16529 -END(error_entry)
16530 +ENDPROC(error_entry)
16531
16532
16533 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16534 @@ -1521,7 +1910,7 @@ ENTRY(error_exit)
16535 jnz retint_careful
16536 jmp retint_swapgs
16537 CFI_ENDPROC
16538 -END(error_exit)
16539 +ENDPROC(error_exit)
16540
16541 /*
16542 * Test if a given stack is an NMI stack or not.
16543 @@ -1579,9 +1968,11 @@ ENTRY(nmi)
16544 * If %cs was not the kernel segment, then the NMI triggered in user
16545 * space, which means it is definitely not nested.
16546 */
16547 + cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16548 + je 1f
16549 cmpl $__KERNEL_CS, 16(%rsp)
16550 jne first_nmi
16551 -
16552 +1:
16553 /*
16554 * Check the special variable on the stack to see if NMIs are
16555 * executing.
16556 @@ -1728,6 +2119,16 @@ end_repeat_nmi:
16557 */
16558 call save_paranoid
16559 DEFAULT_FRAME 0
16560 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16561 + testb $3, CS(%rsp)
16562 + jnz 1f
16563 + pax_enter_kernel
16564 + jmp 2f
16565 +1: pax_enter_kernel_user
16566 +2:
16567 +#else
16568 + pax_enter_kernel
16569 +#endif
16570 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16571 movq %rsp,%rdi
16572 movq $-1,%rsi
16573 @@ -1735,21 +2136,32 @@ end_repeat_nmi:
16574 testl %ebx,%ebx /* swapgs needed? */
16575 jnz nmi_restore
16576 nmi_swapgs:
16577 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16578 + pax_exit_kernel_user
16579 +#else
16580 + pax_exit_kernel
16581 +#endif
16582 SWAPGS_UNSAFE_STACK
16583 + RESTORE_ALL 8
16584 + /* Clear the NMI executing stack variable */
16585 + movq $0, 10*8(%rsp)
16586 + jmp irq_return
16587 nmi_restore:
16588 + pax_exit_kernel
16589 RESTORE_ALL 8
16590 + pax_force_retaddr_bts
16591 /* Clear the NMI executing stack variable */
16592 movq $0, 10*8(%rsp)
16593 jmp irq_return
16594 CFI_ENDPROC
16595 -END(nmi)
16596 +ENDPROC(nmi)
16597
16598 ENTRY(ignore_sysret)
16599 CFI_STARTPROC
16600 mov $-ENOSYS,%eax
16601 sysret
16602 CFI_ENDPROC
16603 -END(ignore_sysret)
16604 +ENDPROC(ignore_sysret)
16605
16606 /*
16607 * End of kprobes section
16608 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16609 index c9a281f..ce2f317 100644
16610 --- a/arch/x86/kernel/ftrace.c
16611 +++ b/arch/x86/kernel/ftrace.c
16612 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16613 static const void *mod_code_newcode; /* holds the text to write to the IP */
16614
16615 static unsigned nmi_wait_count;
16616 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
16617 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16618
16619 int ftrace_arch_read_dyn_info(char *buf, int size)
16620 {
16621 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16622
16623 r = snprintf(buf, size, "%u %u",
16624 nmi_wait_count,
16625 - atomic_read(&nmi_update_count));
16626 + atomic_read_unchecked(&nmi_update_count));
16627 return r;
16628 }
16629
16630 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
16631
16632 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16633 smp_rmb();
16634 + pax_open_kernel();
16635 ftrace_mod_code();
16636 - atomic_inc(&nmi_update_count);
16637 + pax_close_kernel();
16638 + atomic_inc_unchecked(&nmi_update_count);
16639 }
16640 /* Must have previous changes seen before executions */
16641 smp_mb();
16642 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
16643 {
16644 unsigned char replaced[MCOUNT_INSN_SIZE];
16645
16646 + ip = ktla_ktva(ip);
16647 +
16648 /*
16649 * Note: Due to modules and __init, code can
16650 * disappear and change, we need to protect against faulting
16651 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16652 unsigned char old[MCOUNT_INSN_SIZE], *new;
16653 int ret;
16654
16655 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16656 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16657 new = ftrace_call_replace(ip, (unsigned long)func);
16658 ret = ftrace_modify_code(ip, old, new);
16659
16660 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16661 {
16662 unsigned char code[MCOUNT_INSN_SIZE];
16663
16664 + ip = ktla_ktva(ip);
16665 +
16666 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16667 return -EFAULT;
16668
16669 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16670 index 51ff186..9e77418 100644
16671 --- a/arch/x86/kernel/head32.c
16672 +++ b/arch/x86/kernel/head32.c
16673 @@ -19,6 +19,7 @@
16674 #include <asm/io_apic.h>
16675 #include <asm/bios_ebda.h>
16676 #include <asm/tlbflush.h>
16677 +#include <asm/boot.h>
16678
16679 static void __init i386_default_early_setup(void)
16680 {
16681 @@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
16682
16683 void __init i386_start_kernel(void)
16684 {
16685 - memblock_reserve(__pa_symbol(&_text),
16686 - __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16687 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16688
16689 #ifdef CONFIG_BLK_DEV_INITRD
16690 /* Reserve INITRD */
16691 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16692 index ce0be7c..c41476e 100644
16693 --- a/arch/x86/kernel/head_32.S
16694 +++ b/arch/x86/kernel/head_32.S
16695 @@ -25,6 +25,12 @@
16696 /* Physical address */
16697 #define pa(X) ((X) - __PAGE_OFFSET)
16698
16699 +#ifdef CONFIG_PAX_KERNEXEC
16700 +#define ta(X) (X)
16701 +#else
16702 +#define ta(X) ((X) - __PAGE_OFFSET)
16703 +#endif
16704 +
16705 /*
16706 * References to members of the new_cpu_data structure.
16707 */
16708 @@ -54,11 +60,7 @@
16709 * and small than max_low_pfn, otherwise will waste some page table entries
16710 */
16711
16712 -#if PTRS_PER_PMD > 1
16713 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16714 -#else
16715 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16716 -#endif
16717 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16718
16719 /* Number of possible pages in the lowmem region */
16720 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16721 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16722 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16723
16724 /*
16725 + * Real beginning of normal "text" segment
16726 + */
16727 +ENTRY(stext)
16728 +ENTRY(_stext)
16729 +
16730 +/*
16731 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16732 * %esi points to the real-mode code as a 32-bit pointer.
16733 * CS and DS must be 4 GB flat segments, but we don't depend on
16734 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16735 * can.
16736 */
16737 __HEAD
16738 +
16739 +#ifdef CONFIG_PAX_KERNEXEC
16740 + jmp startup_32
16741 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16742 +.fill PAGE_SIZE-5,1,0xcc
16743 +#endif
16744 +
16745 ENTRY(startup_32)
16746 movl pa(stack_start),%ecx
16747
16748 @@ -105,6 +120,57 @@ ENTRY(startup_32)
16749 2:
16750 leal -__PAGE_OFFSET(%ecx),%esp
16751
16752 +#ifdef CONFIG_SMP
16753 + movl $pa(cpu_gdt_table),%edi
16754 + movl $__per_cpu_load,%eax
16755 + movw %ax,__KERNEL_PERCPU + 2(%edi)
16756 + rorl $16,%eax
16757 + movb %al,__KERNEL_PERCPU + 4(%edi)
16758 + movb %ah,__KERNEL_PERCPU + 7(%edi)
16759 + movl $__per_cpu_end - 1,%eax
16760 + subl $__per_cpu_start,%eax
16761 + movw %ax,__KERNEL_PERCPU + 0(%edi)
16762 +#endif
16763 +
16764 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16765 + movl $NR_CPUS,%ecx
16766 + movl $pa(cpu_gdt_table),%edi
16767 +1:
16768 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16769 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16770 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16771 + addl $PAGE_SIZE_asm,%edi
16772 + loop 1b
16773 +#endif
16774 +
16775 +#ifdef CONFIG_PAX_KERNEXEC
16776 + movl $pa(boot_gdt),%edi
16777 + movl $__LOAD_PHYSICAL_ADDR,%eax
16778 + movw %ax,__BOOT_CS + 2(%edi)
16779 + rorl $16,%eax
16780 + movb %al,__BOOT_CS + 4(%edi)
16781 + movb %ah,__BOOT_CS + 7(%edi)
16782 + rorl $16,%eax
16783 +
16784 + ljmp $(__BOOT_CS),$1f
16785 +1:
16786 +
16787 + movl $NR_CPUS,%ecx
16788 + movl $pa(cpu_gdt_table),%edi
16789 + addl $__PAGE_OFFSET,%eax
16790 +1:
16791 + movw %ax,__KERNEL_CS + 2(%edi)
16792 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16793 + rorl $16,%eax
16794 + movb %al,__KERNEL_CS + 4(%edi)
16795 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16796 + movb %ah,__KERNEL_CS + 7(%edi)
16797 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16798 + rorl $16,%eax
16799 + addl $PAGE_SIZE_asm,%edi
16800 + loop 1b
16801 +#endif
16802 +
16803 /*
16804 * Clear BSS first so that there are no surprises...
16805 */
16806 @@ -195,8 +261,11 @@ ENTRY(startup_32)
16807 movl %eax, pa(max_pfn_mapped)
16808
16809 /* Do early initialization of the fixmap area */
16810 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16811 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16812 +#ifdef CONFIG_COMPAT_VDSO
16813 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16814 +#else
16815 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16816 +#endif
16817 #else /* Not PAE */
16818
16819 page_pde_offset = (__PAGE_OFFSET >> 20);
16820 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16821 movl %eax, pa(max_pfn_mapped)
16822
16823 /* Do early initialization of the fixmap area */
16824 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16825 - movl %eax,pa(initial_page_table+0xffc)
16826 +#ifdef CONFIG_COMPAT_VDSO
16827 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16828 +#else
16829 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16830 +#endif
16831 #endif
16832
16833 #ifdef CONFIG_PARAVIRT
16834 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16835 cmpl $num_subarch_entries, %eax
16836 jae bad_subarch
16837
16838 - movl pa(subarch_entries)(,%eax,4), %eax
16839 - subl $__PAGE_OFFSET, %eax
16840 - jmp *%eax
16841 + jmp *pa(subarch_entries)(,%eax,4)
16842
16843 bad_subarch:
16844 WEAK(lguest_entry)
16845 @@ -255,10 +325,10 @@ WEAK(xen_entry)
16846 __INITDATA
16847
16848 subarch_entries:
16849 - .long default_entry /* normal x86/PC */
16850 - .long lguest_entry /* lguest hypervisor */
16851 - .long xen_entry /* Xen hypervisor */
16852 - .long default_entry /* Moorestown MID */
16853 + .long ta(default_entry) /* normal x86/PC */
16854 + .long ta(lguest_entry) /* lguest hypervisor */
16855 + .long ta(xen_entry) /* Xen hypervisor */
16856 + .long ta(default_entry) /* Moorestown MID */
16857 num_subarch_entries = (. - subarch_entries) / 4
16858 .previous
16859 #else
16860 @@ -312,6 +382,7 @@ default_entry:
16861 orl %edx,%eax
16862 movl %eax,%cr4
16863
16864 +#ifdef CONFIG_X86_PAE
16865 testb $X86_CR4_PAE, %al # check if PAE is enabled
16866 jz 6f
16867
16868 @@ -340,6 +411,9 @@ default_entry:
16869 /* Make changes effective */
16870 wrmsr
16871
16872 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16873 +#endif
16874 +
16875 6:
16876
16877 /*
16878 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
16879 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16880 movl %eax,%ss # after changing gdt.
16881
16882 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
16883 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16884 movl %eax,%ds
16885 movl %eax,%es
16886
16887 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
16888 */
16889 cmpb $0,ready
16890 jne 1f
16891 - movl $gdt_page,%eax
16892 + movl $cpu_gdt_table,%eax
16893 movl $stack_canary,%ecx
16894 +#ifdef CONFIG_SMP
16895 + addl $__per_cpu_load,%ecx
16896 +#endif
16897 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
16898 shrl $16, %ecx
16899 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
16900 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
16901 1:
16902 -#endif
16903 movl $(__KERNEL_STACK_CANARY),%eax
16904 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16905 + movl $(__USER_DS),%eax
16906 +#else
16907 + xorl %eax,%eax
16908 +#endif
16909 movl %eax,%gs
16910
16911 xorl %eax,%eax # Clear LDT
16912 @@ -558,22 +639,22 @@ early_page_fault:
16913 jmp early_fault
16914
16915 early_fault:
16916 - cld
16917 #ifdef CONFIG_PRINTK
16918 + cmpl $1,%ss:early_recursion_flag
16919 + je hlt_loop
16920 + incl %ss:early_recursion_flag
16921 + cld
16922 pusha
16923 movl $(__KERNEL_DS),%eax
16924 movl %eax,%ds
16925 movl %eax,%es
16926 - cmpl $2,early_recursion_flag
16927 - je hlt_loop
16928 - incl early_recursion_flag
16929 movl %cr2,%eax
16930 pushl %eax
16931 pushl %edx /* trapno */
16932 pushl $fault_msg
16933 call printk
16934 +; call dump_stack
16935 #endif
16936 - call dump_stack
16937 hlt_loop:
16938 hlt
16939 jmp hlt_loop
16940 @@ -581,8 +662,11 @@ hlt_loop:
16941 /* This is the default interrupt "handler" :-) */
16942 ALIGN
16943 ignore_int:
16944 - cld
16945 #ifdef CONFIG_PRINTK
16946 + cmpl $2,%ss:early_recursion_flag
16947 + je hlt_loop
16948 + incl %ss:early_recursion_flag
16949 + cld
16950 pushl %eax
16951 pushl %ecx
16952 pushl %edx
16953 @@ -591,9 +675,6 @@ ignore_int:
16954 movl $(__KERNEL_DS),%eax
16955 movl %eax,%ds
16956 movl %eax,%es
16957 - cmpl $2,early_recursion_flag
16958 - je hlt_loop
16959 - incl early_recursion_flag
16960 pushl 16(%esp)
16961 pushl 24(%esp)
16962 pushl 32(%esp)
16963 @@ -622,29 +703,43 @@ ENTRY(initial_code)
16964 /*
16965 * BSS section
16966 */
16967 -__PAGE_ALIGNED_BSS
16968 - .align PAGE_SIZE
16969 #ifdef CONFIG_X86_PAE
16970 +.section .initial_pg_pmd,"a",@progbits
16971 initial_pg_pmd:
16972 .fill 1024*KPMDS,4,0
16973 #else
16974 +.section .initial_page_table,"a",@progbits
16975 ENTRY(initial_page_table)
16976 .fill 1024,4,0
16977 #endif
16978 +.section .initial_pg_fixmap,"a",@progbits
16979 initial_pg_fixmap:
16980 .fill 1024,4,0
16981 +.section .empty_zero_page,"a",@progbits
16982 ENTRY(empty_zero_page)
16983 .fill 4096,1,0
16984 +.section .swapper_pg_dir,"a",@progbits
16985 ENTRY(swapper_pg_dir)
16986 +#ifdef CONFIG_X86_PAE
16987 + .fill 4,8,0
16988 +#else
16989 .fill 1024,4,0
16990 +#endif
16991 +
16992 +/*
16993 + * The IDT has to be page-aligned to simplify the Pentium
16994 + * F0 0F bug workaround.. We have a special link segment
16995 + * for this.
16996 + */
16997 +.section .idt,"a",@progbits
16998 +ENTRY(idt_table)
16999 + .fill 256,8,0
17000
17001 /*
17002 * This starts the data section.
17003 */
17004 #ifdef CONFIG_X86_PAE
17005 -__PAGE_ALIGNED_DATA
17006 - /* Page-aligned for the benefit of paravirt? */
17007 - .align PAGE_SIZE
17008 +.section .initial_page_table,"a",@progbits
17009 ENTRY(initial_page_table)
17010 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17011 # if KPMDS == 3
17012 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
17013 # error "Kernel PMDs should be 1, 2 or 3"
17014 # endif
17015 .align PAGE_SIZE /* needs to be page-sized too */
17016 +
17017 +#ifdef CONFIG_PAX_PER_CPU_PGD
17018 +ENTRY(cpu_pgd)
17019 + .rept NR_CPUS
17020 + .fill 4,8,0
17021 + .endr
17022 +#endif
17023 +
17024 #endif
17025
17026 .data
17027 .balign 4
17028 ENTRY(stack_start)
17029 - .long init_thread_union+THREAD_SIZE
17030 + .long init_thread_union+THREAD_SIZE-8
17031
17032 +ready: .byte 0
17033 +
17034 +.section .rodata,"a",@progbits
17035 early_recursion_flag:
17036 .long 0
17037
17038 -ready: .byte 0
17039 -
17040 int_msg:
17041 .asciz "Unknown interrupt or fault at: %p %p %p\n"
17042
17043 @@ -707,7 +811,7 @@ fault_msg:
17044 .word 0 # 32 bit align gdt_desc.address
17045 boot_gdt_descr:
17046 .word __BOOT_DS+7
17047 - .long boot_gdt - __PAGE_OFFSET
17048 + .long pa(boot_gdt)
17049
17050 .word 0 # 32-bit align idt_desc.address
17051 idt_descr:
17052 @@ -718,7 +822,7 @@ idt_descr:
17053 .word 0 # 32 bit align gdt_desc.address
17054 ENTRY(early_gdt_descr)
17055 .word GDT_ENTRIES*8-1
17056 - .long gdt_page /* Overwritten for secondary CPUs */
17057 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
17058
17059 /*
17060 * The boot_gdt must mirror the equivalent in setup.S and is
17061 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
17062 .align L1_CACHE_BYTES
17063 ENTRY(boot_gdt)
17064 .fill GDT_ENTRY_BOOT_CS,8,0
17065 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17066 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17067 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17068 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17069 +
17070 + .align PAGE_SIZE_asm
17071 +ENTRY(cpu_gdt_table)
17072 + .rept NR_CPUS
17073 + .quad 0x0000000000000000 /* NULL descriptor */
17074 + .quad 0x0000000000000000 /* 0x0b reserved */
17075 + .quad 0x0000000000000000 /* 0x13 reserved */
17076 + .quad 0x0000000000000000 /* 0x1b reserved */
17077 +
17078 +#ifdef CONFIG_PAX_KERNEXEC
17079 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17080 +#else
17081 + .quad 0x0000000000000000 /* 0x20 unused */
17082 +#endif
17083 +
17084 + .quad 0x0000000000000000 /* 0x28 unused */
17085 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17086 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17087 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17088 + .quad 0x0000000000000000 /* 0x4b reserved */
17089 + .quad 0x0000000000000000 /* 0x53 reserved */
17090 + .quad 0x0000000000000000 /* 0x5b reserved */
17091 +
17092 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17093 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17094 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17095 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17096 +
17097 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17098 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17099 +
17100 + /*
17101 + * Segments used for calling PnP BIOS have byte granularity.
17102 + * The code segments and data segments have fixed 64k limits,
17103 + * the transfer segment sizes are set at run time.
17104 + */
17105 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
17106 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
17107 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
17108 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
17109 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
17110 +
17111 + /*
17112 + * The APM segments have byte granularity and their bases
17113 + * are set at run time. All have 64k limits.
17114 + */
17115 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17116 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17117 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
17118 +
17119 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17120 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17121 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17122 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17123 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17124 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17125 +
17126 + /* Be sure this is zeroed to avoid false validations in Xen */
17127 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17128 + .endr
17129 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17130 index 40f4eb3..6d24d9d 100644
17131 --- a/arch/x86/kernel/head_64.S
17132 +++ b/arch/x86/kernel/head_64.S
17133 @@ -19,6 +19,8 @@
17134 #include <asm/cache.h>
17135 #include <asm/processor-flags.h>
17136 #include <asm/percpu.h>
17137 +#include <asm/cpufeature.h>
17138 +#include <asm/alternative-asm.h>
17139
17140 #ifdef CONFIG_PARAVIRT
17141 #include <asm/asm-offsets.h>
17142 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17143 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17144 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17145 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17146 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
17147 +L3_VMALLOC_START = pud_index(VMALLOC_START)
17148 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
17149 +L3_VMALLOC_END = pud_index(VMALLOC_END)
17150 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17151 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17152
17153 .text
17154 __HEAD
17155 @@ -85,35 +93,23 @@ startup_64:
17156 */
17157 addq %rbp, init_level4_pgt + 0(%rip)
17158 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17159 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17160 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17161 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17162 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17163
17164 addq %rbp, level3_ident_pgt + 0(%rip)
17165 +#ifndef CONFIG_XEN
17166 + addq %rbp, level3_ident_pgt + 8(%rip)
17167 +#endif
17168
17169 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17170 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17171 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17172 +
17173 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17174 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17175
17176 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17177 -
17178 - /* Add an Identity mapping if I am above 1G */
17179 - leaq _text(%rip), %rdi
17180 - andq $PMD_PAGE_MASK, %rdi
17181 -
17182 - movq %rdi, %rax
17183 - shrq $PUD_SHIFT, %rax
17184 - andq $(PTRS_PER_PUD - 1), %rax
17185 - jz ident_complete
17186 -
17187 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17188 - leaq level3_ident_pgt(%rip), %rbx
17189 - movq %rdx, 0(%rbx, %rax, 8)
17190 -
17191 - movq %rdi, %rax
17192 - shrq $PMD_SHIFT, %rax
17193 - andq $(PTRS_PER_PMD - 1), %rax
17194 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17195 - leaq level2_spare_pgt(%rip), %rbx
17196 - movq %rdx, 0(%rbx, %rax, 8)
17197 -ident_complete:
17198 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17199
17200 /*
17201 * Fixup the kernel text+data virtual addresses. Note that
17202 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
17203 * after the boot processor executes this code.
17204 */
17205
17206 - /* Enable PAE mode and PGE */
17207 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17208 + /* Enable PAE mode and PSE/PGE */
17209 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17210 movq %rax, %cr4
17211
17212 /* Setup early boot stage 4 level pagetables. */
17213 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
17214 movl $MSR_EFER, %ecx
17215 rdmsr
17216 btsl $_EFER_SCE, %eax /* Enable System Call */
17217 - btl $20,%edi /* No Execute supported? */
17218 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17219 jnc 1f
17220 btsl $_EFER_NX, %eax
17221 + leaq init_level4_pgt(%rip), %rdi
17222 +#ifndef CONFIG_EFI
17223 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17224 +#endif
17225 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17226 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17227 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17228 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
17229 1: wrmsr /* Make changes effective */
17230
17231 /* Setup cr0 */
17232 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
17233 * jump. In addition we need to ensure %cs is set so we make this
17234 * a far return.
17235 */
17236 + pax_set_fptr_mask
17237 movq initial_code(%rip),%rax
17238 pushq $0 # fake return address to stop unwinder
17239 pushq $__KERNEL_CS # set correct cs
17240 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
17241 bad_address:
17242 jmp bad_address
17243
17244 - .section ".init.text","ax"
17245 + __INIT
17246 #ifdef CONFIG_EARLY_PRINTK
17247 .globl early_idt_handlers
17248 early_idt_handlers:
17249 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
17250 #endif /* EARLY_PRINTK */
17251 1: hlt
17252 jmp 1b
17253 + .previous
17254
17255 #ifdef CONFIG_EARLY_PRINTK
17256 + __INITDATA
17257 early_recursion_flag:
17258 .long 0
17259 + .previous
17260
17261 + .section .rodata,"a",@progbits
17262 early_idt_msg:
17263 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17264 early_idt_ripmsg:
17265 .asciz "RIP %s\n"
17266 + .previous
17267 #endif /* CONFIG_EARLY_PRINTK */
17268 - .previous
17269
17270 + .section .rodata,"a",@progbits
17271 #define NEXT_PAGE(name) \
17272 .balign PAGE_SIZE; \
17273 ENTRY(name)
17274 @@ -338,7 +348,6 @@ ENTRY(name)
17275 i = i + 1 ; \
17276 .endr
17277
17278 - .data
17279 /*
17280 * This default setting generates an ident mapping at address 0x100000
17281 * and a mapping for the kernel that precisely maps virtual address
17282 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
17283 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17284 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17285 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17286 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
17287 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17288 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
17289 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17290 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17291 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17292 .org init_level4_pgt + L4_START_KERNEL*8, 0
17293 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17294 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17295
17296 +#ifdef CONFIG_PAX_PER_CPU_PGD
17297 +NEXT_PAGE(cpu_pgd)
17298 + .rept NR_CPUS
17299 + .fill 512,8,0
17300 + .endr
17301 +#endif
17302 +
17303 NEXT_PAGE(level3_ident_pgt)
17304 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17305 +#ifdef CONFIG_XEN
17306 .fill 511,8,0
17307 +#else
17308 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17309 + .fill 510,8,0
17310 +#endif
17311 +
17312 +NEXT_PAGE(level3_vmalloc_start_pgt)
17313 + .fill 512,8,0
17314 +
17315 +NEXT_PAGE(level3_vmalloc_end_pgt)
17316 + .fill 512,8,0
17317 +
17318 +NEXT_PAGE(level3_vmemmap_pgt)
17319 + .fill L3_VMEMMAP_START,8,0
17320 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17321
17322 NEXT_PAGE(level3_kernel_pgt)
17323 .fill L3_START_KERNEL,8,0
17324 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
17325 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17326 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17327
17328 +NEXT_PAGE(level2_vmemmap_pgt)
17329 + .fill 512,8,0
17330 +
17331 NEXT_PAGE(level2_fixmap_pgt)
17332 - .fill 506,8,0
17333 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17334 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17335 - .fill 5,8,0
17336 + .fill 507,8,0
17337 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17338 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17339 + .fill 4,8,0
17340
17341 -NEXT_PAGE(level1_fixmap_pgt)
17342 +NEXT_PAGE(level1_vsyscall_pgt)
17343 .fill 512,8,0
17344
17345 -NEXT_PAGE(level2_ident_pgt)
17346 - /* Since I easily can, map the first 1G.
17347 + /* Since I easily can, map the first 2G.
17348 * Don't set NX because code runs from these pages.
17349 */
17350 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17351 +NEXT_PAGE(level2_ident_pgt)
17352 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17353
17354 NEXT_PAGE(level2_kernel_pgt)
17355 /*
17356 @@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
17357 * If you want to increase this then increase MODULES_VADDR
17358 * too.)
17359 */
17360 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17361 - KERNEL_IMAGE_SIZE/PMD_SIZE)
17362 -
17363 -NEXT_PAGE(level2_spare_pgt)
17364 - .fill 512, 8, 0
17365 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17366
17367 #undef PMDS
17368 #undef NEXT_PAGE
17369
17370 - .data
17371 + .align PAGE_SIZE
17372 +ENTRY(cpu_gdt_table)
17373 + .rept NR_CPUS
17374 + .quad 0x0000000000000000 /* NULL descriptor */
17375 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17376 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
17377 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
17378 + .quad 0x00cffb000000ffff /* __USER32_CS */
17379 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17380 + .quad 0x00affb000000ffff /* __USER_CS */
17381 +
17382 +#ifdef CONFIG_PAX_KERNEXEC
17383 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17384 +#else
17385 + .quad 0x0 /* unused */
17386 +#endif
17387 +
17388 + .quad 0,0 /* TSS */
17389 + .quad 0,0 /* LDT */
17390 + .quad 0,0,0 /* three TLS descriptors */
17391 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
17392 + /* asm/segment.h:GDT_ENTRIES must match this */
17393 +
17394 + /* zero the remaining page */
17395 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17396 + .endr
17397 +
17398 .align 16
17399 .globl early_gdt_descr
17400 early_gdt_descr:
17401 .word GDT_ENTRIES*8-1
17402 early_gdt_descr_base:
17403 - .quad INIT_PER_CPU_VAR(gdt_page)
17404 + .quad cpu_gdt_table
17405
17406 ENTRY(phys_base)
17407 /* This must match the first entry in level2_kernel_pgt */
17408 .quad 0x0000000000000000
17409
17410 #include "../../x86/xen/xen-head.S"
17411 -
17412 - .section .bss, "aw", @nobits
17413 +
17414 + .section .rodata,"a",@progbits
17415 .align L1_CACHE_BYTES
17416 ENTRY(idt_table)
17417 - .skip IDT_ENTRIES * 16
17418 + .fill 512,8,0
17419
17420 .align L1_CACHE_BYTES
17421 ENTRY(nmi_idt_table)
17422 - .skip IDT_ENTRIES * 16
17423 + .fill 512,8,0
17424
17425 __PAGE_ALIGNED_BSS
17426 .align PAGE_SIZE
17427 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17428 index 9c3bd4a..e1d9b35 100644
17429 --- a/arch/x86/kernel/i386_ksyms_32.c
17430 +++ b/arch/x86/kernel/i386_ksyms_32.c
17431 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17432 EXPORT_SYMBOL(cmpxchg8b_emu);
17433 #endif
17434
17435 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
17436 +
17437 /* Networking helper routines. */
17438 EXPORT_SYMBOL(csum_partial_copy_generic);
17439 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17440 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17441
17442 EXPORT_SYMBOL(__get_user_1);
17443 EXPORT_SYMBOL(__get_user_2);
17444 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17445
17446 EXPORT_SYMBOL(csum_partial);
17447 EXPORT_SYMBOL(empty_zero_page);
17448 +
17449 +#ifdef CONFIG_PAX_KERNEXEC
17450 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17451 +#endif
17452 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
17453 index 2d6e649..df6e1af 100644
17454 --- a/arch/x86/kernel/i387.c
17455 +++ b/arch/x86/kernel/i387.c
17456 @@ -59,7 +59,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
17457 static inline bool interrupted_user_mode(void)
17458 {
17459 struct pt_regs *regs = get_irq_regs();
17460 - return regs && user_mode_vm(regs);
17461 + return regs && user_mode(regs);
17462 }
17463
17464 /*
17465 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17466 index 36d1853..bf25736 100644
17467 --- a/arch/x86/kernel/i8259.c
17468 +++ b/arch/x86/kernel/i8259.c
17469 @@ -209,7 +209,7 @@ spurious_8259A_irq:
17470 "spurious 8259A interrupt: IRQ%d.\n", irq);
17471 spurious_irq_mask |= irqmask;
17472 }
17473 - atomic_inc(&irq_err_count);
17474 + atomic_inc_unchecked(&irq_err_count);
17475 /*
17476 * Theoretically we do not have to handle this IRQ,
17477 * but in Linux this does not cause problems and is
17478 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17479 index 43e9ccf..44ccf6f 100644
17480 --- a/arch/x86/kernel/init_task.c
17481 +++ b/arch/x86/kernel/init_task.c
17482 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17483 * way process stacks are handled. This is done by having a special
17484 * "init_task" linker map entry..
17485 */
17486 -union thread_union init_thread_union __init_task_data =
17487 - { INIT_THREAD_INFO(init_task) };
17488 +union thread_union init_thread_union __init_task_data;
17489
17490 /*
17491 * Initial task structure.
17492 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17493 * section. Since TSS's are completely CPU-local, we want them
17494 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17495 */
17496 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17497 -
17498 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17499 +EXPORT_SYMBOL(init_tss);
17500 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17501 index 8c96897..be66bfa 100644
17502 --- a/arch/x86/kernel/ioport.c
17503 +++ b/arch/x86/kernel/ioport.c
17504 @@ -6,6 +6,7 @@
17505 #include <linux/sched.h>
17506 #include <linux/kernel.h>
17507 #include <linux/capability.h>
17508 +#include <linux/security.h>
17509 #include <linux/errno.h>
17510 #include <linux/types.h>
17511 #include <linux/ioport.h>
17512 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17513
17514 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17515 return -EINVAL;
17516 +#ifdef CONFIG_GRKERNSEC_IO
17517 + if (turn_on && grsec_disable_privio) {
17518 + gr_handle_ioperm();
17519 + return -EPERM;
17520 + }
17521 +#endif
17522 if (turn_on && !capable(CAP_SYS_RAWIO))
17523 return -EPERM;
17524
17525 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17526 * because the ->io_bitmap_max value must match the bitmap
17527 * contents:
17528 */
17529 - tss = &per_cpu(init_tss, get_cpu());
17530 + tss = init_tss + get_cpu();
17531
17532 if (turn_on)
17533 bitmap_clear(t->io_bitmap_ptr, from, num);
17534 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17535 return -EINVAL;
17536 /* Trying to gain more privileges? */
17537 if (level > old) {
17538 +#ifdef CONFIG_GRKERNSEC_IO
17539 + if (grsec_disable_privio) {
17540 + gr_handle_iopl();
17541 + return -EPERM;
17542 + }
17543 +#endif
17544 if (!capable(CAP_SYS_RAWIO))
17545 return -EPERM;
17546 }
17547 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17548 index 3dafc60..aa8e9c4 100644
17549 --- a/arch/x86/kernel/irq.c
17550 +++ b/arch/x86/kernel/irq.c
17551 @@ -18,7 +18,7 @@
17552 #include <asm/mce.h>
17553 #include <asm/hw_irq.h>
17554
17555 -atomic_t irq_err_count;
17556 +atomic_unchecked_t irq_err_count;
17557
17558 /* Function pointer for generic interrupt vector handling */
17559 void (*x86_platform_ipi_callback)(void) = NULL;
17560 @@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17561 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17562 seq_printf(p, " Machine check polls\n");
17563 #endif
17564 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17565 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17566 #if defined(CONFIG_X86_IO_APIC)
17567 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17568 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17569 #endif
17570 return 0;
17571 }
17572 @@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17573
17574 u64 arch_irq_stat(void)
17575 {
17576 - u64 sum = atomic_read(&irq_err_count);
17577 + u64 sum = atomic_read_unchecked(&irq_err_count);
17578
17579 #ifdef CONFIG_X86_IO_APIC
17580 - sum += atomic_read(&irq_mis_count);
17581 + sum += atomic_read_unchecked(&irq_mis_count);
17582 #endif
17583 return sum;
17584 }
17585 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17586 index 58b7f27..e112d08 100644
17587 --- a/arch/x86/kernel/irq_32.c
17588 +++ b/arch/x86/kernel/irq_32.c
17589 @@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17590 __asm__ __volatile__("andl %%esp,%0" :
17591 "=r" (sp) : "0" (THREAD_SIZE - 1));
17592
17593 - return sp < (sizeof(struct thread_info) + STACK_WARN);
17594 + return sp < STACK_WARN;
17595 }
17596
17597 static void print_stack_overflow(void)
17598 @@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17599 * per-CPU IRQ handling contexts (thread information and stack)
17600 */
17601 union irq_ctx {
17602 - struct thread_info tinfo;
17603 - u32 stack[THREAD_SIZE/sizeof(u32)];
17604 + unsigned long previous_esp;
17605 + u32 stack[THREAD_SIZE/sizeof(u32)];
17606 } __attribute__((aligned(THREAD_SIZE)));
17607
17608 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17609 @@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17610 static inline int
17611 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17612 {
17613 - union irq_ctx *curctx, *irqctx;
17614 + union irq_ctx *irqctx;
17615 u32 *isp, arg1, arg2;
17616
17617 - curctx = (union irq_ctx *) current_thread_info();
17618 irqctx = __this_cpu_read(hardirq_ctx);
17619
17620 /*
17621 @@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17622 * handler) we can't do that and just have to keep using the
17623 * current stack (which is the irq stack already after all)
17624 */
17625 - if (unlikely(curctx == irqctx))
17626 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17627 return 0;
17628
17629 /* build the stack frame on the IRQ stack */
17630 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17631 - irqctx->tinfo.task = curctx->tinfo.task;
17632 - irqctx->tinfo.previous_esp = current_stack_pointer;
17633 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17634 + irqctx->previous_esp = current_stack_pointer;
17635
17636 - /* Copy the preempt_count so that the [soft]irq checks work. */
17637 - irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
17638 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17639 + __set_fs(MAKE_MM_SEG(0));
17640 +#endif
17641
17642 if (unlikely(overflow))
17643 call_on_stack(print_stack_overflow, isp);
17644 @@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17645 : "0" (irq), "1" (desc), "2" (isp),
17646 "D" (desc->handle_irq)
17647 : "memory", "cc", "ecx");
17648 +
17649 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17650 + __set_fs(current_thread_info()->addr_limit);
17651 +#endif
17652 +
17653 return 1;
17654 }
17655
17656 @@ -121,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17657 */
17658 void __cpuinit irq_ctx_init(int cpu)
17659 {
17660 - union irq_ctx *irqctx;
17661 -
17662 if (per_cpu(hardirq_ctx, cpu))
17663 return;
17664
17665 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17666 - THREAD_FLAGS,
17667 - THREAD_ORDER));
17668 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17669 - irqctx->tinfo.cpu = cpu;
17670 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17671 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17672 -
17673 - per_cpu(hardirq_ctx, cpu) = irqctx;
17674 -
17675 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17676 - THREAD_FLAGS,
17677 - THREAD_ORDER));
17678 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17679 - irqctx->tinfo.cpu = cpu;
17680 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17681 -
17682 - per_cpu(softirq_ctx, cpu) = irqctx;
17683 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17684 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17685
17686 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17687 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17688 @@ -152,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17689 asmlinkage void do_softirq(void)
17690 {
17691 unsigned long flags;
17692 - struct thread_info *curctx;
17693 union irq_ctx *irqctx;
17694 u32 *isp;
17695
17696 @@ -162,15 +147,22 @@ asmlinkage void do_softirq(void)
17697 local_irq_save(flags);
17698
17699 if (local_softirq_pending()) {
17700 - curctx = current_thread_info();
17701 irqctx = __this_cpu_read(softirq_ctx);
17702 - irqctx->tinfo.task = curctx->task;
17703 - irqctx->tinfo.previous_esp = current_stack_pointer;
17704 + irqctx->previous_esp = current_stack_pointer;
17705
17706 /* build the stack frame on the softirq stack */
17707 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17708 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17709 +
17710 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17711 + __set_fs(MAKE_MM_SEG(0));
17712 +#endif
17713
17714 call_on_stack(__do_softirq, isp);
17715 +
17716 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17717 + __set_fs(current_thread_info()->addr_limit);
17718 +#endif
17719 +
17720 /*
17721 * Shouldn't happen, we returned above if in_interrupt():
17722 */
17723 @@ -191,7 +183,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
17724 if (unlikely(!desc))
17725 return false;
17726
17727 - if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17728 + if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17729 if (unlikely(overflow))
17730 print_stack_overflow();
17731 desc->handle_irq(irq, desc);
17732 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17733 index d04d3ec..ea4b374 100644
17734 --- a/arch/x86/kernel/irq_64.c
17735 +++ b/arch/x86/kernel/irq_64.c
17736 @@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17737 u64 estack_top, estack_bottom;
17738 u64 curbase = (u64)task_stack_page(current);
17739
17740 - if (user_mode_vm(regs))
17741 + if (user_mode(regs))
17742 return;
17743
17744 if (regs->sp >= curbase + sizeof(struct thread_info) +
17745 diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
17746 index 1d5d31e..ab846ed 100644
17747 --- a/arch/x86/kernel/kdebugfs.c
17748 +++ b/arch/x86/kernel/kdebugfs.c
17749 @@ -28,6 +28,8 @@ struct setup_data_node {
17750 };
17751
17752 static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17753 + size_t count, loff_t *ppos) __size_overflow(3);
17754 +static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17755 size_t count, loff_t *ppos)
17756 {
17757 struct setup_data_node *node = file->private_data;
17758 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17759 index 8bfb614..2b3b35f 100644
17760 --- a/arch/x86/kernel/kgdb.c
17761 +++ b/arch/x86/kernel/kgdb.c
17762 @@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17763 #ifdef CONFIG_X86_32
17764 switch (regno) {
17765 case GDB_SS:
17766 - if (!user_mode_vm(regs))
17767 + if (!user_mode(regs))
17768 *(unsigned long *)mem = __KERNEL_DS;
17769 break;
17770 case GDB_SP:
17771 - if (!user_mode_vm(regs))
17772 + if (!user_mode(regs))
17773 *(unsigned long *)mem = kernel_stack_pointer(regs);
17774 break;
17775 case GDB_GS:
17776 @@ -476,12 +476,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17777 case 'k':
17778 /* clear the trace bit */
17779 linux_regs->flags &= ~X86_EFLAGS_TF;
17780 - atomic_set(&kgdb_cpu_doing_single_step, -1);
17781 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17782
17783 /* set the trace bit if we're stepping */
17784 if (remcomInBuffer[0] == 's') {
17785 linux_regs->flags |= X86_EFLAGS_TF;
17786 - atomic_set(&kgdb_cpu_doing_single_step,
17787 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17788 raw_smp_processor_id());
17789 }
17790
17791 @@ -546,7 +546,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17792
17793 switch (cmd) {
17794 case DIE_DEBUG:
17795 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17796 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17797 if (user_mode(regs))
17798 return single_step_cont(regs, args);
17799 break;
17800 diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
17801 index c5e410e..da6aaf9 100644
17802 --- a/arch/x86/kernel/kprobes-opt.c
17803 +++ b/arch/x86/kernel/kprobes-opt.c
17804 @@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17805 * Verify if the address gap is in 2GB range, because this uses
17806 * a relative jump.
17807 */
17808 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17809 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17810 if (abs(rel) > 0x7fffffff)
17811 return -ERANGE;
17812
17813 @@ -359,11 +359,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17814 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17815
17816 /* Set probe function call */
17817 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17818 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17819
17820 /* Set returning jmp instruction at the tail of out-of-line buffer */
17821 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17822 - (u8 *)op->kp.addr + op->optinsn.size);
17823 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17824
17825 flush_icache_range((unsigned long) buf,
17826 (unsigned long) buf + TMPL_END_IDX +
17827 @@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17828 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17829
17830 /* Backup instructions which will be replaced by jump address */
17831 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17832 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17833 RELATIVE_ADDR_SIZE);
17834
17835 insn_buf[0] = RELATIVEJUMP_OPCODE;
17836 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17837 index e213fc8..d783ba4 100644
17838 --- a/arch/x86/kernel/kprobes.c
17839 +++ b/arch/x86/kernel/kprobes.c
17840 @@ -120,8 +120,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17841 } __attribute__((packed)) *insn;
17842
17843 insn = (struct __arch_relative_insn *)from;
17844 +
17845 + pax_open_kernel();
17846 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17847 insn->op = op;
17848 + pax_close_kernel();
17849 }
17850
17851 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17852 @@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
17853 kprobe_opcode_t opcode;
17854 kprobe_opcode_t *orig_opcodes = opcodes;
17855
17856 - if (search_exception_tables((unsigned long)opcodes))
17857 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17858 return 0; /* Page fault may occur on this address. */
17859
17860 retry:
17861 @@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17862 /* Another subsystem puts a breakpoint, failed to recover */
17863 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
17864 return 0;
17865 + pax_open_kernel();
17866 memcpy(dest, insn.kaddr, insn.length);
17867 + pax_close_kernel();
17868
17869 #ifdef CONFIG_X86_64
17870 if (insn_rip_relative(&insn)) {
17871 @@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17872 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
17873 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17874 disp = (u8 *) dest + insn_offset_displacement(&insn);
17875 + pax_open_kernel();
17876 *(s32 *) disp = (s32) newdisp;
17877 + pax_close_kernel();
17878 }
17879 #endif
17880 return insn.length;
17881 @@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17882 * nor set current_kprobe, because it doesn't use single
17883 * stepping.
17884 */
17885 - regs->ip = (unsigned long)p->ainsn.insn;
17886 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17887 preempt_enable_no_resched();
17888 return;
17889 }
17890 @@ -504,7 +511,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17891 if (p->opcode == BREAKPOINT_INSTRUCTION)
17892 regs->ip = (unsigned long)p->addr;
17893 else
17894 - regs->ip = (unsigned long)p->ainsn.insn;
17895 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17896 }
17897
17898 /*
17899 @@ -583,7 +590,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17900 setup_singlestep(p, regs, kcb, 0);
17901 return 1;
17902 }
17903 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
17904 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17905 /*
17906 * The breakpoint instruction was removed right
17907 * after we hit it. Another cpu has removed
17908 @@ -628,6 +635,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17909 " movq %rax, 152(%rsp)\n"
17910 RESTORE_REGS_STRING
17911 " popfq\n"
17912 +#ifdef KERNEXEC_PLUGIN
17913 + " btsq $63,(%rsp)\n"
17914 +#endif
17915 #else
17916 " pushf\n"
17917 SAVE_REGS_STRING
17918 @@ -765,7 +775,7 @@ static void __kprobes
17919 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17920 {
17921 unsigned long *tos = stack_addr(regs);
17922 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17923 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17924 unsigned long orig_ip = (unsigned long)p->addr;
17925 kprobe_opcode_t *insn = p->ainsn.insn;
17926
17927 @@ -947,7 +957,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
17928 struct die_args *args = data;
17929 int ret = NOTIFY_DONE;
17930
17931 - if (args->regs && user_mode_vm(args->regs))
17932 + if (args->regs && user_mode(args->regs))
17933 return ret;
17934
17935 switch (val) {
17936 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
17937 index ebc9873..1b9724b 100644
17938 --- a/arch/x86/kernel/ldt.c
17939 +++ b/arch/x86/kernel/ldt.c
17940 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
17941 if (reload) {
17942 #ifdef CONFIG_SMP
17943 preempt_disable();
17944 - load_LDT(pc);
17945 + load_LDT_nolock(pc);
17946 if (!cpumask_equal(mm_cpumask(current->mm),
17947 cpumask_of(smp_processor_id())))
17948 smp_call_function(flush_ldt, current->mm, 1);
17949 preempt_enable();
17950 #else
17951 - load_LDT(pc);
17952 + load_LDT_nolock(pc);
17953 #endif
17954 }
17955 if (oldsize) {
17956 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
17957 return err;
17958
17959 for (i = 0; i < old->size; i++)
17960 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
17961 + write_ldt_entry(new->ldt, i, old->ldt + i);
17962 return 0;
17963 }
17964
17965 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
17966 retval = copy_ldt(&mm->context, &old_mm->context);
17967 mutex_unlock(&old_mm->context.lock);
17968 }
17969 +
17970 + if (tsk == current) {
17971 + mm->context.vdso = 0;
17972 +
17973 +#ifdef CONFIG_X86_32
17974 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17975 + mm->context.user_cs_base = 0UL;
17976 + mm->context.user_cs_limit = ~0UL;
17977 +
17978 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17979 + cpus_clear(mm->context.cpu_user_cs_mask);
17980 +#endif
17981 +
17982 +#endif
17983 +#endif
17984 +
17985 + }
17986 +
17987 return retval;
17988 }
17989
17990 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
17991 }
17992 }
17993
17994 +#ifdef CONFIG_PAX_SEGMEXEC
17995 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
17996 + error = -EINVAL;
17997 + goto out_unlock;
17998 + }
17999 +#endif
18000 +
18001 fill_ldt(&ldt, &ldt_info);
18002 if (oldmode)
18003 ldt.avl = 0;
18004 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18005 index 5b19e4d..6476a76 100644
18006 --- a/arch/x86/kernel/machine_kexec_32.c
18007 +++ b/arch/x86/kernel/machine_kexec_32.c
18008 @@ -26,7 +26,7 @@
18009 #include <asm/cacheflush.h>
18010 #include <asm/debugreg.h>
18011
18012 -static void set_idt(void *newidt, __u16 limit)
18013 +static void set_idt(struct desc_struct *newidt, __u16 limit)
18014 {
18015 struct desc_ptr curidt;
18016
18017 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18018 }
18019
18020
18021 -static void set_gdt(void *newgdt, __u16 limit)
18022 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18023 {
18024 struct desc_ptr curgdt;
18025
18026 @@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
18027 }
18028
18029 control_page = page_address(image->control_code_page);
18030 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18031 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18032
18033 relocate_kernel_ptr = control_page;
18034 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18035 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18036 index 0327e2b..e43737b 100644
18037 --- a/arch/x86/kernel/microcode_intel.c
18038 +++ b/arch/x86/kernel/microcode_intel.c
18039 @@ -430,13 +430,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18040
18041 static int get_ucode_user(void *to, const void *from, size_t n)
18042 {
18043 - return copy_from_user(to, from, n);
18044 + return copy_from_user(to, (const void __force_user *)from, n);
18045 }
18046
18047 static enum ucode_state
18048 request_microcode_user(int cpu, const void __user *buf, size_t size)
18049 {
18050 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18051 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18052 }
18053
18054 static void microcode_fini_cpu(int cpu)
18055 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18056 index f21fd94..61565cd 100644
18057 --- a/arch/x86/kernel/module.c
18058 +++ b/arch/x86/kernel/module.c
18059 @@ -35,15 +35,60 @@
18060 #define DEBUGP(fmt...)
18061 #endif
18062
18063 -void *module_alloc(unsigned long size)
18064 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
18065 {
18066 - if (PAGE_ALIGN(size) > MODULES_LEN)
18067 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
18068 return NULL;
18069 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
18070 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
18071 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
18072 -1, __builtin_return_address(0));
18073 }
18074
18075 +void *module_alloc(unsigned long size)
18076 +{
18077 +
18078 +#ifdef CONFIG_PAX_KERNEXEC
18079 + return __module_alloc(size, PAGE_KERNEL);
18080 +#else
18081 + return __module_alloc(size, PAGE_KERNEL_EXEC);
18082 +#endif
18083 +
18084 +}
18085 +
18086 +#ifdef CONFIG_PAX_KERNEXEC
18087 +#ifdef CONFIG_X86_32
18088 +void *module_alloc_exec(unsigned long size)
18089 +{
18090 + struct vm_struct *area;
18091 +
18092 + if (size == 0)
18093 + return NULL;
18094 +
18095 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18096 + return area ? area->addr : NULL;
18097 +}
18098 +EXPORT_SYMBOL(module_alloc_exec);
18099 +
18100 +void module_free_exec(struct module *mod, void *module_region)
18101 +{
18102 + vunmap(module_region);
18103 +}
18104 +EXPORT_SYMBOL(module_free_exec);
18105 +#else
18106 +void module_free_exec(struct module *mod, void *module_region)
18107 +{
18108 + module_free(mod, module_region);
18109 +}
18110 +EXPORT_SYMBOL(module_free_exec);
18111 +
18112 +void *module_alloc_exec(unsigned long size)
18113 +{
18114 + return __module_alloc(size, PAGE_KERNEL_RX);
18115 +}
18116 +EXPORT_SYMBOL(module_alloc_exec);
18117 +#endif
18118 +#endif
18119 +
18120 #ifdef CONFIG_X86_32
18121 int apply_relocate(Elf32_Shdr *sechdrs,
18122 const char *strtab,
18123 @@ -54,14 +99,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18124 unsigned int i;
18125 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18126 Elf32_Sym *sym;
18127 - uint32_t *location;
18128 + uint32_t *plocation, location;
18129
18130 DEBUGP("Applying relocate section %u to %u\n", relsec,
18131 sechdrs[relsec].sh_info);
18132 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18133 /* This is where to make the change */
18134 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18135 - + rel[i].r_offset;
18136 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18137 + location = (uint32_t)plocation;
18138 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18139 + plocation = ktla_ktva((void *)plocation);
18140 /* This is the symbol it is referring to. Note that all
18141 undefined symbols have been resolved. */
18142 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18143 @@ -70,11 +117,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18144 switch (ELF32_R_TYPE(rel[i].r_info)) {
18145 case R_386_32:
18146 /* We add the value into the location given */
18147 - *location += sym->st_value;
18148 + pax_open_kernel();
18149 + *plocation += sym->st_value;
18150 + pax_close_kernel();
18151 break;
18152 case R_386_PC32:
18153 /* Add the value, subtract its postition */
18154 - *location += sym->st_value - (uint32_t)location;
18155 + pax_open_kernel();
18156 + *plocation += sym->st_value - location;
18157 + pax_close_kernel();
18158 break;
18159 default:
18160 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18161 @@ -119,21 +170,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18162 case R_X86_64_NONE:
18163 break;
18164 case R_X86_64_64:
18165 + pax_open_kernel();
18166 *(u64 *)loc = val;
18167 + pax_close_kernel();
18168 break;
18169 case R_X86_64_32:
18170 + pax_open_kernel();
18171 *(u32 *)loc = val;
18172 + pax_close_kernel();
18173 if (val != *(u32 *)loc)
18174 goto overflow;
18175 break;
18176 case R_X86_64_32S:
18177 + pax_open_kernel();
18178 *(s32 *)loc = val;
18179 + pax_close_kernel();
18180 if ((s64)val != *(s32 *)loc)
18181 goto overflow;
18182 break;
18183 case R_X86_64_PC32:
18184 val -= (u64)loc;
18185 + pax_open_kernel();
18186 *(u32 *)loc = val;
18187 + pax_close_kernel();
18188 +
18189 #if 0
18190 if ((s64)val != *(s32 *)loc)
18191 goto overflow;
18192 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
18193 index 32856fa..ce95eaa 100644
18194 --- a/arch/x86/kernel/nmi.c
18195 +++ b/arch/x86/kernel/nmi.c
18196 @@ -507,6 +507,17 @@ static inline void nmi_nesting_postprocess(void)
18197 dotraplinkage notrace __kprobes void
18198 do_nmi(struct pt_regs *regs, long error_code)
18199 {
18200 +
18201 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18202 + if (!user_mode(regs)) {
18203 + unsigned long cs = regs->cs & 0xFFFF;
18204 + unsigned long ip = ktva_ktla(regs->ip);
18205 +
18206 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
18207 + regs->ip = ip;
18208 + }
18209 +#endif
18210 +
18211 nmi_nesting_preprocess(regs);
18212
18213 nmi_enter();
18214 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18215 index 676b8c7..870ba04 100644
18216 --- a/arch/x86/kernel/paravirt-spinlocks.c
18217 +++ b/arch/x86/kernel/paravirt-spinlocks.c
18218 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
18219 arch_spin_lock(lock);
18220 }
18221
18222 -struct pv_lock_ops pv_lock_ops = {
18223 +struct pv_lock_ops pv_lock_ops __read_only = {
18224 #ifdef CONFIG_SMP
18225 .spin_is_locked = __ticket_spin_is_locked,
18226 .spin_is_contended = __ticket_spin_is_contended,
18227 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18228 index ab13760..01218e0 100644
18229 --- a/arch/x86/kernel/paravirt.c
18230 +++ b/arch/x86/kernel/paravirt.c
18231 @@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
18232 {
18233 return x;
18234 }
18235 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18236 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18237 +#endif
18238
18239 void __init default_banner(void)
18240 {
18241 @@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18242 if (opfunc == NULL)
18243 /* If there's no function, patch it with a ud2a (BUG) */
18244 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18245 - else if (opfunc == _paravirt_nop)
18246 + else if (opfunc == (void *)_paravirt_nop)
18247 /* If the operation is a nop, then nop the callsite */
18248 ret = paravirt_patch_nop();
18249
18250 /* identity functions just return their single argument */
18251 - else if (opfunc == _paravirt_ident_32)
18252 + else if (opfunc == (void *)_paravirt_ident_32)
18253 ret = paravirt_patch_ident_32(insnbuf, len);
18254 - else if (opfunc == _paravirt_ident_64)
18255 + else if (opfunc == (void *)_paravirt_ident_64)
18256 ret = paravirt_patch_ident_64(insnbuf, len);
18257 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18258 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18259 + ret = paravirt_patch_ident_64(insnbuf, len);
18260 +#endif
18261
18262 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18263 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18264 @@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18265 if (insn_len > len || start == NULL)
18266 insn_len = len;
18267 else
18268 - memcpy(insnbuf, start, insn_len);
18269 + memcpy(insnbuf, ktla_ktva(start), insn_len);
18270
18271 return insn_len;
18272 }
18273 @@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
18274 preempt_enable();
18275 }
18276
18277 -struct pv_info pv_info = {
18278 +struct pv_info pv_info __read_only = {
18279 .name = "bare hardware",
18280 .paravirt_enabled = 0,
18281 .kernel_rpl = 0,
18282 @@ -315,16 +322,16 @@ struct pv_info pv_info = {
18283 #endif
18284 };
18285
18286 -struct pv_init_ops pv_init_ops = {
18287 +struct pv_init_ops pv_init_ops __read_only = {
18288 .patch = native_patch,
18289 };
18290
18291 -struct pv_time_ops pv_time_ops = {
18292 +struct pv_time_ops pv_time_ops __read_only = {
18293 .sched_clock = native_sched_clock,
18294 .steal_clock = native_steal_clock,
18295 };
18296
18297 -struct pv_irq_ops pv_irq_ops = {
18298 +struct pv_irq_ops pv_irq_ops __read_only = {
18299 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18300 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18301 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18302 @@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
18303 #endif
18304 };
18305
18306 -struct pv_cpu_ops pv_cpu_ops = {
18307 +struct pv_cpu_ops pv_cpu_ops __read_only = {
18308 .cpuid = native_cpuid,
18309 .get_debugreg = native_get_debugreg,
18310 .set_debugreg = native_set_debugreg,
18311 @@ -397,21 +404,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18312 .end_context_switch = paravirt_nop,
18313 };
18314
18315 -struct pv_apic_ops pv_apic_ops = {
18316 +struct pv_apic_ops pv_apic_ops __read_only = {
18317 #ifdef CONFIG_X86_LOCAL_APIC
18318 .startup_ipi_hook = paravirt_nop,
18319 #endif
18320 };
18321
18322 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18323 +#ifdef CONFIG_X86_32
18324 +#ifdef CONFIG_X86_PAE
18325 +/* 64-bit pagetable entries */
18326 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18327 +#else
18328 /* 32-bit pagetable entries */
18329 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18330 +#endif
18331 #else
18332 /* 64-bit pagetable entries */
18333 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18334 #endif
18335
18336 -struct pv_mmu_ops pv_mmu_ops = {
18337 +struct pv_mmu_ops pv_mmu_ops __read_only = {
18338
18339 .read_cr2 = native_read_cr2,
18340 .write_cr2 = native_write_cr2,
18341 @@ -461,6 +473,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18342 .make_pud = PTE_IDENT,
18343
18344 .set_pgd = native_set_pgd,
18345 + .set_pgd_batched = native_set_pgd_batched,
18346 #endif
18347 #endif /* PAGETABLE_LEVELS >= 3 */
18348
18349 @@ -480,6 +493,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18350 },
18351
18352 .set_fixmap = native_set_fixmap,
18353 +
18354 +#ifdef CONFIG_PAX_KERNEXEC
18355 + .pax_open_kernel = native_pax_open_kernel,
18356 + .pax_close_kernel = native_pax_close_kernel,
18357 +#endif
18358 +
18359 };
18360
18361 EXPORT_SYMBOL_GPL(pv_time_ops);
18362 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
18363 index 35ccf75..7a15747 100644
18364 --- a/arch/x86/kernel/pci-iommu_table.c
18365 +++ b/arch/x86/kernel/pci-iommu_table.c
18366 @@ -2,7 +2,7 @@
18367 #include <asm/iommu_table.h>
18368 #include <linux/string.h>
18369 #include <linux/kallsyms.h>
18370 -
18371 +#include <linux/sched.h>
18372
18373 #define DEBUG 1
18374
18375 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18376 index 1d92a5a..7bc8c29 100644
18377 --- a/arch/x86/kernel/process.c
18378 +++ b/arch/x86/kernel/process.c
18379 @@ -69,16 +69,33 @@ void free_thread_xstate(struct task_struct *tsk)
18380
18381 void free_thread_info(struct thread_info *ti)
18382 {
18383 - free_thread_xstate(ti->task);
18384 free_pages((unsigned long)ti, THREAD_ORDER);
18385 }
18386
18387 +static struct kmem_cache *task_struct_cachep;
18388 +
18389 void arch_task_cache_init(void)
18390 {
18391 - task_xstate_cachep =
18392 - kmem_cache_create("task_xstate", xstate_size,
18393 + /* create a slab on which task_structs can be allocated */
18394 + task_struct_cachep =
18395 + kmem_cache_create("task_struct", sizeof(struct task_struct),
18396 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18397 +
18398 + task_xstate_cachep =
18399 + kmem_cache_create("task_xstate", xstate_size,
18400 __alignof__(union thread_xstate),
18401 - SLAB_PANIC | SLAB_NOTRACK, NULL);
18402 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18403 +}
18404 +
18405 +struct task_struct *alloc_task_struct_node(int node)
18406 +{
18407 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
18408 +}
18409 +
18410 +void free_task_struct(struct task_struct *task)
18411 +{
18412 + free_thread_xstate(task);
18413 + kmem_cache_free(task_struct_cachep, task);
18414 }
18415
18416 /*
18417 @@ -91,7 +108,7 @@ void exit_thread(void)
18418 unsigned long *bp = t->io_bitmap_ptr;
18419
18420 if (bp) {
18421 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18422 + struct tss_struct *tss = init_tss + get_cpu();
18423
18424 t->io_bitmap_ptr = NULL;
18425 clear_thread_flag(TIF_IO_BITMAP);
18426 @@ -127,7 +144,7 @@ void show_regs_common(void)
18427
18428 printk(KERN_CONT "\n");
18429 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18430 - current->pid, current->comm, print_tainted(),
18431 + task_pid_nr(current), current->comm, print_tainted(),
18432 init_utsname()->release,
18433 (int)strcspn(init_utsname()->version, " "),
18434 init_utsname()->version);
18435 @@ -141,6 +158,9 @@ void flush_thread(void)
18436 {
18437 struct task_struct *tsk = current;
18438
18439 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18440 + loadsegment(gs, 0);
18441 +#endif
18442 flush_ptrace_hw_breakpoint(tsk);
18443 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18444 /*
18445 @@ -303,10 +323,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18446 regs.di = (unsigned long) arg;
18447
18448 #ifdef CONFIG_X86_32
18449 - regs.ds = __USER_DS;
18450 - regs.es = __USER_DS;
18451 + regs.ds = __KERNEL_DS;
18452 + regs.es = __KERNEL_DS;
18453 regs.fs = __KERNEL_PERCPU;
18454 - regs.gs = __KERNEL_STACK_CANARY;
18455 + savesegment(gs, regs.gs);
18456 #else
18457 regs.ss = __KERNEL_DS;
18458 #endif
18459 @@ -392,7 +412,7 @@ static void __exit_idle(void)
18460 void exit_idle(void)
18461 {
18462 /* idle loop has pid 0 */
18463 - if (current->pid)
18464 + if (task_pid_nr(current))
18465 return;
18466 __exit_idle();
18467 }
18468 @@ -501,7 +521,7 @@ bool set_pm_idle_to_default(void)
18469
18470 return ret;
18471 }
18472 -void stop_this_cpu(void *dummy)
18473 +__noreturn void stop_this_cpu(void *dummy)
18474 {
18475 local_irq_disable();
18476 /*
18477 @@ -743,16 +763,37 @@ static int __init idle_setup(char *str)
18478 }
18479 early_param("idle", idle_setup);
18480
18481 -unsigned long arch_align_stack(unsigned long sp)
18482 +#ifdef CONFIG_PAX_RANDKSTACK
18483 +void pax_randomize_kstack(struct pt_regs *regs)
18484 {
18485 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18486 - sp -= get_random_int() % 8192;
18487 - return sp & ~0xf;
18488 -}
18489 + struct thread_struct *thread = &current->thread;
18490 + unsigned long time;
18491
18492 -unsigned long arch_randomize_brk(struct mm_struct *mm)
18493 -{
18494 - unsigned long range_end = mm->brk + 0x02000000;
18495 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18496 -}
18497 + if (!randomize_va_space)
18498 + return;
18499 +
18500 + if (v8086_mode(regs))
18501 + return;
18502
18503 + rdtscl(time);
18504 +
18505 + /* P4 seems to return a 0 LSB, ignore it */
18506 +#ifdef CONFIG_MPENTIUM4
18507 + time &= 0x3EUL;
18508 + time <<= 2;
18509 +#elif defined(CONFIG_X86_64)
18510 + time &= 0xFUL;
18511 + time <<= 4;
18512 +#else
18513 + time &= 0x1FUL;
18514 + time <<= 3;
18515 +#endif
18516 +
18517 + thread->sp0 ^= time;
18518 + load_sp0(init_tss + smp_processor_id(), thread);
18519 +
18520 +#ifdef CONFIG_X86_64
18521 + percpu_write(kernel_stack, thread->sp0);
18522 +#endif
18523 +}
18524 +#endif
18525 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18526 index ae68473..7b0bb71 100644
18527 --- a/arch/x86/kernel/process_32.c
18528 +++ b/arch/x86/kernel/process_32.c
18529 @@ -64,6 +64,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18530 unsigned long thread_saved_pc(struct task_struct *tsk)
18531 {
18532 return ((unsigned long *)tsk->thread.sp)[3];
18533 +//XXX return tsk->thread.eip;
18534 }
18535
18536 void __show_regs(struct pt_regs *regs, int all)
18537 @@ -73,15 +74,14 @@ void __show_regs(struct pt_regs *regs, int all)
18538 unsigned long sp;
18539 unsigned short ss, gs;
18540
18541 - if (user_mode_vm(regs)) {
18542 + if (user_mode(regs)) {
18543 sp = regs->sp;
18544 ss = regs->ss & 0xffff;
18545 - gs = get_user_gs(regs);
18546 } else {
18547 sp = kernel_stack_pointer(regs);
18548 savesegment(ss, ss);
18549 - savesegment(gs, gs);
18550 }
18551 + gs = get_user_gs(regs);
18552
18553 show_regs_common();
18554
18555 @@ -143,13 +143,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18556 struct task_struct *tsk;
18557 int err;
18558
18559 - childregs = task_pt_regs(p);
18560 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18561 *childregs = *regs;
18562 childregs->ax = 0;
18563 childregs->sp = sp;
18564
18565 p->thread.sp = (unsigned long) childregs;
18566 p->thread.sp0 = (unsigned long) (childregs+1);
18567 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18568
18569 p->thread.ip = (unsigned long) ret_from_fork;
18570
18571 @@ -240,7 +241,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18572 struct thread_struct *prev = &prev_p->thread,
18573 *next = &next_p->thread;
18574 int cpu = smp_processor_id();
18575 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18576 + struct tss_struct *tss = init_tss + cpu;
18577 fpu_switch_t fpu;
18578
18579 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18580 @@ -264,6 +265,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18581 */
18582 lazy_save_gs(prev->gs);
18583
18584 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18585 + __set_fs(task_thread_info(next_p)->addr_limit);
18586 +#endif
18587 +
18588 /*
18589 * Load the per-thread Thread-Local Storage descriptor.
18590 */
18591 @@ -294,6 +299,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18592 */
18593 arch_end_context_switch(next_p);
18594
18595 + percpu_write(current_task, next_p);
18596 + percpu_write(current_tinfo, &next_p->tinfo);
18597 +
18598 /*
18599 * Restore %gs if needed (which is common)
18600 */
18601 @@ -302,8 +310,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18602
18603 switch_fpu_finish(next_p, fpu);
18604
18605 - percpu_write(current_task, next_p);
18606 -
18607 return prev_p;
18608 }
18609
18610 @@ -333,4 +339,3 @@ unsigned long get_wchan(struct task_struct *p)
18611 } while (count++ < 16);
18612 return 0;
18613 }
18614 -
18615 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18616 index 43d8b48..c45d566 100644
18617 --- a/arch/x86/kernel/process_64.c
18618 +++ b/arch/x86/kernel/process_64.c
18619 @@ -162,8 +162,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18620 struct pt_regs *childregs;
18621 struct task_struct *me = current;
18622
18623 - childregs = ((struct pt_regs *)
18624 - (THREAD_SIZE + task_stack_page(p))) - 1;
18625 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18626 *childregs = *regs;
18627
18628 childregs->ax = 0;
18629 @@ -175,6 +174,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18630 p->thread.sp = (unsigned long) childregs;
18631 p->thread.sp0 = (unsigned long) (childregs+1);
18632 p->thread.usersp = me->thread.usersp;
18633 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18634
18635 set_tsk_thread_flag(p, TIF_FORK);
18636
18637 @@ -280,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18638 struct thread_struct *prev = &prev_p->thread;
18639 struct thread_struct *next = &next_p->thread;
18640 int cpu = smp_processor_id();
18641 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18642 + struct tss_struct *tss = init_tss + cpu;
18643 unsigned fsindex, gsindex;
18644 fpu_switch_t fpu;
18645
18646 @@ -362,10 +362,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18647 prev->usersp = percpu_read(old_rsp);
18648 percpu_write(old_rsp, next->usersp);
18649 percpu_write(current_task, next_p);
18650 + percpu_write(current_tinfo, &next_p->tinfo);
18651
18652 - percpu_write(kernel_stack,
18653 - (unsigned long)task_stack_page(next_p) +
18654 - THREAD_SIZE - KERNEL_STACK_OFFSET);
18655 + percpu_write(kernel_stack, next->sp0);
18656
18657 /*
18658 * Now maybe reload the debug registers and handle I/O bitmaps
18659 @@ -434,12 +433,11 @@ unsigned long get_wchan(struct task_struct *p)
18660 if (!p || p == current || p->state == TASK_RUNNING)
18661 return 0;
18662 stack = (unsigned long)task_stack_page(p);
18663 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18664 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18665 return 0;
18666 fp = *(u64 *)(p->thread.sp);
18667 do {
18668 - if (fp < (unsigned long)stack ||
18669 - fp >= (unsigned long)stack+THREAD_SIZE)
18670 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18671 return 0;
18672 ip = *(u64 *)(fp+8);
18673 if (!in_sched_functions(ip))
18674 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18675 index cf11783..e7ce551 100644
18676 --- a/arch/x86/kernel/ptrace.c
18677 +++ b/arch/x86/kernel/ptrace.c
18678 @@ -824,7 +824,7 @@ long arch_ptrace(struct task_struct *child, long request,
18679 unsigned long addr, unsigned long data)
18680 {
18681 int ret;
18682 - unsigned long __user *datap = (unsigned long __user *)data;
18683 + unsigned long __user *datap = (__force unsigned long __user *)data;
18684
18685 switch (request) {
18686 /* read the word at location addr in the USER area. */
18687 @@ -909,14 +909,14 @@ long arch_ptrace(struct task_struct *child, long request,
18688 if ((int) addr < 0)
18689 return -EIO;
18690 ret = do_get_thread_area(child, addr,
18691 - (struct user_desc __user *)data);
18692 + (__force struct user_desc __user *) data);
18693 break;
18694
18695 case PTRACE_SET_THREAD_AREA:
18696 if ((int) addr < 0)
18697 return -EIO;
18698 ret = do_set_thread_area(child, addr,
18699 - (struct user_desc __user *)data, 0);
18700 + (__force struct user_desc __user *) data, 0);
18701 break;
18702 #endif
18703
18704 @@ -1426,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18705 memset(info, 0, sizeof(*info));
18706 info->si_signo = SIGTRAP;
18707 info->si_code = si_code;
18708 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18709 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18710 }
18711
18712 void user_single_step_siginfo(struct task_struct *tsk,
18713 @@ -1455,6 +1455,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18714 # define IS_IA32 0
18715 #endif
18716
18717 +#ifdef CONFIG_GRKERNSEC_SETXID
18718 +extern void gr_delayed_cred_worker(void);
18719 +#endif
18720 +
18721 /*
18722 * We must return the syscall number to actually look up in the table.
18723 * This can be -1L to skip running any syscall at all.
18724 @@ -1463,6 +1467,11 @@ long syscall_trace_enter(struct pt_regs *regs)
18725 {
18726 long ret = 0;
18727
18728 +#ifdef CONFIG_GRKERNSEC_SETXID
18729 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18730 + gr_delayed_cred_worker();
18731 +#endif
18732 +
18733 /*
18734 * If we stepped into a sysenter/syscall insn, it trapped in
18735 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
18736 @@ -1506,6 +1515,11 @@ void syscall_trace_leave(struct pt_regs *regs)
18737 {
18738 bool step;
18739
18740 +#ifdef CONFIG_GRKERNSEC_SETXID
18741 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18742 + gr_delayed_cred_worker();
18743 +#endif
18744 +
18745 audit_syscall_exit(regs);
18746
18747 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
18748 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18749 index 42eb330..139955c 100644
18750 --- a/arch/x86/kernel/pvclock.c
18751 +++ b/arch/x86/kernel/pvclock.c
18752 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18753 return pv_tsc_khz;
18754 }
18755
18756 -static atomic64_t last_value = ATOMIC64_INIT(0);
18757 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18758
18759 void pvclock_resume(void)
18760 {
18761 - atomic64_set(&last_value, 0);
18762 + atomic64_set_unchecked(&last_value, 0);
18763 }
18764
18765 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18766 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18767 * updating at the same time, and one of them could be slightly behind,
18768 * making the assumption that last_value always go forward fail to hold.
18769 */
18770 - last = atomic64_read(&last_value);
18771 + last = atomic64_read_unchecked(&last_value);
18772 do {
18773 if (ret < last)
18774 return last;
18775 - last = atomic64_cmpxchg(&last_value, last, ret);
18776 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18777 } while (unlikely(last != ret));
18778
18779 return ret;
18780 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18781 index d840e69..98e9581 100644
18782 --- a/arch/x86/kernel/reboot.c
18783 +++ b/arch/x86/kernel/reboot.c
18784 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
18785 EXPORT_SYMBOL(pm_power_off);
18786
18787 static const struct desc_ptr no_idt = {};
18788 -static int reboot_mode;
18789 +static unsigned short reboot_mode;
18790 enum reboot_type reboot_type = BOOT_ACPI;
18791 int reboot_force;
18792
18793 @@ -335,13 +335,17 @@ core_initcall(reboot_init);
18794 extern const unsigned char machine_real_restart_asm[];
18795 extern const u64 machine_real_restart_gdt[3];
18796
18797 -void machine_real_restart(unsigned int type)
18798 +__noreturn void machine_real_restart(unsigned int type)
18799 {
18800 void *restart_va;
18801 unsigned long restart_pa;
18802 - void (*restart_lowmem)(unsigned int);
18803 + void (* __noreturn restart_lowmem)(unsigned int);
18804 u64 *lowmem_gdt;
18805
18806 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18807 + struct desc_struct *gdt;
18808 +#endif
18809 +
18810 local_irq_disable();
18811
18812 /* Write zero to CMOS register number 0x0f, which the BIOS POST
18813 @@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
18814 boot)". This seems like a fairly standard thing that gets set by
18815 REBOOT.COM programs, and the previous reset routine did this
18816 too. */
18817 - *((unsigned short *)0x472) = reboot_mode;
18818 + *(unsigned short *)(__va(0x472)) = reboot_mode;
18819
18820 /* Patch the GDT in the low memory trampoline */
18821 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18822
18823 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18824 restart_pa = virt_to_phys(restart_va);
18825 - restart_lowmem = (void (*)(unsigned int))restart_pa;
18826 + restart_lowmem = (void *)restart_pa;
18827
18828 /* GDT[0]: GDT self-pointer */
18829 lowmem_gdt[0] =
18830 @@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
18831 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18832
18833 /* Jump to the identity-mapped low memory code */
18834 +
18835 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18836 + gdt = get_cpu_gdt_table(smp_processor_id());
18837 + pax_open_kernel();
18838 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18839 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18840 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18841 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18842 +#endif
18843 +#ifdef CONFIG_PAX_KERNEXEC
18844 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18845 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18846 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18847 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18848 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18849 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18850 +#endif
18851 + pax_close_kernel();
18852 +#endif
18853 +
18854 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18855 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18856 + unreachable();
18857 +#else
18858 restart_lowmem(type);
18859 +#endif
18860 +
18861 }
18862 #ifdef CONFIG_APM_MODULE
18863 EXPORT_SYMBOL(machine_real_restart);
18864 @@ -556,7 +586,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18865 * try to force a triple fault and then cycle between hitting the keyboard
18866 * controller and doing that
18867 */
18868 -static void native_machine_emergency_restart(void)
18869 +__noreturn static void native_machine_emergency_restart(void)
18870 {
18871 int i;
18872 int attempt = 0;
18873 @@ -680,13 +710,13 @@ void native_machine_shutdown(void)
18874 #endif
18875 }
18876
18877 -static void __machine_emergency_restart(int emergency)
18878 +static __noreturn void __machine_emergency_restart(int emergency)
18879 {
18880 reboot_emergency = emergency;
18881 machine_ops.emergency_restart();
18882 }
18883
18884 -static void native_machine_restart(char *__unused)
18885 +static __noreturn void native_machine_restart(char *__unused)
18886 {
18887 printk("machine restart\n");
18888
18889 @@ -695,7 +725,7 @@ static void native_machine_restart(char *__unused)
18890 __machine_emergency_restart(0);
18891 }
18892
18893 -static void native_machine_halt(void)
18894 +static __noreturn void native_machine_halt(void)
18895 {
18896 /* stop other cpus and apics */
18897 machine_shutdown();
18898 @@ -706,7 +736,7 @@ static void native_machine_halt(void)
18899 stop_this_cpu(NULL);
18900 }
18901
18902 -static void native_machine_power_off(void)
18903 +__noreturn static void native_machine_power_off(void)
18904 {
18905 if (pm_power_off) {
18906 if (!reboot_force)
18907 @@ -715,6 +745,7 @@ static void native_machine_power_off(void)
18908 }
18909 /* a fallback in case there is no PM info available */
18910 tboot_shutdown(TB_SHUTDOWN_HALT);
18911 + unreachable();
18912 }
18913
18914 struct machine_ops machine_ops = {
18915 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
18916 index 7a6f3b3..bed145d7 100644
18917 --- a/arch/x86/kernel/relocate_kernel_64.S
18918 +++ b/arch/x86/kernel/relocate_kernel_64.S
18919 @@ -11,6 +11,7 @@
18920 #include <asm/kexec.h>
18921 #include <asm/processor-flags.h>
18922 #include <asm/pgtable_types.h>
18923 +#include <asm/alternative-asm.h>
18924
18925 /*
18926 * Must be relocatable PIC code callable as a C function
18927 @@ -160,13 +161,14 @@ identity_mapped:
18928 xorq %rbp, %rbp
18929 xorq %r8, %r8
18930 xorq %r9, %r9
18931 - xorq %r10, %r9
18932 + xorq %r10, %r10
18933 xorq %r11, %r11
18934 xorq %r12, %r12
18935 xorq %r13, %r13
18936 xorq %r14, %r14
18937 xorq %r15, %r15
18938
18939 + pax_force_retaddr 0, 1
18940 ret
18941
18942 1:
18943 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
18944 index 1a29015..712f324 100644
18945 --- a/arch/x86/kernel/setup.c
18946 +++ b/arch/x86/kernel/setup.c
18947 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
18948
18949 switch (data->type) {
18950 case SETUP_E820_EXT:
18951 - parse_e820_ext(data);
18952 + parse_e820_ext((struct setup_data __force_kernel *)data);
18953 break;
18954 case SETUP_DTB:
18955 add_dtb(pa_data);
18956 @@ -639,7 +639,7 @@ static void __init trim_bios_range(void)
18957 * area (640->1Mb) as ram even though it is not.
18958 * take them out.
18959 */
18960 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
18961 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
18962 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
18963 }
18964
18965 @@ -763,14 +763,14 @@ void __init setup_arch(char **cmdline_p)
18966
18967 if (!boot_params.hdr.root_flags)
18968 root_mountflags &= ~MS_RDONLY;
18969 - init_mm.start_code = (unsigned long) _text;
18970 - init_mm.end_code = (unsigned long) _etext;
18971 + init_mm.start_code = ktla_ktva((unsigned long) _text);
18972 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
18973 init_mm.end_data = (unsigned long) _edata;
18974 init_mm.brk = _brk_end;
18975
18976 - code_resource.start = virt_to_phys(_text);
18977 - code_resource.end = virt_to_phys(_etext)-1;
18978 - data_resource.start = virt_to_phys(_etext);
18979 + code_resource.start = virt_to_phys(ktla_ktva(_text));
18980 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
18981 + data_resource.start = virt_to_phys(_sdata);
18982 data_resource.end = virt_to_phys(_edata)-1;
18983 bss_resource.start = virt_to_phys(&__bss_start);
18984 bss_resource.end = virt_to_phys(&__bss_stop)-1;
18985 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
18986 index 5a98aa2..2f9288d 100644
18987 --- a/arch/x86/kernel/setup_percpu.c
18988 +++ b/arch/x86/kernel/setup_percpu.c
18989 @@ -21,19 +21,17 @@
18990 #include <asm/cpu.h>
18991 #include <asm/stackprotector.h>
18992
18993 -DEFINE_PER_CPU(int, cpu_number);
18994 +#ifdef CONFIG_SMP
18995 +DEFINE_PER_CPU(unsigned int, cpu_number);
18996 EXPORT_PER_CPU_SYMBOL(cpu_number);
18997 +#endif
18998
18999 -#ifdef CONFIG_X86_64
19000 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19001 -#else
19002 -#define BOOT_PERCPU_OFFSET 0
19003 -#endif
19004
19005 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19006 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19007
19008 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19009 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19010 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19011 };
19012 EXPORT_SYMBOL(__per_cpu_offset);
19013 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
19014 {
19015 #ifdef CONFIG_X86_32
19016 struct desc_struct gdt;
19017 + unsigned long base = per_cpu_offset(cpu);
19018
19019 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19020 - 0x2 | DESCTYPE_S, 0x8);
19021 - gdt.s = 1;
19022 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19023 + 0x83 | DESCTYPE_S, 0xC);
19024 write_gdt_entry(get_cpu_gdt_table(cpu),
19025 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19026 #endif
19027 @@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
19028 /* alrighty, percpu areas up and running */
19029 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19030 for_each_possible_cpu(cpu) {
19031 +#ifdef CONFIG_CC_STACKPROTECTOR
19032 +#ifdef CONFIG_X86_32
19033 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
19034 +#endif
19035 +#endif
19036 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19037 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19038 per_cpu(cpu_number, cpu) = cpu;
19039 @@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
19040 */
19041 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
19042 #endif
19043 +#ifdef CONFIG_CC_STACKPROTECTOR
19044 +#ifdef CONFIG_X86_32
19045 + if (!cpu)
19046 + per_cpu(stack_canary.canary, cpu) = canary;
19047 +#endif
19048 +#endif
19049 /*
19050 * Up to this point, the boot CPU has been using .init.data
19051 * area. Reload any changed state for the boot CPU.
19052 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19053 index 115eac4..c0591d5 100644
19054 --- a/arch/x86/kernel/signal.c
19055 +++ b/arch/x86/kernel/signal.c
19056 @@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
19057 * Align the stack pointer according to the i386 ABI,
19058 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19059 */
19060 - sp = ((sp + 4) & -16ul) - 4;
19061 + sp = ((sp - 12) & -16ul) - 4;
19062 #else /* !CONFIG_X86_32 */
19063 sp = round_down(sp, 16) - 8;
19064 #endif
19065 @@ -241,11 +241,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19066 * Return an always-bogus address instead so we will die with SIGSEGV.
19067 */
19068 if (onsigstack && !likely(on_sig_stack(sp)))
19069 - return (void __user *)-1L;
19070 + return (__force void __user *)-1L;
19071
19072 /* save i387 state */
19073 if (used_math() && save_i387_xstate(*fpstate) < 0)
19074 - return (void __user *)-1L;
19075 + return (__force void __user *)-1L;
19076
19077 return (void __user *)sp;
19078 }
19079 @@ -300,9 +300,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19080 }
19081
19082 if (current->mm->context.vdso)
19083 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19084 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19085 else
19086 - restorer = &frame->retcode;
19087 + restorer = (void __user *)&frame->retcode;
19088 if (ka->sa.sa_flags & SA_RESTORER)
19089 restorer = ka->sa.sa_restorer;
19090
19091 @@ -316,7 +316,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19092 * reasons and because gdb uses it as a signature to notice
19093 * signal handler stack frames.
19094 */
19095 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19096 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19097
19098 if (err)
19099 return -EFAULT;
19100 @@ -370,7 +370,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19101 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19102
19103 /* Set up to return from userspace. */
19104 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19105 + if (current->mm->context.vdso)
19106 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19107 + else
19108 + restorer = (void __user *)&frame->retcode;
19109 if (ka->sa.sa_flags & SA_RESTORER)
19110 restorer = ka->sa.sa_restorer;
19111 put_user_ex(restorer, &frame->pretcode);
19112 @@ -382,7 +385,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19113 * reasons and because gdb uses it as a signature to notice
19114 * signal handler stack frames.
19115 */
19116 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19117 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19118 } put_user_catch(err);
19119
19120 if (err)
19121 @@ -773,7 +776,7 @@ static void do_signal(struct pt_regs *regs)
19122 * X86_32: vm86 regs switched out by assembly code before reaching
19123 * here, so testing against kernel CS suffices.
19124 */
19125 - if (!user_mode(regs))
19126 + if (!user_mode_novm(regs))
19127 return;
19128
19129 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
19130 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19131 index 6e1e406..edfb7cb 100644
19132 --- a/arch/x86/kernel/smpboot.c
19133 +++ b/arch/x86/kernel/smpboot.c
19134 @@ -699,17 +699,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19135 set_idle_for_cpu(cpu, c_idle.idle);
19136 do_rest:
19137 per_cpu(current_task, cpu) = c_idle.idle;
19138 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19139 #ifdef CONFIG_X86_32
19140 /* Stack for startup_32 can be just as for start_secondary onwards */
19141 irq_ctx_init(cpu);
19142 #else
19143 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19144 initial_gs = per_cpu_offset(cpu);
19145 - per_cpu(kernel_stack, cpu) =
19146 - (unsigned long)task_stack_page(c_idle.idle) -
19147 - KERNEL_STACK_OFFSET + THREAD_SIZE;
19148 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19149 #endif
19150 +
19151 + pax_open_kernel();
19152 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19153 + pax_close_kernel();
19154 +
19155 initial_code = (unsigned long)start_secondary;
19156 stack_start = c_idle.idle->thread.sp;
19157
19158 @@ -851,6 +854,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19159
19160 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19161
19162 +#ifdef CONFIG_PAX_PER_CPU_PGD
19163 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19164 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19165 + KERNEL_PGD_PTRS);
19166 +#endif
19167 +
19168 err = do_boot_cpu(apicid, cpu);
19169 if (err) {
19170 pr_debug("do_boot_cpu failed %d\n", err);
19171 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19172 index c346d11..d43b163 100644
19173 --- a/arch/x86/kernel/step.c
19174 +++ b/arch/x86/kernel/step.c
19175 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19176 struct desc_struct *desc;
19177 unsigned long base;
19178
19179 - seg &= ~7UL;
19180 + seg >>= 3;
19181
19182 mutex_lock(&child->mm->context.lock);
19183 - if (unlikely((seg >> 3) >= child->mm->context.size))
19184 + if (unlikely(seg >= child->mm->context.size))
19185 addr = -1L; /* bogus selector, access would fault */
19186 else {
19187 desc = child->mm->context.ldt + seg;
19188 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19189 addr += base;
19190 }
19191 mutex_unlock(&child->mm->context.lock);
19192 - }
19193 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19194 + addr = ktla_ktva(addr);
19195
19196 return addr;
19197 }
19198 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19199 unsigned char opcode[15];
19200 unsigned long addr = convert_ip_to_linear(child, regs);
19201
19202 + if (addr == -EINVAL)
19203 + return 0;
19204 +
19205 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19206 for (i = 0; i < copied; i++) {
19207 switch (opcode[i]) {
19208 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19209 index 0b0cb5f..db6b9ed 100644
19210 --- a/arch/x86/kernel/sys_i386_32.c
19211 +++ b/arch/x86/kernel/sys_i386_32.c
19212 @@ -24,17 +24,224 @@
19213
19214 #include <asm/syscalls.h>
19215
19216 -/*
19217 - * Do a system call from kernel instead of calling sys_execve so we
19218 - * end up with proper pt_regs.
19219 - */
19220 -int kernel_execve(const char *filename,
19221 - const char *const argv[],
19222 - const char *const envp[])
19223 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19224 {
19225 - long __res;
19226 - asm volatile ("int $0x80"
19227 - : "=a" (__res)
19228 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
19229 - return __res;
19230 + unsigned long pax_task_size = TASK_SIZE;
19231 +
19232 +#ifdef CONFIG_PAX_SEGMEXEC
19233 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19234 + pax_task_size = SEGMEXEC_TASK_SIZE;
19235 +#endif
19236 +
19237 + if (len > pax_task_size || addr > pax_task_size - len)
19238 + return -EINVAL;
19239 +
19240 + return 0;
19241 +}
19242 +
19243 +unsigned long
19244 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
19245 + unsigned long len, unsigned long pgoff, unsigned long flags)
19246 +{
19247 + struct mm_struct *mm = current->mm;
19248 + struct vm_area_struct *vma;
19249 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19250 +
19251 +#ifdef CONFIG_PAX_SEGMEXEC
19252 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19253 + pax_task_size = SEGMEXEC_TASK_SIZE;
19254 +#endif
19255 +
19256 + pax_task_size -= PAGE_SIZE;
19257 +
19258 + if (len > pax_task_size)
19259 + return -ENOMEM;
19260 +
19261 + if (flags & MAP_FIXED)
19262 + return addr;
19263 +
19264 +#ifdef CONFIG_PAX_RANDMMAP
19265 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19266 +#endif
19267 +
19268 + if (addr) {
19269 + addr = PAGE_ALIGN(addr);
19270 + if (pax_task_size - len >= addr) {
19271 + vma = find_vma(mm, addr);
19272 + if (check_heap_stack_gap(vma, addr, len))
19273 + return addr;
19274 + }
19275 + }
19276 + if (len > mm->cached_hole_size) {
19277 + start_addr = addr = mm->free_area_cache;
19278 + } else {
19279 + start_addr = addr = mm->mmap_base;
19280 + mm->cached_hole_size = 0;
19281 + }
19282 +
19283 +#ifdef CONFIG_PAX_PAGEEXEC
19284 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19285 + start_addr = 0x00110000UL;
19286 +
19287 +#ifdef CONFIG_PAX_RANDMMAP
19288 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19289 + start_addr += mm->delta_mmap & 0x03FFF000UL;
19290 +#endif
19291 +
19292 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19293 + start_addr = addr = mm->mmap_base;
19294 + else
19295 + addr = start_addr;
19296 + }
19297 +#endif
19298 +
19299 +full_search:
19300 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19301 + /* At this point: (!vma || addr < vma->vm_end). */
19302 + if (pax_task_size - len < addr) {
19303 + /*
19304 + * Start a new search - just in case we missed
19305 + * some holes.
19306 + */
19307 + if (start_addr != mm->mmap_base) {
19308 + start_addr = addr = mm->mmap_base;
19309 + mm->cached_hole_size = 0;
19310 + goto full_search;
19311 + }
19312 + return -ENOMEM;
19313 + }
19314 + if (check_heap_stack_gap(vma, addr, len))
19315 + break;
19316 + if (addr + mm->cached_hole_size < vma->vm_start)
19317 + mm->cached_hole_size = vma->vm_start - addr;
19318 + addr = vma->vm_end;
19319 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
19320 + start_addr = addr = mm->mmap_base;
19321 + mm->cached_hole_size = 0;
19322 + goto full_search;
19323 + }
19324 + }
19325 +
19326 + /*
19327 + * Remember the place where we stopped the search:
19328 + */
19329 + mm->free_area_cache = addr + len;
19330 + return addr;
19331 +}
19332 +
19333 +unsigned long
19334 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19335 + const unsigned long len, const unsigned long pgoff,
19336 + const unsigned long flags)
19337 +{
19338 + struct vm_area_struct *vma;
19339 + struct mm_struct *mm = current->mm;
19340 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19341 +
19342 +#ifdef CONFIG_PAX_SEGMEXEC
19343 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19344 + pax_task_size = SEGMEXEC_TASK_SIZE;
19345 +#endif
19346 +
19347 + pax_task_size -= PAGE_SIZE;
19348 +
19349 + /* requested length too big for entire address space */
19350 + if (len > pax_task_size)
19351 + return -ENOMEM;
19352 +
19353 + if (flags & MAP_FIXED)
19354 + return addr;
19355 +
19356 +#ifdef CONFIG_PAX_PAGEEXEC
19357 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19358 + goto bottomup;
19359 +#endif
19360 +
19361 +#ifdef CONFIG_PAX_RANDMMAP
19362 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19363 +#endif
19364 +
19365 + /* requesting a specific address */
19366 + if (addr) {
19367 + addr = PAGE_ALIGN(addr);
19368 + if (pax_task_size - len >= addr) {
19369 + vma = find_vma(mm, addr);
19370 + if (check_heap_stack_gap(vma, addr, len))
19371 + return addr;
19372 + }
19373 + }
19374 +
19375 + /* check if free_area_cache is useful for us */
19376 + if (len <= mm->cached_hole_size) {
19377 + mm->cached_hole_size = 0;
19378 + mm->free_area_cache = mm->mmap_base;
19379 + }
19380 +
19381 + /* either no address requested or can't fit in requested address hole */
19382 + addr = mm->free_area_cache;
19383 +
19384 + /* make sure it can fit in the remaining address space */
19385 + if (addr > len) {
19386 + vma = find_vma(mm, addr-len);
19387 + if (check_heap_stack_gap(vma, addr - len, len))
19388 + /* remember the address as a hint for next time */
19389 + return (mm->free_area_cache = addr-len);
19390 + }
19391 +
19392 + if (mm->mmap_base < len)
19393 + goto bottomup;
19394 +
19395 + addr = mm->mmap_base-len;
19396 +
19397 + do {
19398 + /*
19399 + * Lookup failure means no vma is above this address,
19400 + * else if new region fits below vma->vm_start,
19401 + * return with success:
19402 + */
19403 + vma = find_vma(mm, addr);
19404 + if (check_heap_stack_gap(vma, addr, len))
19405 + /* remember the address as a hint for next time */
19406 + return (mm->free_area_cache = addr);
19407 +
19408 + /* remember the largest hole we saw so far */
19409 + if (addr + mm->cached_hole_size < vma->vm_start)
19410 + mm->cached_hole_size = vma->vm_start - addr;
19411 +
19412 + /* try just below the current vma->vm_start */
19413 + addr = skip_heap_stack_gap(vma, len);
19414 + } while (!IS_ERR_VALUE(addr));
19415 +
19416 +bottomup:
19417 + /*
19418 + * A failed mmap() very likely causes application failure,
19419 + * so fall back to the bottom-up function here. This scenario
19420 + * can happen with large stack limits and large mmap()
19421 + * allocations.
19422 + */
19423 +
19424 +#ifdef CONFIG_PAX_SEGMEXEC
19425 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19426 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19427 + else
19428 +#endif
19429 +
19430 + mm->mmap_base = TASK_UNMAPPED_BASE;
19431 +
19432 +#ifdef CONFIG_PAX_RANDMMAP
19433 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19434 + mm->mmap_base += mm->delta_mmap;
19435 +#endif
19436 +
19437 + mm->free_area_cache = mm->mmap_base;
19438 + mm->cached_hole_size = ~0UL;
19439 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19440 + /*
19441 + * Restore the topdown base:
19442 + */
19443 + mm->mmap_base = base;
19444 + mm->free_area_cache = base;
19445 + mm->cached_hole_size = ~0UL;
19446 +
19447 + return addr;
19448 }
19449 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19450 index b4d3c39..82bb73b 100644
19451 --- a/arch/x86/kernel/sys_x86_64.c
19452 +++ b/arch/x86/kernel/sys_x86_64.c
19453 @@ -95,8 +95,8 @@ out:
19454 return error;
19455 }
19456
19457 -static void find_start_end(unsigned long flags, unsigned long *begin,
19458 - unsigned long *end)
19459 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
19460 + unsigned long *begin, unsigned long *end)
19461 {
19462 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
19463 unsigned long new_begin;
19464 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19465 *begin = new_begin;
19466 }
19467 } else {
19468 - *begin = TASK_UNMAPPED_BASE;
19469 + *begin = mm->mmap_base;
19470 *end = TASK_SIZE;
19471 }
19472 }
19473 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19474 if (flags & MAP_FIXED)
19475 return addr;
19476
19477 - find_start_end(flags, &begin, &end);
19478 + find_start_end(mm, flags, &begin, &end);
19479
19480 if (len > end)
19481 return -ENOMEM;
19482
19483 +#ifdef CONFIG_PAX_RANDMMAP
19484 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19485 +#endif
19486 +
19487 if (addr) {
19488 addr = PAGE_ALIGN(addr);
19489 vma = find_vma(mm, addr);
19490 - if (end - len >= addr &&
19491 - (!vma || addr + len <= vma->vm_start))
19492 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19493 return addr;
19494 }
19495 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
19496 @@ -172,7 +175,7 @@ full_search:
19497 }
19498 return -ENOMEM;
19499 }
19500 - if (!vma || addr + len <= vma->vm_start) {
19501 + if (check_heap_stack_gap(vma, addr, len)) {
19502 /*
19503 * Remember the place where we stopped the search:
19504 */
19505 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19506 {
19507 struct vm_area_struct *vma;
19508 struct mm_struct *mm = current->mm;
19509 - unsigned long addr = addr0, start_addr;
19510 + unsigned long base = mm->mmap_base, addr = addr0, start_addr;
19511
19512 /* requested length too big for entire address space */
19513 if (len > TASK_SIZE)
19514 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19515 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
19516 goto bottomup;
19517
19518 +#ifdef CONFIG_PAX_RANDMMAP
19519 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19520 +#endif
19521 +
19522 /* requesting a specific address */
19523 if (addr) {
19524 addr = PAGE_ALIGN(addr);
19525 - vma = find_vma(mm, addr);
19526 - if (TASK_SIZE - len >= addr &&
19527 - (!vma || addr + len <= vma->vm_start))
19528 - return addr;
19529 + if (TASK_SIZE - len >= addr) {
19530 + vma = find_vma(mm, addr);
19531 + if (check_heap_stack_gap(vma, addr, len))
19532 + return addr;
19533 + }
19534 }
19535
19536 /* check if free_area_cache is useful for us */
19537 @@ -240,7 +248,7 @@ try_again:
19538 * return with success:
19539 */
19540 vma = find_vma(mm, addr);
19541 - if (!vma || addr+len <= vma->vm_start)
19542 + if (check_heap_stack_gap(vma, addr, len))
19543 /* remember the address as a hint for next time */
19544 return mm->free_area_cache = addr;
19545
19546 @@ -249,8 +257,8 @@ try_again:
19547 mm->cached_hole_size = vma->vm_start - addr;
19548
19549 /* try just below the current vma->vm_start */
19550 - addr = vma->vm_start-len;
19551 - } while (len < vma->vm_start);
19552 + addr = skip_heap_stack_gap(vma, len);
19553 + } while (!IS_ERR_VALUE(addr));
19554
19555 fail:
19556 /*
19557 @@ -270,13 +278,21 @@ bottomup:
19558 * can happen with large stack limits and large mmap()
19559 * allocations.
19560 */
19561 + mm->mmap_base = TASK_UNMAPPED_BASE;
19562 +
19563 +#ifdef CONFIG_PAX_RANDMMAP
19564 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19565 + mm->mmap_base += mm->delta_mmap;
19566 +#endif
19567 +
19568 + mm->free_area_cache = mm->mmap_base;
19569 mm->cached_hole_size = ~0UL;
19570 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19571 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19572 /*
19573 * Restore the topdown base:
19574 */
19575 - mm->free_area_cache = mm->mmap_base;
19576 + mm->mmap_base = base;
19577 + mm->free_area_cache = base;
19578 mm->cached_hole_size = ~0UL;
19579
19580 return addr;
19581 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19582 index 6410744..79758f0 100644
19583 --- a/arch/x86/kernel/tboot.c
19584 +++ b/arch/x86/kernel/tboot.c
19585 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
19586
19587 void tboot_shutdown(u32 shutdown_type)
19588 {
19589 - void (*shutdown)(void);
19590 + void (* __noreturn shutdown)(void);
19591
19592 if (!tboot_enabled())
19593 return;
19594 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
19595
19596 switch_to_tboot_pt();
19597
19598 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19599 + shutdown = (void *)tboot->shutdown_entry;
19600 shutdown();
19601
19602 /* should not reach here */
19603 @@ -299,7 +299,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19604 return 0;
19605 }
19606
19607 -static atomic_t ap_wfs_count;
19608 +static atomic_unchecked_t ap_wfs_count;
19609
19610 static int tboot_wait_for_aps(int num_aps)
19611 {
19612 @@ -323,9 +323,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19613 {
19614 switch (action) {
19615 case CPU_DYING:
19616 - atomic_inc(&ap_wfs_count);
19617 + atomic_inc_unchecked(&ap_wfs_count);
19618 if (num_online_cpus() == 1)
19619 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19620 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19621 return NOTIFY_BAD;
19622 break;
19623 }
19624 @@ -344,7 +344,7 @@ static __init int tboot_late_init(void)
19625
19626 tboot_create_trampoline();
19627
19628 - atomic_set(&ap_wfs_count, 0);
19629 + atomic_set_unchecked(&ap_wfs_count, 0);
19630 register_hotcpu_notifier(&tboot_cpu_notifier);
19631
19632 acpi_os_set_prepare_sleep(&tboot_sleep);
19633 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19634 index c6eba2b..3303326 100644
19635 --- a/arch/x86/kernel/time.c
19636 +++ b/arch/x86/kernel/time.c
19637 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
19638 {
19639 unsigned long pc = instruction_pointer(regs);
19640
19641 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19642 + if (!user_mode(regs) && in_lock_functions(pc)) {
19643 #ifdef CONFIG_FRAME_POINTER
19644 - return *(unsigned long *)(regs->bp + sizeof(long));
19645 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19646 #else
19647 unsigned long *sp =
19648 (unsigned long *)kernel_stack_pointer(regs);
19649 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19650 * or above a saved flags. Eflags has bits 22-31 zero,
19651 * kernel addresses don't.
19652 */
19653 +
19654 +#ifdef CONFIG_PAX_KERNEXEC
19655 + return ktla_ktva(sp[0]);
19656 +#else
19657 if (sp[0] >> 22)
19658 return sp[0];
19659 if (sp[1] >> 22)
19660 return sp[1];
19661 #endif
19662 +
19663 +#endif
19664 }
19665 return pc;
19666 }
19667 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19668 index 9d9d2f9..ed344e4 100644
19669 --- a/arch/x86/kernel/tls.c
19670 +++ b/arch/x86/kernel/tls.c
19671 @@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19672 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19673 return -EINVAL;
19674
19675 +#ifdef CONFIG_PAX_SEGMEXEC
19676 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19677 + return -EINVAL;
19678 +#endif
19679 +
19680 set_tls_desc(p, idx, &info, 1);
19681
19682 return 0;
19683 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19684 index 451c0a7..e57f551 100644
19685 --- a/arch/x86/kernel/trampoline_32.S
19686 +++ b/arch/x86/kernel/trampoline_32.S
19687 @@ -32,6 +32,12 @@
19688 #include <asm/segment.h>
19689 #include <asm/page_types.h>
19690
19691 +#ifdef CONFIG_PAX_KERNEXEC
19692 +#define ta(X) (X)
19693 +#else
19694 +#define ta(X) ((X) - __PAGE_OFFSET)
19695 +#endif
19696 +
19697 #ifdef CONFIG_SMP
19698
19699 .section ".x86_trampoline","a"
19700 @@ -62,7 +68,7 @@ r_base = .
19701 inc %ax # protected mode (PE) bit
19702 lmsw %ax # into protected mode
19703 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19704 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19705 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
19706
19707 # These need to be in the same 64K segment as the above;
19708 # hence we don't use the boot_gdt_descr defined in head.S
19709 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19710 index 09ff517..df19fbff 100644
19711 --- a/arch/x86/kernel/trampoline_64.S
19712 +++ b/arch/x86/kernel/trampoline_64.S
19713 @@ -90,7 +90,7 @@ startup_32:
19714 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19715 movl %eax, %ds
19716
19717 - movl $X86_CR4_PAE, %eax
19718 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19719 movl %eax, %cr4 # Enable PAE mode
19720
19721 # Setup trampoline 4 level pagetables
19722 @@ -138,7 +138,7 @@ tidt:
19723 # so the kernel can live anywhere
19724 .balign 4
19725 tgdt:
19726 - .short tgdt_end - tgdt # gdt limit
19727 + .short tgdt_end - tgdt - 1 # gdt limit
19728 .long tgdt - r_base
19729 .short 0
19730 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19731 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19732 index ff9281f1..30cb4ac 100644
19733 --- a/arch/x86/kernel/traps.c
19734 +++ b/arch/x86/kernel/traps.c
19735 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
19736
19737 /* Do we ignore FPU interrupts ? */
19738 char ignore_fpu_irq;
19739 -
19740 -/*
19741 - * The IDT has to be page-aligned to simplify the Pentium
19742 - * F0 0F bug workaround.
19743 - */
19744 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19745 #endif
19746
19747 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19748 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19749 }
19750
19751 static void __kprobes
19752 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19753 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19754 long error_code, siginfo_t *info)
19755 {
19756 struct task_struct *tsk = current;
19757
19758 #ifdef CONFIG_X86_32
19759 - if (regs->flags & X86_VM_MASK) {
19760 + if (v8086_mode(regs)) {
19761 /*
19762 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19763 * On nmi (interrupt 2), do_trap should not be called.
19764 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19765 }
19766 #endif
19767
19768 - if (!user_mode(regs))
19769 + if (!user_mode_novm(regs))
19770 goto kernel_trap;
19771
19772 #ifdef CONFIG_X86_32
19773 @@ -148,7 +142,7 @@ trap_signal:
19774 printk_ratelimit()) {
19775 printk(KERN_INFO
19776 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19777 - tsk->comm, tsk->pid, str,
19778 + tsk->comm, task_pid_nr(tsk), str,
19779 regs->ip, regs->sp, error_code);
19780 print_vma_addr(" in ", regs->ip);
19781 printk("\n");
19782 @@ -165,8 +159,20 @@ kernel_trap:
19783 if (!fixup_exception(regs)) {
19784 tsk->thread.error_code = error_code;
19785 tsk->thread.trap_nr = trapnr;
19786 +
19787 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19788 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19789 + str = "PAX: suspicious stack segment fault";
19790 +#endif
19791 +
19792 die(str, regs, error_code);
19793 }
19794 +
19795 +#ifdef CONFIG_PAX_REFCOUNT
19796 + if (trapnr == 4)
19797 + pax_report_refcount_overflow(regs);
19798 +#endif
19799 +
19800 return;
19801
19802 #ifdef CONFIG_X86_32
19803 @@ -259,14 +265,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19804 conditional_sti(regs);
19805
19806 #ifdef CONFIG_X86_32
19807 - if (regs->flags & X86_VM_MASK)
19808 + if (v8086_mode(regs))
19809 goto gp_in_vm86;
19810 #endif
19811
19812 tsk = current;
19813 - if (!user_mode(regs))
19814 + if (!user_mode_novm(regs))
19815 goto gp_in_kernel;
19816
19817 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19818 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19819 + struct mm_struct *mm = tsk->mm;
19820 + unsigned long limit;
19821 +
19822 + down_write(&mm->mmap_sem);
19823 + limit = mm->context.user_cs_limit;
19824 + if (limit < TASK_SIZE) {
19825 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19826 + up_write(&mm->mmap_sem);
19827 + return;
19828 + }
19829 + up_write(&mm->mmap_sem);
19830 + }
19831 +#endif
19832 +
19833 tsk->thread.error_code = error_code;
19834 tsk->thread.trap_nr = X86_TRAP_GP;
19835
19836 @@ -299,6 +321,13 @@ gp_in_kernel:
19837 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
19838 X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
19839 return;
19840 +
19841 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19842 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19843 + die("PAX: suspicious general protection fault", regs, error_code);
19844 + else
19845 +#endif
19846 +
19847 die("general protection fault", regs, error_code);
19848 }
19849
19850 @@ -425,7 +454,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19851 /* It's safe to allow irq's after DR6 has been saved */
19852 preempt_conditional_sti(regs);
19853
19854 - if (regs->flags & X86_VM_MASK) {
19855 + if (v8086_mode(regs)) {
19856 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
19857 X86_TRAP_DB);
19858 preempt_conditional_cli(regs);
19859 @@ -440,7 +469,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19860 * We already checked v86 mode above, so we can check for kernel mode
19861 * by just checking the CPL of CS.
19862 */
19863 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
19864 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19865 tsk->thread.debugreg6 &= ~DR_STEP;
19866 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19867 regs->flags &= ~X86_EFLAGS_TF;
19868 @@ -471,7 +500,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19869 return;
19870 conditional_sti(regs);
19871
19872 - if (!user_mode_vm(regs))
19873 + if (!user_mode(regs))
19874 {
19875 if (!fixup_exception(regs)) {
19876 task->thread.error_code = error_code;
19877 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19878 index b9242ba..50c5edd 100644
19879 --- a/arch/x86/kernel/verify_cpu.S
19880 +++ b/arch/x86/kernel/verify_cpu.S
19881 @@ -20,6 +20,7 @@
19882 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19883 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19884 * arch/x86/kernel/head_32.S: processor startup
19885 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19886 *
19887 * verify_cpu, returns the status of longmode and SSE in register %eax.
19888 * 0: Success 1: Failure
19889 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19890 index 255f58a..5e91150 100644
19891 --- a/arch/x86/kernel/vm86_32.c
19892 +++ b/arch/x86/kernel/vm86_32.c
19893 @@ -41,6 +41,7 @@
19894 #include <linux/ptrace.h>
19895 #include <linux/audit.h>
19896 #include <linux/stddef.h>
19897 +#include <linux/grsecurity.h>
19898
19899 #include <asm/uaccess.h>
19900 #include <asm/io.h>
19901 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
19902 do_exit(SIGSEGV);
19903 }
19904
19905 - tss = &per_cpu(init_tss, get_cpu());
19906 + tss = init_tss + get_cpu();
19907 current->thread.sp0 = current->thread.saved_sp0;
19908 current->thread.sysenter_cs = __KERNEL_CS;
19909 load_sp0(tss, &current->thread);
19910 @@ -210,6 +211,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
19911 struct task_struct *tsk;
19912 int tmp, ret = -EPERM;
19913
19914 +#ifdef CONFIG_GRKERNSEC_VM86
19915 + if (!capable(CAP_SYS_RAWIO)) {
19916 + gr_handle_vm86();
19917 + goto out;
19918 + }
19919 +#endif
19920 +
19921 tsk = current;
19922 if (tsk->thread.saved_sp0)
19923 goto out;
19924 @@ -240,6 +248,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
19925 int tmp, ret;
19926 struct vm86plus_struct __user *v86;
19927
19928 +#ifdef CONFIG_GRKERNSEC_VM86
19929 + if (!capable(CAP_SYS_RAWIO)) {
19930 + gr_handle_vm86();
19931 + ret = -EPERM;
19932 + goto out;
19933 + }
19934 +#endif
19935 +
19936 tsk = current;
19937 switch (cmd) {
19938 case VM86_REQUEST_IRQ:
19939 @@ -326,7 +342,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
19940 tsk->thread.saved_fs = info->regs32->fs;
19941 tsk->thread.saved_gs = get_user_gs(info->regs32);
19942
19943 - tss = &per_cpu(init_tss, get_cpu());
19944 + tss = init_tss + get_cpu();
19945 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
19946 if (cpu_has_sep)
19947 tsk->thread.sysenter_cs = 0;
19948 @@ -533,7 +549,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
19949 goto cannot_handle;
19950 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
19951 goto cannot_handle;
19952 - intr_ptr = (unsigned long __user *) (i << 2);
19953 + intr_ptr = (__force unsigned long __user *) (i << 2);
19954 if (get_user(segoffs, intr_ptr))
19955 goto cannot_handle;
19956 if ((segoffs >> 16) == BIOSSEG)
19957 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
19958 index 0f703f1..9e15f64 100644
19959 --- a/arch/x86/kernel/vmlinux.lds.S
19960 +++ b/arch/x86/kernel/vmlinux.lds.S
19961 @@ -26,6 +26,13 @@
19962 #include <asm/page_types.h>
19963 #include <asm/cache.h>
19964 #include <asm/boot.h>
19965 +#include <asm/segment.h>
19966 +
19967 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19968 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
19969 +#else
19970 +#define __KERNEL_TEXT_OFFSET 0
19971 +#endif
19972
19973 #undef i386 /* in case the preprocessor is a 32bit one */
19974
19975 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
19976
19977 PHDRS {
19978 text PT_LOAD FLAGS(5); /* R_E */
19979 +#ifdef CONFIG_X86_32
19980 + module PT_LOAD FLAGS(5); /* R_E */
19981 +#endif
19982 +#ifdef CONFIG_XEN
19983 + rodata PT_LOAD FLAGS(5); /* R_E */
19984 +#else
19985 + rodata PT_LOAD FLAGS(4); /* R__ */
19986 +#endif
19987 data PT_LOAD FLAGS(6); /* RW_ */
19988 -#ifdef CONFIG_X86_64
19989 + init.begin PT_LOAD FLAGS(6); /* RW_ */
19990 #ifdef CONFIG_SMP
19991 percpu PT_LOAD FLAGS(6); /* RW_ */
19992 #endif
19993 + text.init PT_LOAD FLAGS(5); /* R_E */
19994 + text.exit PT_LOAD FLAGS(5); /* R_E */
19995 init PT_LOAD FLAGS(7); /* RWE */
19996 -#endif
19997 note PT_NOTE FLAGS(0); /* ___ */
19998 }
19999
20000 SECTIONS
20001 {
20002 #ifdef CONFIG_X86_32
20003 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20004 - phys_startup_32 = startup_32 - LOAD_OFFSET;
20005 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20006 #else
20007 - . = __START_KERNEL;
20008 - phys_startup_64 = startup_64 - LOAD_OFFSET;
20009 + . = __START_KERNEL;
20010 #endif
20011
20012 /* Text and read-only data */
20013 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
20014 - _text = .;
20015 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20016 /* bootstrapping code */
20017 +#ifdef CONFIG_X86_32
20018 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20019 +#else
20020 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20021 +#endif
20022 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20023 + _text = .;
20024 HEAD_TEXT
20025 #ifdef CONFIG_X86_32
20026 . = ALIGN(PAGE_SIZE);
20027 @@ -108,13 +128,47 @@ SECTIONS
20028 IRQENTRY_TEXT
20029 *(.fixup)
20030 *(.gnu.warning)
20031 - /* End of text section */
20032 - _etext = .;
20033 } :text = 0x9090
20034
20035 - NOTES :text :note
20036 + . += __KERNEL_TEXT_OFFSET;
20037
20038 - EXCEPTION_TABLE(16) :text = 0x9090
20039 +#ifdef CONFIG_X86_32
20040 + . = ALIGN(PAGE_SIZE);
20041 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20042 +
20043 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20044 + MODULES_EXEC_VADDR = .;
20045 + BYTE(0)
20046 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20047 + . = ALIGN(HPAGE_SIZE);
20048 + MODULES_EXEC_END = . - 1;
20049 +#endif
20050 +
20051 + } :module
20052 +#endif
20053 +
20054 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20055 + /* End of text section */
20056 + _etext = . - __KERNEL_TEXT_OFFSET;
20057 + }
20058 +
20059 +#ifdef CONFIG_X86_32
20060 + . = ALIGN(PAGE_SIZE);
20061 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20062 + *(.idt)
20063 + . = ALIGN(PAGE_SIZE);
20064 + *(.empty_zero_page)
20065 + *(.initial_pg_fixmap)
20066 + *(.initial_pg_pmd)
20067 + *(.initial_page_table)
20068 + *(.swapper_pg_dir)
20069 + } :rodata
20070 +#endif
20071 +
20072 + . = ALIGN(PAGE_SIZE);
20073 + NOTES :rodata :note
20074 +
20075 + EXCEPTION_TABLE(16) :rodata
20076
20077 #if defined(CONFIG_DEBUG_RODATA)
20078 /* .text should occupy whole number of pages */
20079 @@ -126,16 +180,20 @@ SECTIONS
20080
20081 /* Data */
20082 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20083 +
20084 +#ifdef CONFIG_PAX_KERNEXEC
20085 + . = ALIGN(HPAGE_SIZE);
20086 +#else
20087 + . = ALIGN(PAGE_SIZE);
20088 +#endif
20089 +
20090 /* Start of data section */
20091 _sdata = .;
20092
20093 /* init_task */
20094 INIT_TASK_DATA(THREAD_SIZE)
20095
20096 -#ifdef CONFIG_X86_32
20097 - /* 32 bit has nosave before _edata */
20098 NOSAVE_DATA
20099 -#endif
20100
20101 PAGE_ALIGNED_DATA(PAGE_SIZE)
20102
20103 @@ -176,12 +234,19 @@ SECTIONS
20104 #endif /* CONFIG_X86_64 */
20105
20106 /* Init code and data - will be freed after init */
20107 - . = ALIGN(PAGE_SIZE);
20108 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20109 + BYTE(0)
20110 +
20111 +#ifdef CONFIG_PAX_KERNEXEC
20112 + . = ALIGN(HPAGE_SIZE);
20113 +#else
20114 + . = ALIGN(PAGE_SIZE);
20115 +#endif
20116 +
20117 __init_begin = .; /* paired with __init_end */
20118 - }
20119 + } :init.begin
20120
20121 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20122 +#ifdef CONFIG_SMP
20123 /*
20124 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20125 * output PHDR, so the next output section - .init.text - should
20126 @@ -190,12 +255,27 @@ SECTIONS
20127 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
20128 #endif
20129
20130 - INIT_TEXT_SECTION(PAGE_SIZE)
20131 -#ifdef CONFIG_X86_64
20132 - :init
20133 -#endif
20134 + . = ALIGN(PAGE_SIZE);
20135 + init_begin = .;
20136 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20137 + VMLINUX_SYMBOL(_sinittext) = .;
20138 + INIT_TEXT
20139 + VMLINUX_SYMBOL(_einittext) = .;
20140 + . = ALIGN(PAGE_SIZE);
20141 + } :text.init
20142
20143 - INIT_DATA_SECTION(16)
20144 + /*
20145 + * .exit.text is discard at runtime, not link time, to deal with
20146 + * references from .altinstructions and .eh_frame
20147 + */
20148 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20149 + EXIT_TEXT
20150 + . = ALIGN(16);
20151 + } :text.exit
20152 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20153 +
20154 + . = ALIGN(PAGE_SIZE);
20155 + INIT_DATA_SECTION(16) :init
20156
20157 /*
20158 * Code and data for a variety of lowlevel trampolines, to be
20159 @@ -269,19 +349,12 @@ SECTIONS
20160 }
20161
20162 . = ALIGN(8);
20163 - /*
20164 - * .exit.text is discard at runtime, not link time, to deal with
20165 - * references from .altinstructions and .eh_frame
20166 - */
20167 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20168 - EXIT_TEXT
20169 - }
20170
20171 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20172 EXIT_DATA
20173 }
20174
20175 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20176 +#ifndef CONFIG_SMP
20177 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
20178 #endif
20179
20180 @@ -300,16 +373,10 @@ SECTIONS
20181 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
20182 __smp_locks = .;
20183 *(.smp_locks)
20184 - . = ALIGN(PAGE_SIZE);
20185 __smp_locks_end = .;
20186 + . = ALIGN(PAGE_SIZE);
20187 }
20188
20189 -#ifdef CONFIG_X86_64
20190 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20191 - NOSAVE_DATA
20192 - }
20193 -#endif
20194 -
20195 /* BSS */
20196 . = ALIGN(PAGE_SIZE);
20197 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20198 @@ -325,6 +392,7 @@ SECTIONS
20199 __brk_base = .;
20200 . += 64 * 1024; /* 64k alignment slop space */
20201 *(.brk_reservation) /* areas brk users have reserved */
20202 + . = ALIGN(HPAGE_SIZE);
20203 __brk_limit = .;
20204 }
20205
20206 @@ -351,13 +419,12 @@ SECTIONS
20207 * for the boot processor.
20208 */
20209 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
20210 -INIT_PER_CPU(gdt_page);
20211 INIT_PER_CPU(irq_stack_union);
20212
20213 /*
20214 * Build-time check on the image size:
20215 */
20216 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20217 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20218 "kernel image bigger than KERNEL_IMAGE_SIZE");
20219
20220 #ifdef CONFIG_SMP
20221 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20222 index 7515cf0..331a1a0 100644
20223 --- a/arch/x86/kernel/vsyscall_64.c
20224 +++ b/arch/x86/kernel/vsyscall_64.c
20225 @@ -54,15 +54,13 @@
20226 DEFINE_VVAR(int, vgetcpu_mode);
20227 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
20228
20229 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
20230 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
20231
20232 static int __init vsyscall_setup(char *str)
20233 {
20234 if (str) {
20235 if (!strcmp("emulate", str))
20236 vsyscall_mode = EMULATE;
20237 - else if (!strcmp("native", str))
20238 - vsyscall_mode = NATIVE;
20239 else if (!strcmp("none", str))
20240 vsyscall_mode = NONE;
20241 else
20242 @@ -206,7 +204,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20243
20244 tsk = current;
20245 if (seccomp_mode(&tsk->seccomp))
20246 - do_exit(SIGKILL);
20247 + do_group_exit(SIGKILL);
20248
20249 /*
20250 * With a real vsyscall, page faults cause SIGSEGV. We want to
20251 @@ -278,8 +276,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20252 return true;
20253
20254 sigsegv:
20255 - force_sig(SIGSEGV, current);
20256 - return true;
20257 + do_group_exit(SIGKILL);
20258 }
20259
20260 /*
20261 @@ -332,10 +329,7 @@ void __init map_vsyscall(void)
20262 extern char __vvar_page;
20263 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
20264
20265 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
20266 - vsyscall_mode == NATIVE
20267 - ? PAGE_KERNEL_VSYSCALL
20268 - : PAGE_KERNEL_VVAR);
20269 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
20270 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
20271 (unsigned long)VSYSCALL_START);
20272
20273 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20274 index 9796c2f..f686fbf 100644
20275 --- a/arch/x86/kernel/x8664_ksyms_64.c
20276 +++ b/arch/x86/kernel/x8664_ksyms_64.c
20277 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
20278 EXPORT_SYMBOL(copy_user_generic_string);
20279 EXPORT_SYMBOL(copy_user_generic_unrolled);
20280 EXPORT_SYMBOL(__copy_user_nocache);
20281 -EXPORT_SYMBOL(_copy_from_user);
20282 -EXPORT_SYMBOL(_copy_to_user);
20283
20284 EXPORT_SYMBOL(copy_page);
20285 EXPORT_SYMBOL(clear_page);
20286 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20287 index e62728e..5fc3a07 100644
20288 --- a/arch/x86/kernel/xsave.c
20289 +++ b/arch/x86/kernel/xsave.c
20290 @@ -131,7 +131,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20291 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20292 return -EINVAL;
20293
20294 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20295 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20296 fx_sw_user->extended_size -
20297 FP_XSTATE_MAGIC2_SIZE));
20298 if (err)
20299 @@ -267,7 +267,7 @@ fx_only:
20300 * the other extended state.
20301 */
20302 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20303 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20304 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20305 }
20306
20307 /*
20308 @@ -296,7 +296,7 @@ int restore_i387_xstate(void __user *buf)
20309 if (use_xsave())
20310 err = restore_user_xstate(buf);
20311 else
20312 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
20313 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
20314 buf);
20315 if (unlikely(err)) {
20316 /*
20317 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
20318 index 9fed5be..18fd595 100644
20319 --- a/arch/x86/kvm/cpuid.c
20320 +++ b/arch/x86/kvm/cpuid.c
20321 @@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
20322 struct kvm_cpuid2 *cpuid,
20323 struct kvm_cpuid_entry2 __user *entries)
20324 {
20325 - int r;
20326 + int r, i;
20327
20328 r = -E2BIG;
20329 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
20330 goto out;
20331 r = -EFAULT;
20332 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
20333 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20334 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20335 goto out;
20336 + for (i = 0; i < cpuid->nent; ++i) {
20337 + struct kvm_cpuid_entry2 cpuid_entry;
20338 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
20339 + goto out;
20340 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
20341 + }
20342 vcpu->arch.cpuid_nent = cpuid->nent;
20343 kvm_apic_set_version(vcpu);
20344 kvm_x86_ops->cpuid_update(vcpu);
20345 @@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20346 struct kvm_cpuid2 *cpuid,
20347 struct kvm_cpuid_entry2 __user *entries)
20348 {
20349 - int r;
20350 + int r, i;
20351
20352 r = -E2BIG;
20353 if (cpuid->nent < vcpu->arch.cpuid_nent)
20354 goto out;
20355 r = -EFAULT;
20356 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20357 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20358 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20359 goto out;
20360 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20361 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20362 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20363 + goto out;
20364 + }
20365 return 0;
20366
20367 out:
20368 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20369 index 8375622..b7bca1a 100644
20370 --- a/arch/x86/kvm/emulate.c
20371 +++ b/arch/x86/kvm/emulate.c
20372 @@ -252,6 +252,7 @@ struct gprefix {
20373
20374 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
20375 do { \
20376 + unsigned long _tmp; \
20377 __asm__ __volatile__ ( \
20378 _PRE_EFLAGS("0", "4", "2") \
20379 _op _suffix " %"_x"3,%1; " \
20380 @@ -266,8 +267,6 @@ struct gprefix {
20381 /* Raw emulation: instruction has two explicit operands. */
20382 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
20383 do { \
20384 - unsigned long _tmp; \
20385 - \
20386 switch ((ctxt)->dst.bytes) { \
20387 case 2: \
20388 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
20389 @@ -283,7 +282,6 @@ struct gprefix {
20390
20391 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20392 do { \
20393 - unsigned long _tmp; \
20394 switch ((ctxt)->dst.bytes) { \
20395 case 1: \
20396 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
20397 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
20398 index 8584322..17d5955 100644
20399 --- a/arch/x86/kvm/lapic.c
20400 +++ b/arch/x86/kvm/lapic.c
20401 @@ -54,7 +54,7 @@
20402 #define APIC_BUS_CYCLE_NS 1
20403
20404 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20405 -#define apic_debug(fmt, arg...)
20406 +#define apic_debug(fmt, arg...) do {} while (0)
20407
20408 #define APIC_LVT_NUM 6
20409 /* 14 is the version for Xeon and Pentium 8.4.8*/
20410 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
20411 index df5a703..63748a7 100644
20412 --- a/arch/x86/kvm/paging_tmpl.h
20413 +++ b/arch/x86/kvm/paging_tmpl.h
20414 @@ -197,7 +197,7 @@ retry_walk:
20415 if (unlikely(kvm_is_error_hva(host_addr)))
20416 goto error;
20417
20418 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20419 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20420 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20421 goto error;
20422
20423 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
20424 index e334389..6839087 100644
20425 --- a/arch/x86/kvm/svm.c
20426 +++ b/arch/x86/kvm/svm.c
20427 @@ -3509,7 +3509,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
20428 int cpu = raw_smp_processor_id();
20429
20430 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
20431 +
20432 + pax_open_kernel();
20433 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20434 + pax_close_kernel();
20435 +
20436 load_TR_desc();
20437 }
20438
20439 @@ -3887,6 +3891,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
20440 #endif
20441 #endif
20442
20443 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20444 + __set_fs(current_thread_info()->addr_limit);
20445 +#endif
20446 +
20447 reload_tss(vcpu);
20448
20449 local_irq_disable();
20450 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
20451 index 4ff0ab9..2ff68d3 100644
20452 --- a/arch/x86/kvm/vmx.c
20453 +++ b/arch/x86/kvm/vmx.c
20454 @@ -1303,7 +1303,11 @@ static void reload_tss(void)
20455 struct desc_struct *descs;
20456
20457 descs = (void *)gdt->address;
20458 +
20459 + pax_open_kernel();
20460 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
20461 + pax_close_kernel();
20462 +
20463 load_TR_desc();
20464 }
20465
20466 @@ -2625,8 +2629,11 @@ static __init int hardware_setup(void)
20467 if (!cpu_has_vmx_flexpriority())
20468 flexpriority_enabled = 0;
20469
20470 - if (!cpu_has_vmx_tpr_shadow())
20471 - kvm_x86_ops->update_cr8_intercept = NULL;
20472 + if (!cpu_has_vmx_tpr_shadow()) {
20473 + pax_open_kernel();
20474 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
20475 + pax_close_kernel();
20476 + }
20477
20478 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20479 kvm_disable_largepages();
20480 @@ -3642,7 +3649,7 @@ static void vmx_set_constant_host_state(void)
20481 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
20482
20483 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20484 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20485 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20486
20487 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20488 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
20489 @@ -6180,6 +6187,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20490 "jmp .Lkvm_vmx_return \n\t"
20491 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20492 ".Lkvm_vmx_return: "
20493 +
20494 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20495 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20496 + ".Lkvm_vmx_return2: "
20497 +#endif
20498 +
20499 /* Save guest registers, load host registers, keep flags */
20500 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20501 "pop %0 \n\t"
20502 @@ -6228,6 +6241,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20503 #endif
20504 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20505 [wordsize]"i"(sizeof(ulong))
20506 +
20507 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20508 + ,[cs]"i"(__KERNEL_CS)
20509 +#endif
20510 +
20511 : "cc", "memory"
20512 , R"ax", R"bx", R"di", R"si"
20513 #ifdef CONFIG_X86_64
20514 @@ -6256,7 +6274,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20515 }
20516 }
20517
20518 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
20519 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
20520 +
20521 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20522 + loadsegment(fs, __KERNEL_PERCPU);
20523 +#endif
20524 +
20525 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20526 + __set_fs(current_thread_info()->addr_limit);
20527 +#endif
20528 +
20529 vmx->loaded_vmcs->launched = 1;
20530
20531 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
20532 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
20533 index 185a2b8..866d2a6 100644
20534 --- a/arch/x86/kvm/x86.c
20535 +++ b/arch/x86/kvm/x86.c
20536 @@ -1357,8 +1357,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
20537 {
20538 struct kvm *kvm = vcpu->kvm;
20539 int lm = is_long_mode(vcpu);
20540 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20541 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20542 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20543 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20544 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20545 : kvm->arch.xen_hvm_config.blob_size_32;
20546 u32 page_num = data & ~PAGE_MASK;
20547 @@ -2213,6 +2213,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
20548 if (n < msr_list.nmsrs)
20549 goto out;
20550 r = -EFAULT;
20551 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20552 + goto out;
20553 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20554 num_msrs_to_save * sizeof(u32)))
20555 goto out;
20556 @@ -2338,7 +2340,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
20557 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20558 struct kvm_interrupt *irq)
20559 {
20560 - if (irq->irq < 0 || irq->irq >= 256)
20561 + if (irq->irq >= 256)
20562 return -EINVAL;
20563 if (irqchip_in_kernel(vcpu->kvm))
20564 return -ENXIO;
20565 @@ -4860,7 +4862,7 @@ static void kvm_set_mmio_spte_mask(void)
20566 kvm_mmu_set_mmio_spte_mask(mask);
20567 }
20568
20569 -int kvm_arch_init(void *opaque)
20570 +int kvm_arch_init(const void *opaque)
20571 {
20572 int r;
20573 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
20574 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
20575 index 642d880..44e0f3f 100644
20576 --- a/arch/x86/lguest/boot.c
20577 +++ b/arch/x86/lguest/boot.c
20578 @@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
20579 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20580 * Launcher to reboot us.
20581 */
20582 -static void lguest_restart(char *reason)
20583 +static __noreturn void lguest_restart(char *reason)
20584 {
20585 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20586 + BUG();
20587 }
20588
20589 /*G:050
20590 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20591 index 00933d5..3a64af9 100644
20592 --- a/arch/x86/lib/atomic64_386_32.S
20593 +++ b/arch/x86/lib/atomic64_386_32.S
20594 @@ -48,6 +48,10 @@ BEGIN(read)
20595 movl (v), %eax
20596 movl 4(v), %edx
20597 RET_ENDP
20598 +BEGIN(read_unchecked)
20599 + movl (v), %eax
20600 + movl 4(v), %edx
20601 +RET_ENDP
20602 #undef v
20603
20604 #define v %esi
20605 @@ -55,6 +59,10 @@ BEGIN(set)
20606 movl %ebx, (v)
20607 movl %ecx, 4(v)
20608 RET_ENDP
20609 +BEGIN(set_unchecked)
20610 + movl %ebx, (v)
20611 + movl %ecx, 4(v)
20612 +RET_ENDP
20613 #undef v
20614
20615 #define v %esi
20616 @@ -70,6 +78,20 @@ RET_ENDP
20617 BEGIN(add)
20618 addl %eax, (v)
20619 adcl %edx, 4(v)
20620 +
20621 +#ifdef CONFIG_PAX_REFCOUNT
20622 + jno 0f
20623 + subl %eax, (v)
20624 + sbbl %edx, 4(v)
20625 + int $4
20626 +0:
20627 + _ASM_EXTABLE(0b, 0b)
20628 +#endif
20629 +
20630 +RET_ENDP
20631 +BEGIN(add_unchecked)
20632 + addl %eax, (v)
20633 + adcl %edx, 4(v)
20634 RET_ENDP
20635 #undef v
20636
20637 @@ -77,6 +99,24 @@ RET_ENDP
20638 BEGIN(add_return)
20639 addl (v), %eax
20640 adcl 4(v), %edx
20641 +
20642 +#ifdef CONFIG_PAX_REFCOUNT
20643 + into
20644 +1234:
20645 + _ASM_EXTABLE(1234b, 2f)
20646 +#endif
20647 +
20648 + movl %eax, (v)
20649 + movl %edx, 4(v)
20650 +
20651 +#ifdef CONFIG_PAX_REFCOUNT
20652 +2:
20653 +#endif
20654 +
20655 +RET_ENDP
20656 +BEGIN(add_return_unchecked)
20657 + addl (v), %eax
20658 + adcl 4(v), %edx
20659 movl %eax, (v)
20660 movl %edx, 4(v)
20661 RET_ENDP
20662 @@ -86,6 +126,20 @@ RET_ENDP
20663 BEGIN(sub)
20664 subl %eax, (v)
20665 sbbl %edx, 4(v)
20666 +
20667 +#ifdef CONFIG_PAX_REFCOUNT
20668 + jno 0f
20669 + addl %eax, (v)
20670 + adcl %edx, 4(v)
20671 + int $4
20672 +0:
20673 + _ASM_EXTABLE(0b, 0b)
20674 +#endif
20675 +
20676 +RET_ENDP
20677 +BEGIN(sub_unchecked)
20678 + subl %eax, (v)
20679 + sbbl %edx, 4(v)
20680 RET_ENDP
20681 #undef v
20682
20683 @@ -96,6 +150,27 @@ BEGIN(sub_return)
20684 sbbl $0, %edx
20685 addl (v), %eax
20686 adcl 4(v), %edx
20687 +
20688 +#ifdef CONFIG_PAX_REFCOUNT
20689 + into
20690 +1234:
20691 + _ASM_EXTABLE(1234b, 2f)
20692 +#endif
20693 +
20694 + movl %eax, (v)
20695 + movl %edx, 4(v)
20696 +
20697 +#ifdef CONFIG_PAX_REFCOUNT
20698 +2:
20699 +#endif
20700 +
20701 +RET_ENDP
20702 +BEGIN(sub_return_unchecked)
20703 + negl %edx
20704 + negl %eax
20705 + sbbl $0, %edx
20706 + addl (v), %eax
20707 + adcl 4(v), %edx
20708 movl %eax, (v)
20709 movl %edx, 4(v)
20710 RET_ENDP
20711 @@ -105,6 +180,20 @@ RET_ENDP
20712 BEGIN(inc)
20713 addl $1, (v)
20714 adcl $0, 4(v)
20715 +
20716 +#ifdef CONFIG_PAX_REFCOUNT
20717 + jno 0f
20718 + subl $1, (v)
20719 + sbbl $0, 4(v)
20720 + int $4
20721 +0:
20722 + _ASM_EXTABLE(0b, 0b)
20723 +#endif
20724 +
20725 +RET_ENDP
20726 +BEGIN(inc_unchecked)
20727 + addl $1, (v)
20728 + adcl $0, 4(v)
20729 RET_ENDP
20730 #undef v
20731
20732 @@ -114,6 +203,26 @@ BEGIN(inc_return)
20733 movl 4(v), %edx
20734 addl $1, %eax
20735 adcl $0, %edx
20736 +
20737 +#ifdef CONFIG_PAX_REFCOUNT
20738 + into
20739 +1234:
20740 + _ASM_EXTABLE(1234b, 2f)
20741 +#endif
20742 +
20743 + movl %eax, (v)
20744 + movl %edx, 4(v)
20745 +
20746 +#ifdef CONFIG_PAX_REFCOUNT
20747 +2:
20748 +#endif
20749 +
20750 +RET_ENDP
20751 +BEGIN(inc_return_unchecked)
20752 + movl (v), %eax
20753 + movl 4(v), %edx
20754 + addl $1, %eax
20755 + adcl $0, %edx
20756 movl %eax, (v)
20757 movl %edx, 4(v)
20758 RET_ENDP
20759 @@ -123,6 +232,20 @@ RET_ENDP
20760 BEGIN(dec)
20761 subl $1, (v)
20762 sbbl $0, 4(v)
20763 +
20764 +#ifdef CONFIG_PAX_REFCOUNT
20765 + jno 0f
20766 + addl $1, (v)
20767 + adcl $0, 4(v)
20768 + int $4
20769 +0:
20770 + _ASM_EXTABLE(0b, 0b)
20771 +#endif
20772 +
20773 +RET_ENDP
20774 +BEGIN(dec_unchecked)
20775 + subl $1, (v)
20776 + sbbl $0, 4(v)
20777 RET_ENDP
20778 #undef v
20779
20780 @@ -132,6 +255,26 @@ BEGIN(dec_return)
20781 movl 4(v), %edx
20782 subl $1, %eax
20783 sbbl $0, %edx
20784 +
20785 +#ifdef CONFIG_PAX_REFCOUNT
20786 + into
20787 +1234:
20788 + _ASM_EXTABLE(1234b, 2f)
20789 +#endif
20790 +
20791 + movl %eax, (v)
20792 + movl %edx, 4(v)
20793 +
20794 +#ifdef CONFIG_PAX_REFCOUNT
20795 +2:
20796 +#endif
20797 +
20798 +RET_ENDP
20799 +BEGIN(dec_return_unchecked)
20800 + movl (v), %eax
20801 + movl 4(v), %edx
20802 + subl $1, %eax
20803 + sbbl $0, %edx
20804 movl %eax, (v)
20805 movl %edx, 4(v)
20806 RET_ENDP
20807 @@ -143,6 +286,13 @@ BEGIN(add_unless)
20808 adcl %edx, %edi
20809 addl (v), %eax
20810 adcl 4(v), %edx
20811 +
20812 +#ifdef CONFIG_PAX_REFCOUNT
20813 + into
20814 +1234:
20815 + _ASM_EXTABLE(1234b, 2f)
20816 +#endif
20817 +
20818 cmpl %eax, %ecx
20819 je 3f
20820 1:
20821 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20822 1:
20823 addl $1, %eax
20824 adcl $0, %edx
20825 +
20826 +#ifdef CONFIG_PAX_REFCOUNT
20827 + into
20828 +1234:
20829 + _ASM_EXTABLE(1234b, 2f)
20830 +#endif
20831 +
20832 movl %eax, (v)
20833 movl %edx, 4(v)
20834 movl $1, %eax
20835 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20836 movl 4(v), %edx
20837 subl $1, %eax
20838 sbbl $0, %edx
20839 +
20840 +#ifdef CONFIG_PAX_REFCOUNT
20841 + into
20842 +1234:
20843 + _ASM_EXTABLE(1234b, 1f)
20844 +#endif
20845 +
20846 js 1f
20847 movl %eax, (v)
20848 movl %edx, 4(v)
20849 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20850 index f5cc9eb..51fa319 100644
20851 --- a/arch/x86/lib/atomic64_cx8_32.S
20852 +++ b/arch/x86/lib/atomic64_cx8_32.S
20853 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20854 CFI_STARTPROC
20855
20856 read64 %ecx
20857 + pax_force_retaddr
20858 ret
20859 CFI_ENDPROC
20860 ENDPROC(atomic64_read_cx8)
20861
20862 +ENTRY(atomic64_read_unchecked_cx8)
20863 + CFI_STARTPROC
20864 +
20865 + read64 %ecx
20866 + pax_force_retaddr
20867 + ret
20868 + CFI_ENDPROC
20869 +ENDPROC(atomic64_read_unchecked_cx8)
20870 +
20871 ENTRY(atomic64_set_cx8)
20872 CFI_STARTPROC
20873
20874 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20875 cmpxchg8b (%esi)
20876 jne 1b
20877
20878 + pax_force_retaddr
20879 ret
20880 CFI_ENDPROC
20881 ENDPROC(atomic64_set_cx8)
20882
20883 +ENTRY(atomic64_set_unchecked_cx8)
20884 + CFI_STARTPROC
20885 +
20886 +1:
20887 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
20888 + * are atomic on 586 and newer */
20889 + cmpxchg8b (%esi)
20890 + jne 1b
20891 +
20892 + pax_force_retaddr
20893 + ret
20894 + CFI_ENDPROC
20895 +ENDPROC(atomic64_set_unchecked_cx8)
20896 +
20897 ENTRY(atomic64_xchg_cx8)
20898 CFI_STARTPROC
20899
20900 @@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
20901 cmpxchg8b (%esi)
20902 jne 1b
20903
20904 + pax_force_retaddr
20905 ret
20906 CFI_ENDPROC
20907 ENDPROC(atomic64_xchg_cx8)
20908
20909 -.macro addsub_return func ins insc
20910 -ENTRY(atomic64_\func\()_return_cx8)
20911 +.macro addsub_return func ins insc unchecked=""
20912 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20913 CFI_STARTPROC
20914 SAVE ebp
20915 SAVE ebx
20916 @@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
20917 movl %edx, %ecx
20918 \ins\()l %esi, %ebx
20919 \insc\()l %edi, %ecx
20920 +
20921 +.ifb \unchecked
20922 +#ifdef CONFIG_PAX_REFCOUNT
20923 + into
20924 +2:
20925 + _ASM_EXTABLE(2b, 3f)
20926 +#endif
20927 +.endif
20928 +
20929 LOCK_PREFIX
20930 cmpxchg8b (%ebp)
20931 jne 1b
20932 -
20933 -10:
20934 movl %ebx, %eax
20935 movl %ecx, %edx
20936 +
20937 +.ifb \unchecked
20938 +#ifdef CONFIG_PAX_REFCOUNT
20939 +3:
20940 +#endif
20941 +.endif
20942 +
20943 RESTORE edi
20944 RESTORE esi
20945 RESTORE ebx
20946 RESTORE ebp
20947 + pax_force_retaddr
20948 ret
20949 CFI_ENDPROC
20950 -ENDPROC(atomic64_\func\()_return_cx8)
20951 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20952 .endm
20953
20954 addsub_return add add adc
20955 addsub_return sub sub sbb
20956 +addsub_return add add adc _unchecked
20957 +addsub_return sub sub sbb _unchecked
20958
20959 -.macro incdec_return func ins insc
20960 -ENTRY(atomic64_\func\()_return_cx8)
20961 +.macro incdec_return func ins insc unchecked=""
20962 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20963 CFI_STARTPROC
20964 SAVE ebx
20965
20966 @@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
20967 movl %edx, %ecx
20968 \ins\()l $1, %ebx
20969 \insc\()l $0, %ecx
20970 +
20971 +.ifb \unchecked
20972 +#ifdef CONFIG_PAX_REFCOUNT
20973 + into
20974 +2:
20975 + _ASM_EXTABLE(2b, 3f)
20976 +#endif
20977 +.endif
20978 +
20979 LOCK_PREFIX
20980 cmpxchg8b (%esi)
20981 jne 1b
20982
20983 -10:
20984 movl %ebx, %eax
20985 movl %ecx, %edx
20986 +
20987 +.ifb \unchecked
20988 +#ifdef CONFIG_PAX_REFCOUNT
20989 +3:
20990 +#endif
20991 +.endif
20992 +
20993 RESTORE ebx
20994 + pax_force_retaddr
20995 ret
20996 CFI_ENDPROC
20997 -ENDPROC(atomic64_\func\()_return_cx8)
20998 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20999 .endm
21000
21001 incdec_return inc add adc
21002 incdec_return dec sub sbb
21003 +incdec_return inc add adc _unchecked
21004 +incdec_return dec sub sbb _unchecked
21005
21006 ENTRY(atomic64_dec_if_positive_cx8)
21007 CFI_STARTPROC
21008 @@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
21009 movl %edx, %ecx
21010 subl $1, %ebx
21011 sbb $0, %ecx
21012 +
21013 +#ifdef CONFIG_PAX_REFCOUNT
21014 + into
21015 +1234:
21016 + _ASM_EXTABLE(1234b, 2f)
21017 +#endif
21018 +
21019 js 2f
21020 LOCK_PREFIX
21021 cmpxchg8b (%esi)
21022 @@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
21023 movl %ebx, %eax
21024 movl %ecx, %edx
21025 RESTORE ebx
21026 + pax_force_retaddr
21027 ret
21028 CFI_ENDPROC
21029 ENDPROC(atomic64_dec_if_positive_cx8)
21030 @@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
21031 movl %edx, %ecx
21032 addl %ebp, %ebx
21033 adcl %edi, %ecx
21034 +
21035 +#ifdef CONFIG_PAX_REFCOUNT
21036 + into
21037 +1234:
21038 + _ASM_EXTABLE(1234b, 3f)
21039 +#endif
21040 +
21041 LOCK_PREFIX
21042 cmpxchg8b (%esi)
21043 jne 1b
21044 @@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
21045 CFI_ADJUST_CFA_OFFSET -8
21046 RESTORE ebx
21047 RESTORE ebp
21048 + pax_force_retaddr
21049 ret
21050 4:
21051 cmpl %edx, 4(%esp)
21052 @@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
21053 xorl %ecx, %ecx
21054 addl $1, %ebx
21055 adcl %edx, %ecx
21056 +
21057 +#ifdef CONFIG_PAX_REFCOUNT
21058 + into
21059 +1234:
21060 + _ASM_EXTABLE(1234b, 3f)
21061 +#endif
21062 +
21063 LOCK_PREFIX
21064 cmpxchg8b (%esi)
21065 jne 1b
21066 @@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
21067 movl $1, %eax
21068 3:
21069 RESTORE ebx
21070 + pax_force_retaddr
21071 ret
21072 CFI_ENDPROC
21073 ENDPROC(atomic64_inc_not_zero_cx8)
21074 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21075 index 78d16a5..fbcf666 100644
21076 --- a/arch/x86/lib/checksum_32.S
21077 +++ b/arch/x86/lib/checksum_32.S
21078 @@ -28,7 +28,8 @@
21079 #include <linux/linkage.h>
21080 #include <asm/dwarf2.h>
21081 #include <asm/errno.h>
21082 -
21083 +#include <asm/segment.h>
21084 +
21085 /*
21086 * computes a partial checksum, e.g. for TCP/UDP fragments
21087 */
21088 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21089
21090 #define ARGBASE 16
21091 #define FP 12
21092 -
21093 -ENTRY(csum_partial_copy_generic)
21094 +
21095 +ENTRY(csum_partial_copy_generic_to_user)
21096 CFI_STARTPROC
21097 +
21098 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21099 + pushl_cfi %gs
21100 + popl_cfi %es
21101 + jmp csum_partial_copy_generic
21102 +#endif
21103 +
21104 +ENTRY(csum_partial_copy_generic_from_user)
21105 +
21106 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21107 + pushl_cfi %gs
21108 + popl_cfi %ds
21109 +#endif
21110 +
21111 +ENTRY(csum_partial_copy_generic)
21112 subl $4,%esp
21113 CFI_ADJUST_CFA_OFFSET 4
21114 pushl_cfi %edi
21115 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
21116 jmp 4f
21117 SRC(1: movw (%esi), %bx )
21118 addl $2, %esi
21119 -DST( movw %bx, (%edi) )
21120 +DST( movw %bx, %es:(%edi) )
21121 addl $2, %edi
21122 addw %bx, %ax
21123 adcl $0, %eax
21124 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
21125 SRC(1: movl (%esi), %ebx )
21126 SRC( movl 4(%esi), %edx )
21127 adcl %ebx, %eax
21128 -DST( movl %ebx, (%edi) )
21129 +DST( movl %ebx, %es:(%edi) )
21130 adcl %edx, %eax
21131 -DST( movl %edx, 4(%edi) )
21132 +DST( movl %edx, %es:4(%edi) )
21133
21134 SRC( movl 8(%esi), %ebx )
21135 SRC( movl 12(%esi), %edx )
21136 adcl %ebx, %eax
21137 -DST( movl %ebx, 8(%edi) )
21138 +DST( movl %ebx, %es:8(%edi) )
21139 adcl %edx, %eax
21140 -DST( movl %edx, 12(%edi) )
21141 +DST( movl %edx, %es:12(%edi) )
21142
21143 SRC( movl 16(%esi), %ebx )
21144 SRC( movl 20(%esi), %edx )
21145 adcl %ebx, %eax
21146 -DST( movl %ebx, 16(%edi) )
21147 +DST( movl %ebx, %es:16(%edi) )
21148 adcl %edx, %eax
21149 -DST( movl %edx, 20(%edi) )
21150 +DST( movl %edx, %es:20(%edi) )
21151
21152 SRC( movl 24(%esi), %ebx )
21153 SRC( movl 28(%esi), %edx )
21154 adcl %ebx, %eax
21155 -DST( movl %ebx, 24(%edi) )
21156 +DST( movl %ebx, %es:24(%edi) )
21157 adcl %edx, %eax
21158 -DST( movl %edx, 28(%edi) )
21159 +DST( movl %edx, %es:28(%edi) )
21160
21161 lea 32(%esi), %esi
21162 lea 32(%edi), %edi
21163 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
21164 shrl $2, %edx # This clears CF
21165 SRC(3: movl (%esi), %ebx )
21166 adcl %ebx, %eax
21167 -DST( movl %ebx, (%edi) )
21168 +DST( movl %ebx, %es:(%edi) )
21169 lea 4(%esi), %esi
21170 lea 4(%edi), %edi
21171 dec %edx
21172 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
21173 jb 5f
21174 SRC( movw (%esi), %cx )
21175 leal 2(%esi), %esi
21176 -DST( movw %cx, (%edi) )
21177 +DST( movw %cx, %es:(%edi) )
21178 leal 2(%edi), %edi
21179 je 6f
21180 shll $16,%ecx
21181 SRC(5: movb (%esi), %cl )
21182 -DST( movb %cl, (%edi) )
21183 +DST( movb %cl, %es:(%edi) )
21184 6: addl %ecx, %eax
21185 adcl $0, %eax
21186 7:
21187 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
21188
21189 6001:
21190 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21191 - movl $-EFAULT, (%ebx)
21192 + movl $-EFAULT, %ss:(%ebx)
21193
21194 # zero the complete destination - computing the rest
21195 # is too much work
21196 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
21197
21198 6002:
21199 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21200 - movl $-EFAULT,(%ebx)
21201 + movl $-EFAULT,%ss:(%ebx)
21202 jmp 5000b
21203
21204 .previous
21205
21206 + pushl_cfi %ss
21207 + popl_cfi %ds
21208 + pushl_cfi %ss
21209 + popl_cfi %es
21210 popl_cfi %ebx
21211 CFI_RESTORE ebx
21212 popl_cfi %esi
21213 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
21214 popl_cfi %ecx # equivalent to addl $4,%esp
21215 ret
21216 CFI_ENDPROC
21217 -ENDPROC(csum_partial_copy_generic)
21218 +ENDPROC(csum_partial_copy_generic_to_user)
21219
21220 #else
21221
21222 /* Version for PentiumII/PPro */
21223
21224 #define ROUND1(x) \
21225 + nop; nop; nop; \
21226 SRC(movl x(%esi), %ebx ) ; \
21227 addl %ebx, %eax ; \
21228 - DST(movl %ebx, x(%edi) ) ;
21229 + DST(movl %ebx, %es:x(%edi)) ;
21230
21231 #define ROUND(x) \
21232 + nop; nop; nop; \
21233 SRC(movl x(%esi), %ebx ) ; \
21234 adcl %ebx, %eax ; \
21235 - DST(movl %ebx, x(%edi) ) ;
21236 + DST(movl %ebx, %es:x(%edi)) ;
21237
21238 #define ARGBASE 12
21239 -
21240 -ENTRY(csum_partial_copy_generic)
21241 +
21242 +ENTRY(csum_partial_copy_generic_to_user)
21243 CFI_STARTPROC
21244 +
21245 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21246 + pushl_cfi %gs
21247 + popl_cfi %es
21248 + jmp csum_partial_copy_generic
21249 +#endif
21250 +
21251 +ENTRY(csum_partial_copy_generic_from_user)
21252 +
21253 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21254 + pushl_cfi %gs
21255 + popl_cfi %ds
21256 +#endif
21257 +
21258 +ENTRY(csum_partial_copy_generic)
21259 pushl_cfi %ebx
21260 CFI_REL_OFFSET ebx, 0
21261 pushl_cfi %edi
21262 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
21263 subl %ebx, %edi
21264 lea -1(%esi),%edx
21265 andl $-32,%edx
21266 - lea 3f(%ebx,%ebx), %ebx
21267 + lea 3f(%ebx,%ebx,2), %ebx
21268 testl %esi, %esi
21269 jmp *%ebx
21270 1: addl $64,%esi
21271 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
21272 jb 5f
21273 SRC( movw (%esi), %dx )
21274 leal 2(%esi), %esi
21275 -DST( movw %dx, (%edi) )
21276 +DST( movw %dx, %es:(%edi) )
21277 leal 2(%edi), %edi
21278 je 6f
21279 shll $16,%edx
21280 5:
21281 SRC( movb (%esi), %dl )
21282 -DST( movb %dl, (%edi) )
21283 +DST( movb %dl, %es:(%edi) )
21284 6: addl %edx, %eax
21285 adcl $0, %eax
21286 7:
21287 .section .fixup, "ax"
21288 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21289 - movl $-EFAULT, (%ebx)
21290 + movl $-EFAULT, %ss:(%ebx)
21291 # zero the complete destination (computing the rest is too much work)
21292 movl ARGBASE+8(%esp),%edi # dst
21293 movl ARGBASE+12(%esp),%ecx # len
21294 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
21295 rep; stosb
21296 jmp 7b
21297 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21298 - movl $-EFAULT, (%ebx)
21299 + movl $-EFAULT, %ss:(%ebx)
21300 jmp 7b
21301 .previous
21302
21303 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21304 + pushl_cfi %ss
21305 + popl_cfi %ds
21306 + pushl_cfi %ss
21307 + popl_cfi %es
21308 +#endif
21309 +
21310 popl_cfi %esi
21311 CFI_RESTORE esi
21312 popl_cfi %edi
21313 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
21314 CFI_RESTORE ebx
21315 ret
21316 CFI_ENDPROC
21317 -ENDPROC(csum_partial_copy_generic)
21318 +ENDPROC(csum_partial_copy_generic_to_user)
21319
21320 #undef ROUND
21321 #undef ROUND1
21322 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21323 index f2145cf..cea889d 100644
21324 --- a/arch/x86/lib/clear_page_64.S
21325 +++ b/arch/x86/lib/clear_page_64.S
21326 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
21327 movl $4096/8,%ecx
21328 xorl %eax,%eax
21329 rep stosq
21330 + pax_force_retaddr
21331 ret
21332 CFI_ENDPROC
21333 ENDPROC(clear_page_c)
21334 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
21335 movl $4096,%ecx
21336 xorl %eax,%eax
21337 rep stosb
21338 + pax_force_retaddr
21339 ret
21340 CFI_ENDPROC
21341 ENDPROC(clear_page_c_e)
21342 @@ -43,6 +45,7 @@ ENTRY(clear_page)
21343 leaq 64(%rdi),%rdi
21344 jnz .Lloop
21345 nop
21346 + pax_force_retaddr
21347 ret
21348 CFI_ENDPROC
21349 .Lclear_page_end:
21350 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
21351
21352 #include <asm/cpufeature.h>
21353
21354 - .section .altinstr_replacement,"ax"
21355 + .section .altinstr_replacement,"a"
21356 1: .byte 0xeb /* jmp <disp8> */
21357 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21358 2: .byte 0xeb /* jmp <disp8> */
21359 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21360 index 1e572c5..2a162cd 100644
21361 --- a/arch/x86/lib/cmpxchg16b_emu.S
21362 +++ b/arch/x86/lib/cmpxchg16b_emu.S
21363 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21364
21365 popf
21366 mov $1, %al
21367 + pax_force_retaddr
21368 ret
21369
21370 not_same:
21371 popf
21372 xor %al,%al
21373 + pax_force_retaddr
21374 ret
21375
21376 CFI_ENDPROC
21377 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21378 index 6b34d04..dccb07f 100644
21379 --- a/arch/x86/lib/copy_page_64.S
21380 +++ b/arch/x86/lib/copy_page_64.S
21381 @@ -9,6 +9,7 @@ copy_page_c:
21382 CFI_STARTPROC
21383 movl $4096/8,%ecx
21384 rep movsq
21385 + pax_force_retaddr
21386 ret
21387 CFI_ENDPROC
21388 ENDPROC(copy_page_c)
21389 @@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
21390
21391 ENTRY(copy_page)
21392 CFI_STARTPROC
21393 - subq $2*8,%rsp
21394 - CFI_ADJUST_CFA_OFFSET 2*8
21395 + subq $3*8,%rsp
21396 + CFI_ADJUST_CFA_OFFSET 3*8
21397 movq %rbx,(%rsp)
21398 CFI_REL_OFFSET rbx, 0
21399 movq %r12,1*8(%rsp)
21400 CFI_REL_OFFSET r12, 1*8
21401 + movq %r13,2*8(%rsp)
21402 + CFI_REL_OFFSET r13, 2*8
21403
21404 movl $(4096/64)-5,%ecx
21405 .p2align 4
21406 @@ -37,7 +40,7 @@ ENTRY(copy_page)
21407 movq 16 (%rsi), %rdx
21408 movq 24 (%rsi), %r8
21409 movq 32 (%rsi), %r9
21410 - movq 40 (%rsi), %r10
21411 + movq 40 (%rsi), %r13
21412 movq 48 (%rsi), %r11
21413 movq 56 (%rsi), %r12
21414
21415 @@ -48,7 +51,7 @@ ENTRY(copy_page)
21416 movq %rdx, 16 (%rdi)
21417 movq %r8, 24 (%rdi)
21418 movq %r9, 32 (%rdi)
21419 - movq %r10, 40 (%rdi)
21420 + movq %r13, 40 (%rdi)
21421 movq %r11, 48 (%rdi)
21422 movq %r12, 56 (%rdi)
21423
21424 @@ -67,7 +70,7 @@ ENTRY(copy_page)
21425 movq 16 (%rsi), %rdx
21426 movq 24 (%rsi), %r8
21427 movq 32 (%rsi), %r9
21428 - movq 40 (%rsi), %r10
21429 + movq 40 (%rsi), %r13
21430 movq 48 (%rsi), %r11
21431 movq 56 (%rsi), %r12
21432
21433 @@ -76,7 +79,7 @@ ENTRY(copy_page)
21434 movq %rdx, 16 (%rdi)
21435 movq %r8, 24 (%rdi)
21436 movq %r9, 32 (%rdi)
21437 - movq %r10, 40 (%rdi)
21438 + movq %r13, 40 (%rdi)
21439 movq %r11, 48 (%rdi)
21440 movq %r12, 56 (%rdi)
21441
21442 @@ -89,8 +92,11 @@ ENTRY(copy_page)
21443 CFI_RESTORE rbx
21444 movq 1*8(%rsp),%r12
21445 CFI_RESTORE r12
21446 - addq $2*8,%rsp
21447 - CFI_ADJUST_CFA_OFFSET -2*8
21448 + movq 2*8(%rsp),%r13
21449 + CFI_RESTORE r13
21450 + addq $3*8,%rsp
21451 + CFI_ADJUST_CFA_OFFSET -3*8
21452 + pax_force_retaddr
21453 ret
21454 .Lcopy_page_end:
21455 CFI_ENDPROC
21456 @@ -101,7 +107,7 @@ ENDPROC(copy_page)
21457
21458 #include <asm/cpufeature.h>
21459
21460 - .section .altinstr_replacement,"ax"
21461 + .section .altinstr_replacement,"a"
21462 1: .byte 0xeb /* jmp <disp8> */
21463 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21464 2:
21465 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21466 index 0248402..821c786 100644
21467 --- a/arch/x86/lib/copy_user_64.S
21468 +++ b/arch/x86/lib/copy_user_64.S
21469 @@ -16,6 +16,7 @@
21470 #include <asm/thread_info.h>
21471 #include <asm/cpufeature.h>
21472 #include <asm/alternative-asm.h>
21473 +#include <asm/pgtable.h>
21474
21475 /*
21476 * By placing feature2 after feature1 in altinstructions section, we logically
21477 @@ -29,7 +30,7 @@
21478 .byte 0xe9 /* 32bit jump */
21479 .long \orig-1f /* by default jump to orig */
21480 1:
21481 - .section .altinstr_replacement,"ax"
21482 + .section .altinstr_replacement,"a"
21483 2: .byte 0xe9 /* near jump with 32bit immediate */
21484 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21485 3: .byte 0xe9 /* near jump with 32bit immediate */
21486 @@ -71,47 +72,20 @@
21487 #endif
21488 .endm
21489
21490 -/* Standard copy_to_user with segment limit checking */
21491 -ENTRY(_copy_to_user)
21492 - CFI_STARTPROC
21493 - GET_THREAD_INFO(%rax)
21494 - movq %rdi,%rcx
21495 - addq %rdx,%rcx
21496 - jc bad_to_user
21497 - cmpq TI_addr_limit(%rax),%rcx
21498 - ja bad_to_user
21499 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21500 - copy_user_generic_unrolled,copy_user_generic_string, \
21501 - copy_user_enhanced_fast_string
21502 - CFI_ENDPROC
21503 -ENDPROC(_copy_to_user)
21504 -
21505 -/* Standard copy_from_user with segment limit checking */
21506 -ENTRY(_copy_from_user)
21507 - CFI_STARTPROC
21508 - GET_THREAD_INFO(%rax)
21509 - movq %rsi,%rcx
21510 - addq %rdx,%rcx
21511 - jc bad_from_user
21512 - cmpq TI_addr_limit(%rax),%rcx
21513 - ja bad_from_user
21514 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21515 - copy_user_generic_unrolled,copy_user_generic_string, \
21516 - copy_user_enhanced_fast_string
21517 - CFI_ENDPROC
21518 -ENDPROC(_copy_from_user)
21519 -
21520 .section .fixup,"ax"
21521 /* must zero dest */
21522 ENTRY(bad_from_user)
21523 bad_from_user:
21524 CFI_STARTPROC
21525 + testl %edx,%edx
21526 + js bad_to_user
21527 movl %edx,%ecx
21528 xorl %eax,%eax
21529 rep
21530 stosb
21531 bad_to_user:
21532 movl %edx,%eax
21533 + pax_force_retaddr
21534 ret
21535 CFI_ENDPROC
21536 ENDPROC(bad_from_user)
21537 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
21538 jz 17f
21539 1: movq (%rsi),%r8
21540 2: movq 1*8(%rsi),%r9
21541 -3: movq 2*8(%rsi),%r10
21542 +3: movq 2*8(%rsi),%rax
21543 4: movq 3*8(%rsi),%r11
21544 5: movq %r8,(%rdi)
21545 6: movq %r9,1*8(%rdi)
21546 -7: movq %r10,2*8(%rdi)
21547 +7: movq %rax,2*8(%rdi)
21548 8: movq %r11,3*8(%rdi)
21549 9: movq 4*8(%rsi),%r8
21550 10: movq 5*8(%rsi),%r9
21551 -11: movq 6*8(%rsi),%r10
21552 +11: movq 6*8(%rsi),%rax
21553 12: movq 7*8(%rsi),%r11
21554 13: movq %r8,4*8(%rdi)
21555 14: movq %r9,5*8(%rdi)
21556 -15: movq %r10,6*8(%rdi)
21557 +15: movq %rax,6*8(%rdi)
21558 16: movq %r11,7*8(%rdi)
21559 leaq 64(%rsi),%rsi
21560 leaq 64(%rdi),%rdi
21561 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
21562 decl %ecx
21563 jnz 21b
21564 23: xor %eax,%eax
21565 + pax_force_retaddr
21566 ret
21567
21568 .section .fixup,"ax"
21569 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
21570 3: rep
21571 movsb
21572 4: xorl %eax,%eax
21573 + pax_force_retaddr
21574 ret
21575
21576 .section .fixup,"ax"
21577 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
21578 1: rep
21579 movsb
21580 2: xorl %eax,%eax
21581 + pax_force_retaddr
21582 ret
21583
21584 .section .fixup,"ax"
21585 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21586 index cb0c112..e3a6895 100644
21587 --- a/arch/x86/lib/copy_user_nocache_64.S
21588 +++ b/arch/x86/lib/copy_user_nocache_64.S
21589 @@ -8,12 +8,14 @@
21590
21591 #include <linux/linkage.h>
21592 #include <asm/dwarf2.h>
21593 +#include <asm/alternative-asm.h>
21594
21595 #define FIX_ALIGNMENT 1
21596
21597 #include <asm/current.h>
21598 #include <asm/asm-offsets.h>
21599 #include <asm/thread_info.h>
21600 +#include <asm/pgtable.h>
21601
21602 .macro ALIGN_DESTINATION
21603 #ifdef FIX_ALIGNMENT
21604 @@ -50,6 +52,15 @@
21605 */
21606 ENTRY(__copy_user_nocache)
21607 CFI_STARTPROC
21608 +
21609 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21610 + mov $PAX_USER_SHADOW_BASE,%rcx
21611 + cmp %rcx,%rsi
21612 + jae 1f
21613 + add %rcx,%rsi
21614 +1:
21615 +#endif
21616 +
21617 cmpl $8,%edx
21618 jb 20f /* less then 8 bytes, go to byte copy loop */
21619 ALIGN_DESTINATION
21620 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21621 jz 17f
21622 1: movq (%rsi),%r8
21623 2: movq 1*8(%rsi),%r9
21624 -3: movq 2*8(%rsi),%r10
21625 +3: movq 2*8(%rsi),%rax
21626 4: movq 3*8(%rsi),%r11
21627 5: movnti %r8,(%rdi)
21628 6: movnti %r9,1*8(%rdi)
21629 -7: movnti %r10,2*8(%rdi)
21630 +7: movnti %rax,2*8(%rdi)
21631 8: movnti %r11,3*8(%rdi)
21632 9: movq 4*8(%rsi),%r8
21633 10: movq 5*8(%rsi),%r9
21634 -11: movq 6*8(%rsi),%r10
21635 +11: movq 6*8(%rsi),%rax
21636 12: movq 7*8(%rsi),%r11
21637 13: movnti %r8,4*8(%rdi)
21638 14: movnti %r9,5*8(%rdi)
21639 -15: movnti %r10,6*8(%rdi)
21640 +15: movnti %rax,6*8(%rdi)
21641 16: movnti %r11,7*8(%rdi)
21642 leaq 64(%rsi),%rsi
21643 leaq 64(%rdi),%rdi
21644 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
21645 jnz 21b
21646 23: xorl %eax,%eax
21647 sfence
21648 + pax_force_retaddr
21649 ret
21650
21651 .section .fixup,"ax"
21652 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21653 index fb903b7..c92b7f7 100644
21654 --- a/arch/x86/lib/csum-copy_64.S
21655 +++ b/arch/x86/lib/csum-copy_64.S
21656 @@ -8,6 +8,7 @@
21657 #include <linux/linkage.h>
21658 #include <asm/dwarf2.h>
21659 #include <asm/errno.h>
21660 +#include <asm/alternative-asm.h>
21661
21662 /*
21663 * Checksum copy with exception handling.
21664 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
21665 CFI_RESTORE rbp
21666 addq $7*8, %rsp
21667 CFI_ADJUST_CFA_OFFSET -7*8
21668 + pax_force_retaddr 0, 1
21669 ret
21670 CFI_RESTORE_STATE
21671
21672 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21673 index 459b58a..9570bc7 100644
21674 --- a/arch/x86/lib/csum-wrappers_64.c
21675 +++ b/arch/x86/lib/csum-wrappers_64.c
21676 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21677 len -= 2;
21678 }
21679 }
21680 - isum = csum_partial_copy_generic((__force const void *)src,
21681 +
21682 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21683 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21684 + src += PAX_USER_SHADOW_BASE;
21685 +#endif
21686 +
21687 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
21688 dst, len, isum, errp, NULL);
21689 if (unlikely(*errp))
21690 goto out_err;
21691 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21692 }
21693
21694 *errp = 0;
21695 - return csum_partial_copy_generic(src, (void __force *)dst,
21696 +
21697 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21698 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21699 + dst += PAX_USER_SHADOW_BASE;
21700 +#endif
21701 +
21702 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21703 len, isum, NULL, errp);
21704 }
21705 EXPORT_SYMBOL(csum_partial_copy_to_user);
21706 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21707 index 51f1504..ddac4c1 100644
21708 --- a/arch/x86/lib/getuser.S
21709 +++ b/arch/x86/lib/getuser.S
21710 @@ -33,15 +33,38 @@
21711 #include <asm/asm-offsets.h>
21712 #include <asm/thread_info.h>
21713 #include <asm/asm.h>
21714 +#include <asm/segment.h>
21715 +#include <asm/pgtable.h>
21716 +#include <asm/alternative-asm.h>
21717 +
21718 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21719 +#define __copyuser_seg gs;
21720 +#else
21721 +#define __copyuser_seg
21722 +#endif
21723
21724 .text
21725 ENTRY(__get_user_1)
21726 CFI_STARTPROC
21727 +
21728 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21729 GET_THREAD_INFO(%_ASM_DX)
21730 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21731 jae bad_get_user
21732 -1: movzb (%_ASM_AX),%edx
21733 +
21734 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21735 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21736 + cmp %_ASM_DX,%_ASM_AX
21737 + jae 1234f
21738 + add %_ASM_DX,%_ASM_AX
21739 +1234:
21740 +#endif
21741 +
21742 +#endif
21743 +
21744 +1: __copyuser_seg movzb (%_ASM_AX),%edx
21745 xor %eax,%eax
21746 + pax_force_retaddr
21747 ret
21748 CFI_ENDPROC
21749 ENDPROC(__get_user_1)
21750 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21751 ENTRY(__get_user_2)
21752 CFI_STARTPROC
21753 add $1,%_ASM_AX
21754 +
21755 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21756 jc bad_get_user
21757 GET_THREAD_INFO(%_ASM_DX)
21758 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21759 jae bad_get_user
21760 -2: movzwl -1(%_ASM_AX),%edx
21761 +
21762 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21763 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21764 + cmp %_ASM_DX,%_ASM_AX
21765 + jae 1234f
21766 + add %_ASM_DX,%_ASM_AX
21767 +1234:
21768 +#endif
21769 +
21770 +#endif
21771 +
21772 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21773 xor %eax,%eax
21774 + pax_force_retaddr
21775 ret
21776 CFI_ENDPROC
21777 ENDPROC(__get_user_2)
21778 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21779 ENTRY(__get_user_4)
21780 CFI_STARTPROC
21781 add $3,%_ASM_AX
21782 +
21783 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21784 jc bad_get_user
21785 GET_THREAD_INFO(%_ASM_DX)
21786 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21787 jae bad_get_user
21788 -3: mov -3(%_ASM_AX),%edx
21789 +
21790 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21791 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21792 + cmp %_ASM_DX,%_ASM_AX
21793 + jae 1234f
21794 + add %_ASM_DX,%_ASM_AX
21795 +1234:
21796 +#endif
21797 +
21798 +#endif
21799 +
21800 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
21801 xor %eax,%eax
21802 + pax_force_retaddr
21803 ret
21804 CFI_ENDPROC
21805 ENDPROC(__get_user_4)
21806 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21807 GET_THREAD_INFO(%_ASM_DX)
21808 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21809 jae bad_get_user
21810 +
21811 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21812 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21813 + cmp %_ASM_DX,%_ASM_AX
21814 + jae 1234f
21815 + add %_ASM_DX,%_ASM_AX
21816 +1234:
21817 +#endif
21818 +
21819 4: movq -7(%_ASM_AX),%_ASM_DX
21820 xor %eax,%eax
21821 + pax_force_retaddr
21822 ret
21823 CFI_ENDPROC
21824 ENDPROC(__get_user_8)
21825 @@ -91,6 +152,7 @@ bad_get_user:
21826 CFI_STARTPROC
21827 xor %edx,%edx
21828 mov $(-EFAULT),%_ASM_AX
21829 + pax_force_retaddr
21830 ret
21831 CFI_ENDPROC
21832 END(bad_get_user)
21833 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21834 index b1e6c4b..21ae8fc 100644
21835 --- a/arch/x86/lib/insn.c
21836 +++ b/arch/x86/lib/insn.c
21837 @@ -21,6 +21,11 @@
21838 #include <linux/string.h>
21839 #include <asm/inat.h>
21840 #include <asm/insn.h>
21841 +#ifdef __KERNEL__
21842 +#include <asm/pgtable_types.h>
21843 +#else
21844 +#define ktla_ktva(addr) addr
21845 +#endif
21846
21847 /* Verify next sizeof(t) bytes can be on the same instruction */
21848 #define validate_next(t, insn, n) \
21849 @@ -49,8 +54,8 @@
21850 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21851 {
21852 memset(insn, 0, sizeof(*insn));
21853 - insn->kaddr = kaddr;
21854 - insn->next_byte = kaddr;
21855 + insn->kaddr = ktla_ktva(kaddr);
21856 + insn->next_byte = ktla_ktva(kaddr);
21857 insn->x86_64 = x86_64 ? 1 : 0;
21858 insn->opnd_bytes = 4;
21859 if (x86_64)
21860 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21861 index 05a95e7..326f2fa 100644
21862 --- a/arch/x86/lib/iomap_copy_64.S
21863 +++ b/arch/x86/lib/iomap_copy_64.S
21864 @@ -17,6 +17,7 @@
21865
21866 #include <linux/linkage.h>
21867 #include <asm/dwarf2.h>
21868 +#include <asm/alternative-asm.h>
21869
21870 /*
21871 * override generic version in lib/iomap_copy.c
21872 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21873 CFI_STARTPROC
21874 movl %edx,%ecx
21875 rep movsd
21876 + pax_force_retaddr
21877 ret
21878 CFI_ENDPROC
21879 ENDPROC(__iowrite32_copy)
21880 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21881 index 1c273be..da9cc0e 100644
21882 --- a/arch/x86/lib/memcpy_64.S
21883 +++ b/arch/x86/lib/memcpy_64.S
21884 @@ -33,6 +33,7 @@
21885 rep movsq
21886 movl %edx, %ecx
21887 rep movsb
21888 + pax_force_retaddr
21889 ret
21890 .Lmemcpy_e:
21891 .previous
21892 @@ -49,6 +50,7 @@
21893 movq %rdi, %rax
21894 movq %rdx, %rcx
21895 rep movsb
21896 + pax_force_retaddr
21897 ret
21898 .Lmemcpy_e_e:
21899 .previous
21900 @@ -76,13 +78,13 @@ ENTRY(memcpy)
21901 */
21902 movq 0*8(%rsi), %r8
21903 movq 1*8(%rsi), %r9
21904 - movq 2*8(%rsi), %r10
21905 + movq 2*8(%rsi), %rcx
21906 movq 3*8(%rsi), %r11
21907 leaq 4*8(%rsi), %rsi
21908
21909 movq %r8, 0*8(%rdi)
21910 movq %r9, 1*8(%rdi)
21911 - movq %r10, 2*8(%rdi)
21912 + movq %rcx, 2*8(%rdi)
21913 movq %r11, 3*8(%rdi)
21914 leaq 4*8(%rdi), %rdi
21915 jae .Lcopy_forward_loop
21916 @@ -105,12 +107,12 @@ ENTRY(memcpy)
21917 subq $0x20, %rdx
21918 movq -1*8(%rsi), %r8
21919 movq -2*8(%rsi), %r9
21920 - movq -3*8(%rsi), %r10
21921 + movq -3*8(%rsi), %rcx
21922 movq -4*8(%rsi), %r11
21923 leaq -4*8(%rsi), %rsi
21924 movq %r8, -1*8(%rdi)
21925 movq %r9, -2*8(%rdi)
21926 - movq %r10, -3*8(%rdi)
21927 + movq %rcx, -3*8(%rdi)
21928 movq %r11, -4*8(%rdi)
21929 leaq -4*8(%rdi), %rdi
21930 jae .Lcopy_backward_loop
21931 @@ -130,12 +132,13 @@ ENTRY(memcpy)
21932 */
21933 movq 0*8(%rsi), %r8
21934 movq 1*8(%rsi), %r9
21935 - movq -2*8(%rsi, %rdx), %r10
21936 + movq -2*8(%rsi, %rdx), %rcx
21937 movq -1*8(%rsi, %rdx), %r11
21938 movq %r8, 0*8(%rdi)
21939 movq %r9, 1*8(%rdi)
21940 - movq %r10, -2*8(%rdi, %rdx)
21941 + movq %rcx, -2*8(%rdi, %rdx)
21942 movq %r11, -1*8(%rdi, %rdx)
21943 + pax_force_retaddr
21944 retq
21945 .p2align 4
21946 .Lless_16bytes:
21947 @@ -148,6 +151,7 @@ ENTRY(memcpy)
21948 movq -1*8(%rsi, %rdx), %r9
21949 movq %r8, 0*8(%rdi)
21950 movq %r9, -1*8(%rdi, %rdx)
21951 + pax_force_retaddr
21952 retq
21953 .p2align 4
21954 .Lless_8bytes:
21955 @@ -161,6 +165,7 @@ ENTRY(memcpy)
21956 movl -4(%rsi, %rdx), %r8d
21957 movl %ecx, (%rdi)
21958 movl %r8d, -4(%rdi, %rdx)
21959 + pax_force_retaddr
21960 retq
21961 .p2align 4
21962 .Lless_3bytes:
21963 @@ -179,6 +184,7 @@ ENTRY(memcpy)
21964 movb %cl, (%rdi)
21965
21966 .Lend:
21967 + pax_force_retaddr
21968 retq
21969 CFI_ENDPROC
21970 ENDPROC(memcpy)
21971 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
21972 index ee16461..c39c199 100644
21973 --- a/arch/x86/lib/memmove_64.S
21974 +++ b/arch/x86/lib/memmove_64.S
21975 @@ -61,13 +61,13 @@ ENTRY(memmove)
21976 5:
21977 sub $0x20, %rdx
21978 movq 0*8(%rsi), %r11
21979 - movq 1*8(%rsi), %r10
21980 + movq 1*8(%rsi), %rcx
21981 movq 2*8(%rsi), %r9
21982 movq 3*8(%rsi), %r8
21983 leaq 4*8(%rsi), %rsi
21984
21985 movq %r11, 0*8(%rdi)
21986 - movq %r10, 1*8(%rdi)
21987 + movq %rcx, 1*8(%rdi)
21988 movq %r9, 2*8(%rdi)
21989 movq %r8, 3*8(%rdi)
21990 leaq 4*8(%rdi), %rdi
21991 @@ -81,10 +81,10 @@ ENTRY(memmove)
21992 4:
21993 movq %rdx, %rcx
21994 movq -8(%rsi, %rdx), %r11
21995 - lea -8(%rdi, %rdx), %r10
21996 + lea -8(%rdi, %rdx), %r9
21997 shrq $3, %rcx
21998 rep movsq
21999 - movq %r11, (%r10)
22000 + movq %r11, (%r9)
22001 jmp 13f
22002 .Lmemmove_end_forward:
22003
22004 @@ -95,14 +95,14 @@ ENTRY(memmove)
22005 7:
22006 movq %rdx, %rcx
22007 movq (%rsi), %r11
22008 - movq %rdi, %r10
22009 + movq %rdi, %r9
22010 leaq -8(%rsi, %rdx), %rsi
22011 leaq -8(%rdi, %rdx), %rdi
22012 shrq $3, %rcx
22013 std
22014 rep movsq
22015 cld
22016 - movq %r11, (%r10)
22017 + movq %r11, (%r9)
22018 jmp 13f
22019
22020 /*
22021 @@ -127,13 +127,13 @@ ENTRY(memmove)
22022 8:
22023 subq $0x20, %rdx
22024 movq -1*8(%rsi), %r11
22025 - movq -2*8(%rsi), %r10
22026 + movq -2*8(%rsi), %rcx
22027 movq -3*8(%rsi), %r9
22028 movq -4*8(%rsi), %r8
22029 leaq -4*8(%rsi), %rsi
22030
22031 movq %r11, -1*8(%rdi)
22032 - movq %r10, -2*8(%rdi)
22033 + movq %rcx, -2*8(%rdi)
22034 movq %r9, -3*8(%rdi)
22035 movq %r8, -4*8(%rdi)
22036 leaq -4*8(%rdi), %rdi
22037 @@ -151,11 +151,11 @@ ENTRY(memmove)
22038 * Move data from 16 bytes to 31 bytes.
22039 */
22040 movq 0*8(%rsi), %r11
22041 - movq 1*8(%rsi), %r10
22042 + movq 1*8(%rsi), %rcx
22043 movq -2*8(%rsi, %rdx), %r9
22044 movq -1*8(%rsi, %rdx), %r8
22045 movq %r11, 0*8(%rdi)
22046 - movq %r10, 1*8(%rdi)
22047 + movq %rcx, 1*8(%rdi)
22048 movq %r9, -2*8(%rdi, %rdx)
22049 movq %r8, -1*8(%rdi, %rdx)
22050 jmp 13f
22051 @@ -167,9 +167,9 @@ ENTRY(memmove)
22052 * Move data from 8 bytes to 15 bytes.
22053 */
22054 movq 0*8(%rsi), %r11
22055 - movq -1*8(%rsi, %rdx), %r10
22056 + movq -1*8(%rsi, %rdx), %r9
22057 movq %r11, 0*8(%rdi)
22058 - movq %r10, -1*8(%rdi, %rdx)
22059 + movq %r9, -1*8(%rdi, %rdx)
22060 jmp 13f
22061 10:
22062 cmpq $4, %rdx
22063 @@ -178,9 +178,9 @@ ENTRY(memmove)
22064 * Move data from 4 bytes to 7 bytes.
22065 */
22066 movl (%rsi), %r11d
22067 - movl -4(%rsi, %rdx), %r10d
22068 + movl -4(%rsi, %rdx), %r9d
22069 movl %r11d, (%rdi)
22070 - movl %r10d, -4(%rdi, %rdx)
22071 + movl %r9d, -4(%rdi, %rdx)
22072 jmp 13f
22073 11:
22074 cmp $2, %rdx
22075 @@ -189,9 +189,9 @@ ENTRY(memmove)
22076 * Move data from 2 bytes to 3 bytes.
22077 */
22078 movw (%rsi), %r11w
22079 - movw -2(%rsi, %rdx), %r10w
22080 + movw -2(%rsi, %rdx), %r9w
22081 movw %r11w, (%rdi)
22082 - movw %r10w, -2(%rdi, %rdx)
22083 + movw %r9w, -2(%rdi, %rdx)
22084 jmp 13f
22085 12:
22086 cmp $1, %rdx
22087 @@ -202,6 +202,7 @@ ENTRY(memmove)
22088 movb (%rsi), %r11b
22089 movb %r11b, (%rdi)
22090 13:
22091 + pax_force_retaddr
22092 retq
22093 CFI_ENDPROC
22094
22095 @@ -210,6 +211,7 @@ ENTRY(memmove)
22096 /* Forward moving data. */
22097 movq %rdx, %rcx
22098 rep movsb
22099 + pax_force_retaddr
22100 retq
22101 .Lmemmove_end_forward_efs:
22102 .previous
22103 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22104 index 2dcb380..963660a 100644
22105 --- a/arch/x86/lib/memset_64.S
22106 +++ b/arch/x86/lib/memset_64.S
22107 @@ -30,6 +30,7 @@
22108 movl %edx,%ecx
22109 rep stosb
22110 movq %r9,%rax
22111 + pax_force_retaddr
22112 ret
22113 .Lmemset_e:
22114 .previous
22115 @@ -52,6 +53,7 @@
22116 movq %rdx,%rcx
22117 rep stosb
22118 movq %r9,%rax
22119 + pax_force_retaddr
22120 ret
22121 .Lmemset_e_e:
22122 .previous
22123 @@ -59,7 +61,7 @@
22124 ENTRY(memset)
22125 ENTRY(__memset)
22126 CFI_STARTPROC
22127 - movq %rdi,%r10
22128 + movq %rdi,%r11
22129
22130 /* expand byte value */
22131 movzbl %sil,%ecx
22132 @@ -117,7 +119,8 @@ ENTRY(__memset)
22133 jnz .Lloop_1
22134
22135 .Lende:
22136 - movq %r10,%rax
22137 + movq %r11,%rax
22138 + pax_force_retaddr
22139 ret
22140
22141 CFI_RESTORE_STATE
22142 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22143 index c9f2d9b..e7fd2c0 100644
22144 --- a/arch/x86/lib/mmx_32.c
22145 +++ b/arch/x86/lib/mmx_32.c
22146 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22147 {
22148 void *p;
22149 int i;
22150 + unsigned long cr0;
22151
22152 if (unlikely(in_interrupt()))
22153 return __memcpy(to, from, len);
22154 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22155 kernel_fpu_begin();
22156
22157 __asm__ __volatile__ (
22158 - "1: prefetch (%0)\n" /* This set is 28 bytes */
22159 - " prefetch 64(%0)\n"
22160 - " prefetch 128(%0)\n"
22161 - " prefetch 192(%0)\n"
22162 - " prefetch 256(%0)\n"
22163 + "1: prefetch (%1)\n" /* This set is 28 bytes */
22164 + " prefetch 64(%1)\n"
22165 + " prefetch 128(%1)\n"
22166 + " prefetch 192(%1)\n"
22167 + " prefetch 256(%1)\n"
22168 "2: \n"
22169 ".section .fixup, \"ax\"\n"
22170 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22171 + "3: \n"
22172 +
22173 +#ifdef CONFIG_PAX_KERNEXEC
22174 + " movl %%cr0, %0\n"
22175 + " movl %0, %%eax\n"
22176 + " andl $0xFFFEFFFF, %%eax\n"
22177 + " movl %%eax, %%cr0\n"
22178 +#endif
22179 +
22180 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22181 +
22182 +#ifdef CONFIG_PAX_KERNEXEC
22183 + " movl %0, %%cr0\n"
22184 +#endif
22185 +
22186 " jmp 2b\n"
22187 ".previous\n"
22188 _ASM_EXTABLE(1b, 3b)
22189 - : : "r" (from));
22190 + : "=&r" (cr0) : "r" (from) : "ax");
22191
22192 for ( ; i > 5; i--) {
22193 __asm__ __volatile__ (
22194 - "1: prefetch 320(%0)\n"
22195 - "2: movq (%0), %%mm0\n"
22196 - " movq 8(%0), %%mm1\n"
22197 - " movq 16(%0), %%mm2\n"
22198 - " movq 24(%0), %%mm3\n"
22199 - " movq %%mm0, (%1)\n"
22200 - " movq %%mm1, 8(%1)\n"
22201 - " movq %%mm2, 16(%1)\n"
22202 - " movq %%mm3, 24(%1)\n"
22203 - " movq 32(%0), %%mm0\n"
22204 - " movq 40(%0), %%mm1\n"
22205 - " movq 48(%0), %%mm2\n"
22206 - " movq 56(%0), %%mm3\n"
22207 - " movq %%mm0, 32(%1)\n"
22208 - " movq %%mm1, 40(%1)\n"
22209 - " movq %%mm2, 48(%1)\n"
22210 - " movq %%mm3, 56(%1)\n"
22211 + "1: prefetch 320(%1)\n"
22212 + "2: movq (%1), %%mm0\n"
22213 + " movq 8(%1), %%mm1\n"
22214 + " movq 16(%1), %%mm2\n"
22215 + " movq 24(%1), %%mm3\n"
22216 + " movq %%mm0, (%2)\n"
22217 + " movq %%mm1, 8(%2)\n"
22218 + " movq %%mm2, 16(%2)\n"
22219 + " movq %%mm3, 24(%2)\n"
22220 + " movq 32(%1), %%mm0\n"
22221 + " movq 40(%1), %%mm1\n"
22222 + " movq 48(%1), %%mm2\n"
22223 + " movq 56(%1), %%mm3\n"
22224 + " movq %%mm0, 32(%2)\n"
22225 + " movq %%mm1, 40(%2)\n"
22226 + " movq %%mm2, 48(%2)\n"
22227 + " movq %%mm3, 56(%2)\n"
22228 ".section .fixup, \"ax\"\n"
22229 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22230 + "3:\n"
22231 +
22232 +#ifdef CONFIG_PAX_KERNEXEC
22233 + " movl %%cr0, %0\n"
22234 + " movl %0, %%eax\n"
22235 + " andl $0xFFFEFFFF, %%eax\n"
22236 + " movl %%eax, %%cr0\n"
22237 +#endif
22238 +
22239 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22240 +
22241 +#ifdef CONFIG_PAX_KERNEXEC
22242 + " movl %0, %%cr0\n"
22243 +#endif
22244 +
22245 " jmp 2b\n"
22246 ".previous\n"
22247 _ASM_EXTABLE(1b, 3b)
22248 - : : "r" (from), "r" (to) : "memory");
22249 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22250
22251 from += 64;
22252 to += 64;
22253 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22254 static void fast_copy_page(void *to, void *from)
22255 {
22256 int i;
22257 + unsigned long cr0;
22258
22259 kernel_fpu_begin();
22260
22261 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22262 * but that is for later. -AV
22263 */
22264 __asm__ __volatile__(
22265 - "1: prefetch (%0)\n"
22266 - " prefetch 64(%0)\n"
22267 - " prefetch 128(%0)\n"
22268 - " prefetch 192(%0)\n"
22269 - " prefetch 256(%0)\n"
22270 + "1: prefetch (%1)\n"
22271 + " prefetch 64(%1)\n"
22272 + " prefetch 128(%1)\n"
22273 + " prefetch 192(%1)\n"
22274 + " prefetch 256(%1)\n"
22275 "2: \n"
22276 ".section .fixup, \"ax\"\n"
22277 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22278 + "3: \n"
22279 +
22280 +#ifdef CONFIG_PAX_KERNEXEC
22281 + " movl %%cr0, %0\n"
22282 + " movl %0, %%eax\n"
22283 + " andl $0xFFFEFFFF, %%eax\n"
22284 + " movl %%eax, %%cr0\n"
22285 +#endif
22286 +
22287 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22288 +
22289 +#ifdef CONFIG_PAX_KERNEXEC
22290 + " movl %0, %%cr0\n"
22291 +#endif
22292 +
22293 " jmp 2b\n"
22294 ".previous\n"
22295 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22296 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22297
22298 for (i = 0; i < (4096-320)/64; i++) {
22299 __asm__ __volatile__ (
22300 - "1: prefetch 320(%0)\n"
22301 - "2: movq (%0), %%mm0\n"
22302 - " movntq %%mm0, (%1)\n"
22303 - " movq 8(%0), %%mm1\n"
22304 - " movntq %%mm1, 8(%1)\n"
22305 - " movq 16(%0), %%mm2\n"
22306 - " movntq %%mm2, 16(%1)\n"
22307 - " movq 24(%0), %%mm3\n"
22308 - " movntq %%mm3, 24(%1)\n"
22309 - " movq 32(%0), %%mm4\n"
22310 - " movntq %%mm4, 32(%1)\n"
22311 - " movq 40(%0), %%mm5\n"
22312 - " movntq %%mm5, 40(%1)\n"
22313 - " movq 48(%0), %%mm6\n"
22314 - " movntq %%mm6, 48(%1)\n"
22315 - " movq 56(%0), %%mm7\n"
22316 - " movntq %%mm7, 56(%1)\n"
22317 + "1: prefetch 320(%1)\n"
22318 + "2: movq (%1), %%mm0\n"
22319 + " movntq %%mm0, (%2)\n"
22320 + " movq 8(%1), %%mm1\n"
22321 + " movntq %%mm1, 8(%2)\n"
22322 + " movq 16(%1), %%mm2\n"
22323 + " movntq %%mm2, 16(%2)\n"
22324 + " movq 24(%1), %%mm3\n"
22325 + " movntq %%mm3, 24(%2)\n"
22326 + " movq 32(%1), %%mm4\n"
22327 + " movntq %%mm4, 32(%2)\n"
22328 + " movq 40(%1), %%mm5\n"
22329 + " movntq %%mm5, 40(%2)\n"
22330 + " movq 48(%1), %%mm6\n"
22331 + " movntq %%mm6, 48(%2)\n"
22332 + " movq 56(%1), %%mm7\n"
22333 + " movntq %%mm7, 56(%2)\n"
22334 ".section .fixup, \"ax\"\n"
22335 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22336 + "3:\n"
22337 +
22338 +#ifdef CONFIG_PAX_KERNEXEC
22339 + " movl %%cr0, %0\n"
22340 + " movl %0, %%eax\n"
22341 + " andl $0xFFFEFFFF, %%eax\n"
22342 + " movl %%eax, %%cr0\n"
22343 +#endif
22344 +
22345 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22346 +
22347 +#ifdef CONFIG_PAX_KERNEXEC
22348 + " movl %0, %%cr0\n"
22349 +#endif
22350 +
22351 " jmp 2b\n"
22352 ".previous\n"
22353 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22354 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22355
22356 from += 64;
22357 to += 64;
22358 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22359 static void fast_copy_page(void *to, void *from)
22360 {
22361 int i;
22362 + unsigned long cr0;
22363
22364 kernel_fpu_begin();
22365
22366 __asm__ __volatile__ (
22367 - "1: prefetch (%0)\n"
22368 - " prefetch 64(%0)\n"
22369 - " prefetch 128(%0)\n"
22370 - " prefetch 192(%0)\n"
22371 - " prefetch 256(%0)\n"
22372 + "1: prefetch (%1)\n"
22373 + " prefetch 64(%1)\n"
22374 + " prefetch 128(%1)\n"
22375 + " prefetch 192(%1)\n"
22376 + " prefetch 256(%1)\n"
22377 "2: \n"
22378 ".section .fixup, \"ax\"\n"
22379 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22380 + "3: \n"
22381 +
22382 +#ifdef CONFIG_PAX_KERNEXEC
22383 + " movl %%cr0, %0\n"
22384 + " movl %0, %%eax\n"
22385 + " andl $0xFFFEFFFF, %%eax\n"
22386 + " movl %%eax, %%cr0\n"
22387 +#endif
22388 +
22389 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22390 +
22391 +#ifdef CONFIG_PAX_KERNEXEC
22392 + " movl %0, %%cr0\n"
22393 +#endif
22394 +
22395 " jmp 2b\n"
22396 ".previous\n"
22397 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22398 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22399
22400 for (i = 0; i < 4096/64; i++) {
22401 __asm__ __volatile__ (
22402 - "1: prefetch 320(%0)\n"
22403 - "2: movq (%0), %%mm0\n"
22404 - " movq 8(%0), %%mm1\n"
22405 - " movq 16(%0), %%mm2\n"
22406 - " movq 24(%0), %%mm3\n"
22407 - " movq %%mm0, (%1)\n"
22408 - " movq %%mm1, 8(%1)\n"
22409 - " movq %%mm2, 16(%1)\n"
22410 - " movq %%mm3, 24(%1)\n"
22411 - " movq 32(%0), %%mm0\n"
22412 - " movq 40(%0), %%mm1\n"
22413 - " movq 48(%0), %%mm2\n"
22414 - " movq 56(%0), %%mm3\n"
22415 - " movq %%mm0, 32(%1)\n"
22416 - " movq %%mm1, 40(%1)\n"
22417 - " movq %%mm2, 48(%1)\n"
22418 - " movq %%mm3, 56(%1)\n"
22419 + "1: prefetch 320(%1)\n"
22420 + "2: movq (%1), %%mm0\n"
22421 + " movq 8(%1), %%mm1\n"
22422 + " movq 16(%1), %%mm2\n"
22423 + " movq 24(%1), %%mm3\n"
22424 + " movq %%mm0, (%2)\n"
22425 + " movq %%mm1, 8(%2)\n"
22426 + " movq %%mm2, 16(%2)\n"
22427 + " movq %%mm3, 24(%2)\n"
22428 + " movq 32(%1), %%mm0\n"
22429 + " movq 40(%1), %%mm1\n"
22430 + " movq 48(%1), %%mm2\n"
22431 + " movq 56(%1), %%mm3\n"
22432 + " movq %%mm0, 32(%2)\n"
22433 + " movq %%mm1, 40(%2)\n"
22434 + " movq %%mm2, 48(%2)\n"
22435 + " movq %%mm3, 56(%2)\n"
22436 ".section .fixup, \"ax\"\n"
22437 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22438 + "3:\n"
22439 +
22440 +#ifdef CONFIG_PAX_KERNEXEC
22441 + " movl %%cr0, %0\n"
22442 + " movl %0, %%eax\n"
22443 + " andl $0xFFFEFFFF, %%eax\n"
22444 + " movl %%eax, %%cr0\n"
22445 +#endif
22446 +
22447 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22448 +
22449 +#ifdef CONFIG_PAX_KERNEXEC
22450 + " movl %0, %%cr0\n"
22451 +#endif
22452 +
22453 " jmp 2b\n"
22454 ".previous\n"
22455 _ASM_EXTABLE(1b, 3b)
22456 - : : "r" (from), "r" (to) : "memory");
22457 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22458
22459 from += 64;
22460 to += 64;
22461 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22462 index 69fa106..adda88b 100644
22463 --- a/arch/x86/lib/msr-reg.S
22464 +++ b/arch/x86/lib/msr-reg.S
22465 @@ -3,6 +3,7 @@
22466 #include <asm/dwarf2.h>
22467 #include <asm/asm.h>
22468 #include <asm/msr.h>
22469 +#include <asm/alternative-asm.h>
22470
22471 #ifdef CONFIG_X86_64
22472 /*
22473 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22474 CFI_STARTPROC
22475 pushq_cfi %rbx
22476 pushq_cfi %rbp
22477 - movq %rdi, %r10 /* Save pointer */
22478 + movq %rdi, %r9 /* Save pointer */
22479 xorl %r11d, %r11d /* Return value */
22480 movl (%rdi), %eax
22481 movl 4(%rdi), %ecx
22482 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22483 movl 28(%rdi), %edi
22484 CFI_REMEMBER_STATE
22485 1: \op
22486 -2: movl %eax, (%r10)
22487 +2: movl %eax, (%r9)
22488 movl %r11d, %eax /* Return value */
22489 - movl %ecx, 4(%r10)
22490 - movl %edx, 8(%r10)
22491 - movl %ebx, 12(%r10)
22492 - movl %ebp, 20(%r10)
22493 - movl %esi, 24(%r10)
22494 - movl %edi, 28(%r10)
22495 + movl %ecx, 4(%r9)
22496 + movl %edx, 8(%r9)
22497 + movl %ebx, 12(%r9)
22498 + movl %ebp, 20(%r9)
22499 + movl %esi, 24(%r9)
22500 + movl %edi, 28(%r9)
22501 popq_cfi %rbp
22502 popq_cfi %rbx
22503 + pax_force_retaddr
22504 ret
22505 3:
22506 CFI_RESTORE_STATE
22507 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22508 index 36b0d15..d381858 100644
22509 --- a/arch/x86/lib/putuser.S
22510 +++ b/arch/x86/lib/putuser.S
22511 @@ -15,7 +15,9 @@
22512 #include <asm/thread_info.h>
22513 #include <asm/errno.h>
22514 #include <asm/asm.h>
22515 -
22516 +#include <asm/segment.h>
22517 +#include <asm/pgtable.h>
22518 +#include <asm/alternative-asm.h>
22519
22520 /*
22521 * __put_user_X
22522 @@ -29,52 +31,119 @@
22523 * as they get called from within inline assembly.
22524 */
22525
22526 -#define ENTER CFI_STARTPROC ; \
22527 - GET_THREAD_INFO(%_ASM_BX)
22528 -#define EXIT ret ; \
22529 +#define ENTER CFI_STARTPROC
22530 +#define EXIT pax_force_retaddr; ret ; \
22531 CFI_ENDPROC
22532
22533 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22534 +#define _DEST %_ASM_CX,%_ASM_BX
22535 +#else
22536 +#define _DEST %_ASM_CX
22537 +#endif
22538 +
22539 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22540 +#define __copyuser_seg gs;
22541 +#else
22542 +#define __copyuser_seg
22543 +#endif
22544 +
22545 .text
22546 ENTRY(__put_user_1)
22547 ENTER
22548 +
22549 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22550 + GET_THREAD_INFO(%_ASM_BX)
22551 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22552 jae bad_put_user
22553 -1: movb %al,(%_ASM_CX)
22554 +
22555 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22556 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22557 + cmp %_ASM_BX,%_ASM_CX
22558 + jb 1234f
22559 + xor %ebx,%ebx
22560 +1234:
22561 +#endif
22562 +
22563 +#endif
22564 +
22565 +1: __copyuser_seg movb %al,(_DEST)
22566 xor %eax,%eax
22567 EXIT
22568 ENDPROC(__put_user_1)
22569
22570 ENTRY(__put_user_2)
22571 ENTER
22572 +
22573 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22574 + GET_THREAD_INFO(%_ASM_BX)
22575 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22576 sub $1,%_ASM_BX
22577 cmp %_ASM_BX,%_ASM_CX
22578 jae bad_put_user
22579 -2: movw %ax,(%_ASM_CX)
22580 +
22581 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22582 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22583 + cmp %_ASM_BX,%_ASM_CX
22584 + jb 1234f
22585 + xor %ebx,%ebx
22586 +1234:
22587 +#endif
22588 +
22589 +#endif
22590 +
22591 +2: __copyuser_seg movw %ax,(_DEST)
22592 xor %eax,%eax
22593 EXIT
22594 ENDPROC(__put_user_2)
22595
22596 ENTRY(__put_user_4)
22597 ENTER
22598 +
22599 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22600 + GET_THREAD_INFO(%_ASM_BX)
22601 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22602 sub $3,%_ASM_BX
22603 cmp %_ASM_BX,%_ASM_CX
22604 jae bad_put_user
22605 -3: movl %eax,(%_ASM_CX)
22606 +
22607 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22608 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22609 + cmp %_ASM_BX,%_ASM_CX
22610 + jb 1234f
22611 + xor %ebx,%ebx
22612 +1234:
22613 +#endif
22614 +
22615 +#endif
22616 +
22617 +3: __copyuser_seg movl %eax,(_DEST)
22618 xor %eax,%eax
22619 EXIT
22620 ENDPROC(__put_user_4)
22621
22622 ENTRY(__put_user_8)
22623 ENTER
22624 +
22625 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22626 + GET_THREAD_INFO(%_ASM_BX)
22627 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22628 sub $7,%_ASM_BX
22629 cmp %_ASM_BX,%_ASM_CX
22630 jae bad_put_user
22631 -4: mov %_ASM_AX,(%_ASM_CX)
22632 +
22633 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22634 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22635 + cmp %_ASM_BX,%_ASM_CX
22636 + jb 1234f
22637 + xor %ebx,%ebx
22638 +1234:
22639 +#endif
22640 +
22641 +#endif
22642 +
22643 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
22644 #ifdef CONFIG_X86_32
22645 -5: movl %edx,4(%_ASM_CX)
22646 +5: __copyuser_seg movl %edx,4(_DEST)
22647 #endif
22648 xor %eax,%eax
22649 EXIT
22650 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22651 index 1cad221..de671ee 100644
22652 --- a/arch/x86/lib/rwlock.S
22653 +++ b/arch/x86/lib/rwlock.S
22654 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22655 FRAME
22656 0: LOCK_PREFIX
22657 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22658 +
22659 +#ifdef CONFIG_PAX_REFCOUNT
22660 + jno 1234f
22661 + LOCK_PREFIX
22662 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22663 + int $4
22664 +1234:
22665 + _ASM_EXTABLE(1234b, 1234b)
22666 +#endif
22667 +
22668 1: rep; nop
22669 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22670 jne 1b
22671 LOCK_PREFIX
22672 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22673 +
22674 +#ifdef CONFIG_PAX_REFCOUNT
22675 + jno 1234f
22676 + LOCK_PREFIX
22677 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22678 + int $4
22679 +1234:
22680 + _ASM_EXTABLE(1234b, 1234b)
22681 +#endif
22682 +
22683 jnz 0b
22684 ENDFRAME
22685 + pax_force_retaddr
22686 ret
22687 CFI_ENDPROC
22688 END(__write_lock_failed)
22689 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22690 FRAME
22691 0: LOCK_PREFIX
22692 READ_LOCK_SIZE(inc) (%__lock_ptr)
22693 +
22694 +#ifdef CONFIG_PAX_REFCOUNT
22695 + jno 1234f
22696 + LOCK_PREFIX
22697 + READ_LOCK_SIZE(dec) (%__lock_ptr)
22698 + int $4
22699 +1234:
22700 + _ASM_EXTABLE(1234b, 1234b)
22701 +#endif
22702 +
22703 1: rep; nop
22704 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22705 js 1b
22706 LOCK_PREFIX
22707 READ_LOCK_SIZE(dec) (%__lock_ptr)
22708 +
22709 +#ifdef CONFIG_PAX_REFCOUNT
22710 + jno 1234f
22711 + LOCK_PREFIX
22712 + READ_LOCK_SIZE(inc) (%__lock_ptr)
22713 + int $4
22714 +1234:
22715 + _ASM_EXTABLE(1234b, 1234b)
22716 +#endif
22717 +
22718 js 0b
22719 ENDFRAME
22720 + pax_force_retaddr
22721 ret
22722 CFI_ENDPROC
22723 END(__read_lock_failed)
22724 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22725 index 5dff5f0..cadebf4 100644
22726 --- a/arch/x86/lib/rwsem.S
22727 +++ b/arch/x86/lib/rwsem.S
22728 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22729 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22730 CFI_RESTORE __ASM_REG(dx)
22731 restore_common_regs
22732 + pax_force_retaddr
22733 ret
22734 CFI_ENDPROC
22735 ENDPROC(call_rwsem_down_read_failed)
22736 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22737 movq %rax,%rdi
22738 call rwsem_down_write_failed
22739 restore_common_regs
22740 + pax_force_retaddr
22741 ret
22742 CFI_ENDPROC
22743 ENDPROC(call_rwsem_down_write_failed)
22744 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22745 movq %rax,%rdi
22746 call rwsem_wake
22747 restore_common_regs
22748 -1: ret
22749 +1: pax_force_retaddr
22750 + ret
22751 CFI_ENDPROC
22752 ENDPROC(call_rwsem_wake)
22753
22754 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22755 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22756 CFI_RESTORE __ASM_REG(dx)
22757 restore_common_regs
22758 + pax_force_retaddr
22759 ret
22760 CFI_ENDPROC
22761 ENDPROC(call_rwsem_downgrade_wake)
22762 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22763 index a63efd6..ccecad8 100644
22764 --- a/arch/x86/lib/thunk_64.S
22765 +++ b/arch/x86/lib/thunk_64.S
22766 @@ -8,6 +8,7 @@
22767 #include <linux/linkage.h>
22768 #include <asm/dwarf2.h>
22769 #include <asm/calling.h>
22770 +#include <asm/alternative-asm.h>
22771
22772 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22773 .macro THUNK name, func, put_ret_addr_in_rdi=0
22774 @@ -41,5 +42,6 @@
22775 SAVE_ARGS
22776 restore:
22777 RESTORE_ARGS
22778 + pax_force_retaddr
22779 ret
22780 CFI_ENDPROC
22781 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22782 index ef2a6a5..3b28862 100644
22783 --- a/arch/x86/lib/usercopy_32.c
22784 +++ b/arch/x86/lib/usercopy_32.c
22785 @@ -41,10 +41,12 @@ do { \
22786 int __d0; \
22787 might_fault(); \
22788 __asm__ __volatile__( \
22789 + __COPYUSER_SET_ES \
22790 "0: rep; stosl\n" \
22791 " movl %2,%0\n" \
22792 "1: rep; stosb\n" \
22793 "2:\n" \
22794 + __COPYUSER_RESTORE_ES \
22795 ".section .fixup,\"ax\"\n" \
22796 "3: lea 0(%2,%0,4),%0\n" \
22797 " jmp 2b\n" \
22798 @@ -113,6 +115,7 @@ long strnlen_user(const char __user *s, long n)
22799 might_fault();
22800
22801 __asm__ __volatile__(
22802 + __COPYUSER_SET_ES
22803 " testl %0, %0\n"
22804 " jz 3f\n"
22805 " andl %0,%%ecx\n"
22806 @@ -121,6 +124,7 @@ long strnlen_user(const char __user *s, long n)
22807 " subl %%ecx,%0\n"
22808 " addl %0,%%eax\n"
22809 "1:\n"
22810 + __COPYUSER_RESTORE_ES
22811 ".section .fixup,\"ax\"\n"
22812 "2: xorl %%eax,%%eax\n"
22813 " jmp 1b\n"
22814 @@ -140,7 +144,7 @@ EXPORT_SYMBOL(strnlen_user);
22815
22816 #ifdef CONFIG_X86_INTEL_USERCOPY
22817 static unsigned long
22818 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
22819 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22820 {
22821 int d0, d1;
22822 __asm__ __volatile__(
22823 @@ -152,36 +156,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22824 " .align 2,0x90\n"
22825 "3: movl 0(%4), %%eax\n"
22826 "4: movl 4(%4), %%edx\n"
22827 - "5: movl %%eax, 0(%3)\n"
22828 - "6: movl %%edx, 4(%3)\n"
22829 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22830 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22831 "7: movl 8(%4), %%eax\n"
22832 "8: movl 12(%4),%%edx\n"
22833 - "9: movl %%eax, 8(%3)\n"
22834 - "10: movl %%edx, 12(%3)\n"
22835 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22836 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22837 "11: movl 16(%4), %%eax\n"
22838 "12: movl 20(%4), %%edx\n"
22839 - "13: movl %%eax, 16(%3)\n"
22840 - "14: movl %%edx, 20(%3)\n"
22841 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22842 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22843 "15: movl 24(%4), %%eax\n"
22844 "16: movl 28(%4), %%edx\n"
22845 - "17: movl %%eax, 24(%3)\n"
22846 - "18: movl %%edx, 28(%3)\n"
22847 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22848 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22849 "19: movl 32(%4), %%eax\n"
22850 "20: movl 36(%4), %%edx\n"
22851 - "21: movl %%eax, 32(%3)\n"
22852 - "22: movl %%edx, 36(%3)\n"
22853 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22854 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22855 "23: movl 40(%4), %%eax\n"
22856 "24: movl 44(%4), %%edx\n"
22857 - "25: movl %%eax, 40(%3)\n"
22858 - "26: movl %%edx, 44(%3)\n"
22859 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22860 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22861 "27: movl 48(%4), %%eax\n"
22862 "28: movl 52(%4), %%edx\n"
22863 - "29: movl %%eax, 48(%3)\n"
22864 - "30: movl %%edx, 52(%3)\n"
22865 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22866 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22867 "31: movl 56(%4), %%eax\n"
22868 "32: movl 60(%4), %%edx\n"
22869 - "33: movl %%eax, 56(%3)\n"
22870 - "34: movl %%edx, 60(%3)\n"
22871 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22872 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22873 " addl $-64, %0\n"
22874 " addl $64, %4\n"
22875 " addl $64, %3\n"
22876 @@ -191,10 +195,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22877 " shrl $2, %0\n"
22878 " andl $3, %%eax\n"
22879 " cld\n"
22880 + __COPYUSER_SET_ES
22881 "99: rep; movsl\n"
22882 "36: movl %%eax, %0\n"
22883 "37: rep; movsb\n"
22884 "100:\n"
22885 + __COPYUSER_RESTORE_ES
22886 ".section .fixup,\"ax\"\n"
22887 "101: lea 0(%%eax,%0,4),%0\n"
22888 " jmp 100b\n"
22889 @@ -247,46 +253,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22890 }
22891
22892 static unsigned long
22893 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
22894 +{
22895 + int d0, d1;
22896 + __asm__ __volatile__(
22897 + " .align 2,0x90\n"
22898 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
22899 + " cmpl $67, %0\n"
22900 + " jbe 3f\n"
22901 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
22902 + " .align 2,0x90\n"
22903 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
22904 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
22905 + "5: movl %%eax, 0(%3)\n"
22906 + "6: movl %%edx, 4(%3)\n"
22907 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
22908 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
22909 + "9: movl %%eax, 8(%3)\n"
22910 + "10: movl %%edx, 12(%3)\n"
22911 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
22912 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
22913 + "13: movl %%eax, 16(%3)\n"
22914 + "14: movl %%edx, 20(%3)\n"
22915 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
22916 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
22917 + "17: movl %%eax, 24(%3)\n"
22918 + "18: movl %%edx, 28(%3)\n"
22919 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
22920 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
22921 + "21: movl %%eax, 32(%3)\n"
22922 + "22: movl %%edx, 36(%3)\n"
22923 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
22924 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
22925 + "25: movl %%eax, 40(%3)\n"
22926 + "26: movl %%edx, 44(%3)\n"
22927 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
22928 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
22929 + "29: movl %%eax, 48(%3)\n"
22930 + "30: movl %%edx, 52(%3)\n"
22931 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
22932 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
22933 + "33: movl %%eax, 56(%3)\n"
22934 + "34: movl %%edx, 60(%3)\n"
22935 + " addl $-64, %0\n"
22936 + " addl $64, %4\n"
22937 + " addl $64, %3\n"
22938 + " cmpl $63, %0\n"
22939 + " ja 1b\n"
22940 + "35: movl %0, %%eax\n"
22941 + " shrl $2, %0\n"
22942 + " andl $3, %%eax\n"
22943 + " cld\n"
22944 + "99: rep; "__copyuser_seg" movsl\n"
22945 + "36: movl %%eax, %0\n"
22946 + "37: rep; "__copyuser_seg" movsb\n"
22947 + "100:\n"
22948 + ".section .fixup,\"ax\"\n"
22949 + "101: lea 0(%%eax,%0,4),%0\n"
22950 + " jmp 100b\n"
22951 + ".previous\n"
22952 + ".section __ex_table,\"a\"\n"
22953 + " .align 4\n"
22954 + " .long 1b,100b\n"
22955 + " .long 2b,100b\n"
22956 + " .long 3b,100b\n"
22957 + " .long 4b,100b\n"
22958 + " .long 5b,100b\n"
22959 + " .long 6b,100b\n"
22960 + " .long 7b,100b\n"
22961 + " .long 8b,100b\n"
22962 + " .long 9b,100b\n"
22963 + " .long 10b,100b\n"
22964 + " .long 11b,100b\n"
22965 + " .long 12b,100b\n"
22966 + " .long 13b,100b\n"
22967 + " .long 14b,100b\n"
22968 + " .long 15b,100b\n"
22969 + " .long 16b,100b\n"
22970 + " .long 17b,100b\n"
22971 + " .long 18b,100b\n"
22972 + " .long 19b,100b\n"
22973 + " .long 20b,100b\n"
22974 + " .long 21b,100b\n"
22975 + " .long 22b,100b\n"
22976 + " .long 23b,100b\n"
22977 + " .long 24b,100b\n"
22978 + " .long 25b,100b\n"
22979 + " .long 26b,100b\n"
22980 + " .long 27b,100b\n"
22981 + " .long 28b,100b\n"
22982 + " .long 29b,100b\n"
22983 + " .long 30b,100b\n"
22984 + " .long 31b,100b\n"
22985 + " .long 32b,100b\n"
22986 + " .long 33b,100b\n"
22987 + " .long 34b,100b\n"
22988 + " .long 35b,100b\n"
22989 + " .long 36b,100b\n"
22990 + " .long 37b,100b\n"
22991 + " .long 99b,101b\n"
22992 + ".previous"
22993 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
22994 + : "1"(to), "2"(from), "0"(size)
22995 + : "eax", "edx", "memory");
22996 + return size;
22997 +}
22998 +
22999 +static unsigned long
23000 +__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
23001 +static unsigned long
23002 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23003 {
23004 int d0, d1;
23005 __asm__ __volatile__(
23006 " .align 2,0x90\n"
23007 - "0: movl 32(%4), %%eax\n"
23008 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23009 " cmpl $67, %0\n"
23010 " jbe 2f\n"
23011 - "1: movl 64(%4), %%eax\n"
23012 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23013 " .align 2,0x90\n"
23014 - "2: movl 0(%4), %%eax\n"
23015 - "21: movl 4(%4), %%edx\n"
23016 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23017 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23018 " movl %%eax, 0(%3)\n"
23019 " movl %%edx, 4(%3)\n"
23020 - "3: movl 8(%4), %%eax\n"
23021 - "31: movl 12(%4),%%edx\n"
23022 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23023 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23024 " movl %%eax, 8(%3)\n"
23025 " movl %%edx, 12(%3)\n"
23026 - "4: movl 16(%4), %%eax\n"
23027 - "41: movl 20(%4), %%edx\n"
23028 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23029 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23030 " movl %%eax, 16(%3)\n"
23031 " movl %%edx, 20(%3)\n"
23032 - "10: movl 24(%4), %%eax\n"
23033 - "51: movl 28(%4), %%edx\n"
23034 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23035 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23036 " movl %%eax, 24(%3)\n"
23037 " movl %%edx, 28(%3)\n"
23038 - "11: movl 32(%4), %%eax\n"
23039 - "61: movl 36(%4), %%edx\n"
23040 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23041 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23042 " movl %%eax, 32(%3)\n"
23043 " movl %%edx, 36(%3)\n"
23044 - "12: movl 40(%4), %%eax\n"
23045 - "71: movl 44(%4), %%edx\n"
23046 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23047 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23048 " movl %%eax, 40(%3)\n"
23049 " movl %%edx, 44(%3)\n"
23050 - "13: movl 48(%4), %%eax\n"
23051 - "81: movl 52(%4), %%edx\n"
23052 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23053 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23054 " movl %%eax, 48(%3)\n"
23055 " movl %%edx, 52(%3)\n"
23056 - "14: movl 56(%4), %%eax\n"
23057 - "91: movl 60(%4), %%edx\n"
23058 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23059 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23060 " movl %%eax, 56(%3)\n"
23061 " movl %%edx, 60(%3)\n"
23062 " addl $-64, %0\n"
23063 @@ -298,9 +413,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23064 " shrl $2, %0\n"
23065 " andl $3, %%eax\n"
23066 " cld\n"
23067 - "6: rep; movsl\n"
23068 + "6: rep; "__copyuser_seg" movsl\n"
23069 " movl %%eax,%0\n"
23070 - "7: rep; movsb\n"
23071 + "7: rep; "__copyuser_seg" movsb\n"
23072 "8:\n"
23073 ".section .fixup,\"ax\"\n"
23074 "9: lea 0(%%eax,%0,4),%0\n"
23075 @@ -347,47 +462,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23076 */
23077
23078 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23079 + const void __user *from, unsigned long size) __size_overflow(3);
23080 +static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23081 const void __user *from, unsigned long size)
23082 {
23083 int d0, d1;
23084
23085 __asm__ __volatile__(
23086 " .align 2,0x90\n"
23087 - "0: movl 32(%4), %%eax\n"
23088 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23089 " cmpl $67, %0\n"
23090 " jbe 2f\n"
23091 - "1: movl 64(%4), %%eax\n"
23092 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23093 " .align 2,0x90\n"
23094 - "2: movl 0(%4), %%eax\n"
23095 - "21: movl 4(%4), %%edx\n"
23096 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23097 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23098 " movnti %%eax, 0(%3)\n"
23099 " movnti %%edx, 4(%3)\n"
23100 - "3: movl 8(%4), %%eax\n"
23101 - "31: movl 12(%4),%%edx\n"
23102 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23103 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23104 " movnti %%eax, 8(%3)\n"
23105 " movnti %%edx, 12(%3)\n"
23106 - "4: movl 16(%4), %%eax\n"
23107 - "41: movl 20(%4), %%edx\n"
23108 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23109 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23110 " movnti %%eax, 16(%3)\n"
23111 " movnti %%edx, 20(%3)\n"
23112 - "10: movl 24(%4), %%eax\n"
23113 - "51: movl 28(%4), %%edx\n"
23114 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23115 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23116 " movnti %%eax, 24(%3)\n"
23117 " movnti %%edx, 28(%3)\n"
23118 - "11: movl 32(%4), %%eax\n"
23119 - "61: movl 36(%4), %%edx\n"
23120 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23121 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23122 " movnti %%eax, 32(%3)\n"
23123 " movnti %%edx, 36(%3)\n"
23124 - "12: movl 40(%4), %%eax\n"
23125 - "71: movl 44(%4), %%edx\n"
23126 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23127 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23128 " movnti %%eax, 40(%3)\n"
23129 " movnti %%edx, 44(%3)\n"
23130 - "13: movl 48(%4), %%eax\n"
23131 - "81: movl 52(%4), %%edx\n"
23132 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23133 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23134 " movnti %%eax, 48(%3)\n"
23135 " movnti %%edx, 52(%3)\n"
23136 - "14: movl 56(%4), %%eax\n"
23137 - "91: movl 60(%4), %%edx\n"
23138 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23139 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23140 " movnti %%eax, 56(%3)\n"
23141 " movnti %%edx, 60(%3)\n"
23142 " addl $-64, %0\n"
23143 @@ -400,9 +517,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23144 " shrl $2, %0\n"
23145 " andl $3, %%eax\n"
23146 " cld\n"
23147 - "6: rep; movsl\n"
23148 + "6: rep; "__copyuser_seg" movsl\n"
23149 " movl %%eax,%0\n"
23150 - "7: rep; movsb\n"
23151 + "7: rep; "__copyuser_seg" movsb\n"
23152 "8:\n"
23153 ".section .fixup,\"ax\"\n"
23154 "9: lea 0(%%eax,%0,4),%0\n"
23155 @@ -444,47 +561,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23156 }
23157
23158 static unsigned long __copy_user_intel_nocache(void *to,
23159 + const void __user *from, unsigned long size) __size_overflow(3);
23160 +static unsigned long __copy_user_intel_nocache(void *to,
23161 const void __user *from, unsigned long size)
23162 {
23163 int d0, d1;
23164
23165 __asm__ __volatile__(
23166 " .align 2,0x90\n"
23167 - "0: movl 32(%4), %%eax\n"
23168 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23169 " cmpl $67, %0\n"
23170 " jbe 2f\n"
23171 - "1: movl 64(%4), %%eax\n"
23172 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23173 " .align 2,0x90\n"
23174 - "2: movl 0(%4), %%eax\n"
23175 - "21: movl 4(%4), %%edx\n"
23176 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23177 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23178 " movnti %%eax, 0(%3)\n"
23179 " movnti %%edx, 4(%3)\n"
23180 - "3: movl 8(%4), %%eax\n"
23181 - "31: movl 12(%4),%%edx\n"
23182 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23183 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23184 " movnti %%eax, 8(%3)\n"
23185 " movnti %%edx, 12(%3)\n"
23186 - "4: movl 16(%4), %%eax\n"
23187 - "41: movl 20(%4), %%edx\n"
23188 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23189 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23190 " movnti %%eax, 16(%3)\n"
23191 " movnti %%edx, 20(%3)\n"
23192 - "10: movl 24(%4), %%eax\n"
23193 - "51: movl 28(%4), %%edx\n"
23194 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23195 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23196 " movnti %%eax, 24(%3)\n"
23197 " movnti %%edx, 28(%3)\n"
23198 - "11: movl 32(%4), %%eax\n"
23199 - "61: movl 36(%4), %%edx\n"
23200 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23201 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23202 " movnti %%eax, 32(%3)\n"
23203 " movnti %%edx, 36(%3)\n"
23204 - "12: movl 40(%4), %%eax\n"
23205 - "71: movl 44(%4), %%edx\n"
23206 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23207 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23208 " movnti %%eax, 40(%3)\n"
23209 " movnti %%edx, 44(%3)\n"
23210 - "13: movl 48(%4), %%eax\n"
23211 - "81: movl 52(%4), %%edx\n"
23212 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23213 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23214 " movnti %%eax, 48(%3)\n"
23215 " movnti %%edx, 52(%3)\n"
23216 - "14: movl 56(%4), %%eax\n"
23217 - "91: movl 60(%4), %%edx\n"
23218 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23219 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23220 " movnti %%eax, 56(%3)\n"
23221 " movnti %%edx, 60(%3)\n"
23222 " addl $-64, %0\n"
23223 @@ -497,9 +616,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23224 " shrl $2, %0\n"
23225 " andl $3, %%eax\n"
23226 " cld\n"
23227 - "6: rep; movsl\n"
23228 + "6: rep; "__copyuser_seg" movsl\n"
23229 " movl %%eax,%0\n"
23230 - "7: rep; movsb\n"
23231 + "7: rep; "__copyuser_seg" movsb\n"
23232 "8:\n"
23233 ".section .fixup,\"ax\"\n"
23234 "9: lea 0(%%eax,%0,4),%0\n"
23235 @@ -542,32 +661,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23236 */
23237 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23238 unsigned long size);
23239 -unsigned long __copy_user_intel(void __user *to, const void *from,
23240 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23241 + unsigned long size);
23242 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23243 unsigned long size);
23244 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23245 const void __user *from, unsigned long size);
23246 #endif /* CONFIG_X86_INTEL_USERCOPY */
23247
23248 /* Generic arbitrary sized copy. */
23249 -#define __copy_user(to, from, size) \
23250 +#define __copy_user(to, from, size, prefix, set, restore) \
23251 do { \
23252 int __d0, __d1, __d2; \
23253 __asm__ __volatile__( \
23254 + set \
23255 " cmp $7,%0\n" \
23256 " jbe 1f\n" \
23257 " movl %1,%0\n" \
23258 " negl %0\n" \
23259 " andl $7,%0\n" \
23260 " subl %0,%3\n" \
23261 - "4: rep; movsb\n" \
23262 + "4: rep; "prefix"movsb\n" \
23263 " movl %3,%0\n" \
23264 " shrl $2,%0\n" \
23265 " andl $3,%3\n" \
23266 " .align 2,0x90\n" \
23267 - "0: rep; movsl\n" \
23268 + "0: rep; "prefix"movsl\n" \
23269 " movl %3,%0\n" \
23270 - "1: rep; movsb\n" \
23271 + "1: rep; "prefix"movsb\n" \
23272 "2:\n" \
23273 + restore \
23274 ".section .fixup,\"ax\"\n" \
23275 "5: addl %3,%0\n" \
23276 " jmp 2b\n" \
23277 @@ -595,14 +718,14 @@ do { \
23278 " negl %0\n" \
23279 " andl $7,%0\n" \
23280 " subl %0,%3\n" \
23281 - "4: rep; movsb\n" \
23282 + "4: rep; "__copyuser_seg"movsb\n" \
23283 " movl %3,%0\n" \
23284 " shrl $2,%0\n" \
23285 " andl $3,%3\n" \
23286 " .align 2,0x90\n" \
23287 - "0: rep; movsl\n" \
23288 + "0: rep; "__copyuser_seg"movsl\n" \
23289 " movl %3,%0\n" \
23290 - "1: rep; movsb\n" \
23291 + "1: rep; "__copyuser_seg"movsb\n" \
23292 "2:\n" \
23293 ".section .fixup,\"ax\"\n" \
23294 "5: addl %3,%0\n" \
23295 @@ -688,9 +811,9 @@ survive:
23296 }
23297 #endif
23298 if (movsl_is_ok(to, from, n))
23299 - __copy_user(to, from, n);
23300 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23301 else
23302 - n = __copy_user_intel(to, from, n);
23303 + n = __generic_copy_to_user_intel(to, from, n);
23304 return n;
23305 }
23306 EXPORT_SYMBOL(__copy_to_user_ll);
23307 @@ -710,10 +833,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23308 unsigned long n)
23309 {
23310 if (movsl_is_ok(to, from, n))
23311 - __copy_user(to, from, n);
23312 + __copy_user(to, from, n, __copyuser_seg, "", "");
23313 else
23314 - n = __copy_user_intel((void __user *)to,
23315 - (const void *)from, n);
23316 + n = __generic_copy_from_user_intel(to, from, n);
23317 return n;
23318 }
23319 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23320 @@ -740,65 +862,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23321 if (n > 64 && cpu_has_xmm2)
23322 n = __copy_user_intel_nocache(to, from, n);
23323 else
23324 - __copy_user(to, from, n);
23325 + __copy_user(to, from, n, __copyuser_seg, "", "");
23326 #else
23327 - __copy_user(to, from, n);
23328 + __copy_user(to, from, n, __copyuser_seg, "", "");
23329 #endif
23330 return n;
23331 }
23332 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23333
23334 -/**
23335 - * copy_to_user: - Copy a block of data into user space.
23336 - * @to: Destination address, in user space.
23337 - * @from: Source address, in kernel space.
23338 - * @n: Number of bytes to copy.
23339 - *
23340 - * Context: User context only. This function may sleep.
23341 - *
23342 - * Copy data from kernel space to user space.
23343 - *
23344 - * Returns number of bytes that could not be copied.
23345 - * On success, this will be zero.
23346 - */
23347 -unsigned long
23348 -copy_to_user(void __user *to, const void *from, unsigned long n)
23349 -{
23350 - if (access_ok(VERIFY_WRITE, to, n))
23351 - n = __copy_to_user(to, from, n);
23352 - return n;
23353 -}
23354 -EXPORT_SYMBOL(copy_to_user);
23355 -
23356 -/**
23357 - * copy_from_user: - Copy a block of data from user space.
23358 - * @to: Destination address, in kernel space.
23359 - * @from: Source address, in user space.
23360 - * @n: Number of bytes to copy.
23361 - *
23362 - * Context: User context only. This function may sleep.
23363 - *
23364 - * Copy data from user space to kernel space.
23365 - *
23366 - * Returns number of bytes that could not be copied.
23367 - * On success, this will be zero.
23368 - *
23369 - * If some data could not be copied, this function will pad the copied
23370 - * data to the requested size using zero bytes.
23371 - */
23372 -unsigned long
23373 -_copy_from_user(void *to, const void __user *from, unsigned long n)
23374 -{
23375 - if (access_ok(VERIFY_READ, from, n))
23376 - n = __copy_from_user(to, from, n);
23377 - else
23378 - memset(to, 0, n);
23379 - return n;
23380 -}
23381 -EXPORT_SYMBOL(_copy_from_user);
23382 -
23383 void copy_from_user_overflow(void)
23384 {
23385 WARN(1, "Buffer overflow detected!\n");
23386 }
23387 EXPORT_SYMBOL(copy_from_user_overflow);
23388 +
23389 +void copy_to_user_overflow(void)
23390 +{
23391 + WARN(1, "Buffer overflow detected!\n");
23392 +}
23393 +EXPORT_SYMBOL(copy_to_user_overflow);
23394 +
23395 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23396 +void __set_fs(mm_segment_t x)
23397 +{
23398 + switch (x.seg) {
23399 + case 0:
23400 + loadsegment(gs, 0);
23401 + break;
23402 + case TASK_SIZE_MAX:
23403 + loadsegment(gs, __USER_DS);
23404 + break;
23405 + case -1UL:
23406 + loadsegment(gs, __KERNEL_DS);
23407 + break;
23408 + default:
23409 + BUG();
23410 + }
23411 + return;
23412 +}
23413 +EXPORT_SYMBOL(__set_fs);
23414 +
23415 +void set_fs(mm_segment_t x)
23416 +{
23417 + current_thread_info()->addr_limit = x;
23418 + __set_fs(x);
23419 +}
23420 +EXPORT_SYMBOL(set_fs);
23421 +#endif
23422 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23423 index 0d0326f..5c5f91e 100644
23424 --- a/arch/x86/lib/usercopy_64.c
23425 +++ b/arch/x86/lib/usercopy_64.c
23426 @@ -16,6 +16,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23427 {
23428 long __d0;
23429 might_fault();
23430 +
23431 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23432 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23433 + addr += PAX_USER_SHADOW_BASE;
23434 +#endif
23435 +
23436 /* no memory constraint because it doesn't change any memory gcc knows
23437 about */
23438 asm volatile(
23439 @@ -100,12 +106,20 @@ long strlen_user(const char __user *s)
23440 }
23441 EXPORT_SYMBOL(strlen_user);
23442
23443 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23444 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23445 {
23446 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23447 - return copy_user_generic((__force void *)to, (__force void *)from, len);
23448 - }
23449 - return len;
23450 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23451 +
23452 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23453 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23454 + to += PAX_USER_SHADOW_BASE;
23455 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23456 + from += PAX_USER_SHADOW_BASE;
23457 +#endif
23458 +
23459 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23460 + }
23461 + return len;
23462 }
23463 EXPORT_SYMBOL(copy_in_user);
23464
23465 @@ -115,7 +129,7 @@ EXPORT_SYMBOL(copy_in_user);
23466 * it is not necessary to optimize tail handling.
23467 */
23468 unsigned long
23469 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23470 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23471 {
23472 char c;
23473 unsigned zero_len;
23474 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23475 index 1fb85db..8b3540b 100644
23476 --- a/arch/x86/mm/extable.c
23477 +++ b/arch/x86/mm/extable.c
23478 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
23479 const struct exception_table_entry *fixup;
23480
23481 #ifdef CONFIG_PNPBIOS
23482 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23483 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23484 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23485 extern u32 pnp_bios_is_utter_crap;
23486 pnp_bios_is_utter_crap = 1;
23487 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23488 index 3ecfd1a..304d554 100644
23489 --- a/arch/x86/mm/fault.c
23490 +++ b/arch/x86/mm/fault.c
23491 @@ -13,11 +13,18 @@
23492 #include <linux/perf_event.h> /* perf_sw_event */
23493 #include <linux/hugetlb.h> /* hstate_index_to_shift */
23494 #include <linux/prefetch.h> /* prefetchw */
23495 +#include <linux/unistd.h>
23496 +#include <linux/compiler.h>
23497
23498 #include <asm/traps.h> /* dotraplinkage, ... */
23499 #include <asm/pgalloc.h> /* pgd_*(), ... */
23500 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23501 #include <asm/fixmap.h> /* VSYSCALL_START */
23502 +#include <asm/tlbflush.h>
23503 +
23504 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23505 +#include <asm/stacktrace.h>
23506 +#endif
23507
23508 /*
23509 * Page fault error code bits:
23510 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
23511 int ret = 0;
23512
23513 /* kprobe_running() needs smp_processor_id() */
23514 - if (kprobes_built_in() && !user_mode_vm(regs)) {
23515 + if (kprobes_built_in() && !user_mode(regs)) {
23516 preempt_disable();
23517 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23518 ret = 1;
23519 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23520 return !instr_lo || (instr_lo>>1) == 1;
23521 case 0x00:
23522 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23523 - if (probe_kernel_address(instr, opcode))
23524 + if (user_mode(regs)) {
23525 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23526 + return 0;
23527 + } else if (probe_kernel_address(instr, opcode))
23528 return 0;
23529
23530 *prefetch = (instr_lo == 0xF) &&
23531 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23532 while (instr < max_instr) {
23533 unsigned char opcode;
23534
23535 - if (probe_kernel_address(instr, opcode))
23536 + if (user_mode(regs)) {
23537 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23538 + break;
23539 + } else if (probe_kernel_address(instr, opcode))
23540 break;
23541
23542 instr++;
23543 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23544 force_sig_info(si_signo, &info, tsk);
23545 }
23546
23547 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23548 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23549 +#endif
23550 +
23551 +#ifdef CONFIG_PAX_EMUTRAMP
23552 +static int pax_handle_fetch_fault(struct pt_regs *regs);
23553 +#endif
23554 +
23555 +#ifdef CONFIG_PAX_PAGEEXEC
23556 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23557 +{
23558 + pgd_t *pgd;
23559 + pud_t *pud;
23560 + pmd_t *pmd;
23561 +
23562 + pgd = pgd_offset(mm, address);
23563 + if (!pgd_present(*pgd))
23564 + return NULL;
23565 + pud = pud_offset(pgd, address);
23566 + if (!pud_present(*pud))
23567 + return NULL;
23568 + pmd = pmd_offset(pud, address);
23569 + if (!pmd_present(*pmd))
23570 + return NULL;
23571 + return pmd;
23572 +}
23573 +#endif
23574 +
23575 DEFINE_SPINLOCK(pgd_lock);
23576 LIST_HEAD(pgd_list);
23577
23578 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23579 for (address = VMALLOC_START & PMD_MASK;
23580 address >= TASK_SIZE && address < FIXADDR_TOP;
23581 address += PMD_SIZE) {
23582 +
23583 +#ifdef CONFIG_PAX_PER_CPU_PGD
23584 + unsigned long cpu;
23585 +#else
23586 struct page *page;
23587 +#endif
23588
23589 spin_lock(&pgd_lock);
23590 +
23591 +#ifdef CONFIG_PAX_PER_CPU_PGD
23592 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23593 + pgd_t *pgd = get_cpu_pgd(cpu);
23594 + pmd_t *ret;
23595 +#else
23596 list_for_each_entry(page, &pgd_list, lru) {
23597 + pgd_t *pgd = page_address(page);
23598 spinlock_t *pgt_lock;
23599 pmd_t *ret;
23600
23601 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23602 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23603
23604 spin_lock(pgt_lock);
23605 - ret = vmalloc_sync_one(page_address(page), address);
23606 +#endif
23607 +
23608 + ret = vmalloc_sync_one(pgd, address);
23609 +
23610 +#ifndef CONFIG_PAX_PER_CPU_PGD
23611 spin_unlock(pgt_lock);
23612 +#endif
23613
23614 if (!ret)
23615 break;
23616 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23617 * an interrupt in the middle of a task switch..
23618 */
23619 pgd_paddr = read_cr3();
23620 +
23621 +#ifdef CONFIG_PAX_PER_CPU_PGD
23622 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23623 +#endif
23624 +
23625 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23626 if (!pmd_k)
23627 return -1;
23628 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23629 * happen within a race in page table update. In the later
23630 * case just flush:
23631 */
23632 +
23633 +#ifdef CONFIG_PAX_PER_CPU_PGD
23634 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23635 + pgd = pgd_offset_cpu(smp_processor_id(), address);
23636 +#else
23637 pgd = pgd_offset(current->active_mm, address);
23638 +#endif
23639 +
23640 pgd_ref = pgd_offset_k(address);
23641 if (pgd_none(*pgd_ref))
23642 return -1;
23643 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23644 static int is_errata100(struct pt_regs *regs, unsigned long address)
23645 {
23646 #ifdef CONFIG_X86_64
23647 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23648 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23649 return 1;
23650 #endif
23651 return 0;
23652 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23653 }
23654
23655 static const char nx_warning[] = KERN_CRIT
23656 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23657 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23658
23659 static void
23660 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23661 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23662 if (!oops_may_print())
23663 return;
23664
23665 - if (error_code & PF_INSTR) {
23666 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23667 unsigned int level;
23668
23669 pte_t *pte = lookup_address(address, &level);
23670
23671 if (pte && pte_present(*pte) && !pte_exec(*pte))
23672 - printk(nx_warning, current_uid());
23673 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23674 }
23675
23676 +#ifdef CONFIG_PAX_KERNEXEC
23677 + if (init_mm.start_code <= address && address < init_mm.end_code) {
23678 + if (current->signal->curr_ip)
23679 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23680 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23681 + else
23682 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23683 + current->comm, task_pid_nr(current), current_uid(), current_euid());
23684 + }
23685 +#endif
23686 +
23687 printk(KERN_ALERT "BUG: unable to handle kernel ");
23688 if (address < PAGE_SIZE)
23689 printk(KERN_CONT "NULL pointer dereference");
23690 @@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23691 }
23692 #endif
23693
23694 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23695 + if (pax_is_fetch_fault(regs, error_code, address)) {
23696 +
23697 +#ifdef CONFIG_PAX_EMUTRAMP
23698 + switch (pax_handle_fetch_fault(regs)) {
23699 + case 2:
23700 + return;
23701 + }
23702 +#endif
23703 +
23704 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23705 + do_group_exit(SIGKILL);
23706 + }
23707 +#endif
23708 +
23709 if (unlikely(show_unhandled_signals))
23710 show_signal_msg(regs, error_code, address, tsk);
23711
23712 @@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23713 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23714 printk(KERN_ERR
23715 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23716 - tsk->comm, tsk->pid, address);
23717 + tsk->comm, task_pid_nr(tsk), address);
23718 code = BUS_MCEERR_AR;
23719 }
23720 #endif
23721 @@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23722 return 1;
23723 }
23724
23725 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23726 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23727 +{
23728 + pte_t *pte;
23729 + pmd_t *pmd;
23730 + spinlock_t *ptl;
23731 + unsigned char pte_mask;
23732 +
23733 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23734 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
23735 + return 0;
23736 +
23737 + /* PaX: it's our fault, let's handle it if we can */
23738 +
23739 + /* PaX: take a look at read faults before acquiring any locks */
23740 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23741 + /* instruction fetch attempt from a protected page in user mode */
23742 + up_read(&mm->mmap_sem);
23743 +
23744 +#ifdef CONFIG_PAX_EMUTRAMP
23745 + switch (pax_handle_fetch_fault(regs)) {
23746 + case 2:
23747 + return 1;
23748 + }
23749 +#endif
23750 +
23751 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23752 + do_group_exit(SIGKILL);
23753 + }
23754 +
23755 + pmd = pax_get_pmd(mm, address);
23756 + if (unlikely(!pmd))
23757 + return 0;
23758 +
23759 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23760 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23761 + pte_unmap_unlock(pte, ptl);
23762 + return 0;
23763 + }
23764 +
23765 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23766 + /* write attempt to a protected page in user mode */
23767 + pte_unmap_unlock(pte, ptl);
23768 + return 0;
23769 + }
23770 +
23771 +#ifdef CONFIG_SMP
23772 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23773 +#else
23774 + if (likely(address > get_limit(regs->cs)))
23775 +#endif
23776 + {
23777 + set_pte(pte, pte_mkread(*pte));
23778 + __flush_tlb_one(address);
23779 + pte_unmap_unlock(pte, ptl);
23780 + up_read(&mm->mmap_sem);
23781 + return 1;
23782 + }
23783 +
23784 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23785 +
23786 + /*
23787 + * PaX: fill DTLB with user rights and retry
23788 + */
23789 + __asm__ __volatile__ (
23790 + "orb %2,(%1)\n"
23791 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23792 +/*
23793 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23794 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23795 + * page fault when examined during a TLB load attempt. this is true not only
23796 + * for PTEs holding a non-present entry but also present entries that will
23797 + * raise a page fault (such as those set up by PaX, or the copy-on-write
23798 + * mechanism). in effect it means that we do *not* need to flush the TLBs
23799 + * for our target pages since their PTEs are simply not in the TLBs at all.
23800 +
23801 + * the best thing in omitting it is that we gain around 15-20% speed in the
23802 + * fast path of the page fault handler and can get rid of tracing since we
23803 + * can no longer flush unintended entries.
23804 + */
23805 + "invlpg (%0)\n"
23806 +#endif
23807 + __copyuser_seg"testb $0,(%0)\n"
23808 + "xorb %3,(%1)\n"
23809 + :
23810 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23811 + : "memory", "cc");
23812 + pte_unmap_unlock(pte, ptl);
23813 + up_read(&mm->mmap_sem);
23814 + return 1;
23815 +}
23816 +#endif
23817 +
23818 /*
23819 * Handle a spurious fault caused by a stale TLB entry.
23820 *
23821 @@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
23822 static inline int
23823 access_error(unsigned long error_code, struct vm_area_struct *vma)
23824 {
23825 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23826 + return 1;
23827 +
23828 if (error_code & PF_WRITE) {
23829 /* write, present and write, not present: */
23830 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23831 @@ -1005,18 +1197,33 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23832 {
23833 struct vm_area_struct *vma;
23834 struct task_struct *tsk;
23835 - unsigned long address;
23836 struct mm_struct *mm;
23837 int fault;
23838 int write = error_code & PF_WRITE;
23839 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23840 (write ? FAULT_FLAG_WRITE : 0);
23841
23842 - tsk = current;
23843 - mm = tsk->mm;
23844 -
23845 /* Get the faulting address: */
23846 - address = read_cr2();
23847 + unsigned long address = read_cr2();
23848 +
23849 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23850 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23851 + if (!search_exception_tables(regs->ip)) {
23852 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23853 + bad_area_nosemaphore(regs, error_code, address);
23854 + return;
23855 + }
23856 + if (address < PAX_USER_SHADOW_BASE) {
23857 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23858 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23859 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23860 + } else
23861 + address -= PAX_USER_SHADOW_BASE;
23862 + }
23863 +#endif
23864 +
23865 + tsk = current;
23866 + mm = tsk->mm;
23867
23868 /*
23869 * Detect and handle instructions that would cause a page fault for
23870 @@ -1077,7 +1284,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23871 * User-mode registers count as a user access even for any
23872 * potential system fault or CPU buglet:
23873 */
23874 - if (user_mode_vm(regs)) {
23875 + if (user_mode(regs)) {
23876 local_irq_enable();
23877 error_code |= PF_USER;
23878 } else {
23879 @@ -1132,6 +1339,11 @@ retry:
23880 might_sleep();
23881 }
23882
23883 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23884 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
23885 + return;
23886 +#endif
23887 +
23888 vma = find_vma(mm, address);
23889 if (unlikely(!vma)) {
23890 bad_area(regs, error_code, address);
23891 @@ -1143,18 +1355,24 @@ retry:
23892 bad_area(regs, error_code, address);
23893 return;
23894 }
23895 - if (error_code & PF_USER) {
23896 - /*
23897 - * Accessing the stack below %sp is always a bug.
23898 - * The large cushion allows instructions like enter
23899 - * and pusha to work. ("enter $65535, $31" pushes
23900 - * 32 pointers and then decrements %sp by 65535.)
23901 - */
23902 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
23903 - bad_area(regs, error_code, address);
23904 - return;
23905 - }
23906 + /*
23907 + * Accessing the stack below %sp is always a bug.
23908 + * The large cushion allows instructions like enter
23909 + * and pusha to work. ("enter $65535, $31" pushes
23910 + * 32 pointers and then decrements %sp by 65535.)
23911 + */
23912 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
23913 + bad_area(regs, error_code, address);
23914 + return;
23915 }
23916 +
23917 +#ifdef CONFIG_PAX_SEGMEXEC
23918 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
23919 + bad_area(regs, error_code, address);
23920 + return;
23921 + }
23922 +#endif
23923 +
23924 if (unlikely(expand_stack(vma, address))) {
23925 bad_area(regs, error_code, address);
23926 return;
23927 @@ -1209,3 +1427,292 @@ good_area:
23928
23929 up_read(&mm->mmap_sem);
23930 }
23931 +
23932 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23933 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
23934 +{
23935 + struct mm_struct *mm = current->mm;
23936 + unsigned long ip = regs->ip;
23937 +
23938 + if (v8086_mode(regs))
23939 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
23940 +
23941 +#ifdef CONFIG_PAX_PAGEEXEC
23942 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
23943 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
23944 + return true;
23945 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
23946 + return true;
23947 + return false;
23948 + }
23949 +#endif
23950 +
23951 +#ifdef CONFIG_PAX_SEGMEXEC
23952 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
23953 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
23954 + return true;
23955 + return false;
23956 + }
23957 +#endif
23958 +
23959 + return false;
23960 +}
23961 +#endif
23962 +
23963 +#ifdef CONFIG_PAX_EMUTRAMP
23964 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
23965 +{
23966 + int err;
23967 +
23968 + do { /* PaX: libffi trampoline emulation */
23969 + unsigned char mov, jmp;
23970 + unsigned int addr1, addr2;
23971 +
23972 +#ifdef CONFIG_X86_64
23973 + if ((regs->ip + 9) >> 32)
23974 + break;
23975 +#endif
23976 +
23977 + err = get_user(mov, (unsigned char __user *)regs->ip);
23978 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23979 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
23980 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23981 +
23982 + if (err)
23983 + break;
23984 +
23985 + if (mov == 0xB8 && jmp == 0xE9) {
23986 + regs->ax = addr1;
23987 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
23988 + return 2;
23989 + }
23990 + } while (0);
23991 +
23992 + do { /* PaX: gcc trampoline emulation #1 */
23993 + unsigned char mov1, mov2;
23994 + unsigned short jmp;
23995 + unsigned int addr1, addr2;
23996 +
23997 +#ifdef CONFIG_X86_64
23998 + if ((regs->ip + 11) >> 32)
23999 + break;
24000 +#endif
24001 +
24002 + err = get_user(mov1, (unsigned char __user *)regs->ip);
24003 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24004 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24005 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24006 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24007 +
24008 + if (err)
24009 + break;
24010 +
24011 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24012 + regs->cx = addr1;
24013 + regs->ax = addr2;
24014 + regs->ip = addr2;
24015 + return 2;
24016 + }
24017 + } while (0);
24018 +
24019 + do { /* PaX: gcc trampoline emulation #2 */
24020 + unsigned char mov, jmp;
24021 + unsigned int addr1, addr2;
24022 +
24023 +#ifdef CONFIG_X86_64
24024 + if ((regs->ip + 9) >> 32)
24025 + break;
24026 +#endif
24027 +
24028 + err = get_user(mov, (unsigned char __user *)regs->ip);
24029 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24030 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24031 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24032 +
24033 + if (err)
24034 + break;
24035 +
24036 + if (mov == 0xB9 && jmp == 0xE9) {
24037 + regs->cx = addr1;
24038 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24039 + return 2;
24040 + }
24041 + } while (0);
24042 +
24043 + return 1; /* PaX in action */
24044 +}
24045 +
24046 +#ifdef CONFIG_X86_64
24047 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24048 +{
24049 + int err;
24050 +
24051 + do { /* PaX: libffi trampoline emulation */
24052 + unsigned short mov1, mov2, jmp1;
24053 + unsigned char stcclc, jmp2;
24054 + unsigned long addr1, addr2;
24055 +
24056 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24057 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24058 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24059 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24060 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24061 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24062 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24063 +
24064 + if (err)
24065 + break;
24066 +
24067 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24068 + regs->r11 = addr1;
24069 + regs->r10 = addr2;
24070 + if (stcclc == 0xF8)
24071 + regs->flags &= ~X86_EFLAGS_CF;
24072 + else
24073 + regs->flags |= X86_EFLAGS_CF;
24074 + regs->ip = addr1;
24075 + return 2;
24076 + }
24077 + } while (0);
24078 +
24079 + do { /* PaX: gcc trampoline emulation #1 */
24080 + unsigned short mov1, mov2, jmp1;
24081 + unsigned char jmp2;
24082 + unsigned int addr1;
24083 + unsigned long addr2;
24084 +
24085 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24086 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24087 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24088 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24089 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24090 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24091 +
24092 + if (err)
24093 + break;
24094 +
24095 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24096 + regs->r11 = addr1;
24097 + regs->r10 = addr2;
24098 + regs->ip = addr1;
24099 + return 2;
24100 + }
24101 + } while (0);
24102 +
24103 + do { /* PaX: gcc trampoline emulation #2 */
24104 + unsigned short mov1, mov2, jmp1;
24105 + unsigned char jmp2;
24106 + unsigned long addr1, addr2;
24107 +
24108 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24109 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24110 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24111 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24112 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24113 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24114 +
24115 + if (err)
24116 + break;
24117 +
24118 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24119 + regs->r11 = addr1;
24120 + regs->r10 = addr2;
24121 + regs->ip = addr1;
24122 + return 2;
24123 + }
24124 + } while (0);
24125 +
24126 + return 1; /* PaX in action */
24127 +}
24128 +#endif
24129 +
24130 +/*
24131 + * PaX: decide what to do with offenders (regs->ip = fault address)
24132 + *
24133 + * returns 1 when task should be killed
24134 + * 2 when gcc trampoline was detected
24135 + */
24136 +static int pax_handle_fetch_fault(struct pt_regs *regs)
24137 +{
24138 + if (v8086_mode(regs))
24139 + return 1;
24140 +
24141 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24142 + return 1;
24143 +
24144 +#ifdef CONFIG_X86_32
24145 + return pax_handle_fetch_fault_32(regs);
24146 +#else
24147 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24148 + return pax_handle_fetch_fault_32(regs);
24149 + else
24150 + return pax_handle_fetch_fault_64(regs);
24151 +#endif
24152 +}
24153 +#endif
24154 +
24155 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24156 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24157 +{
24158 + long i;
24159 +
24160 + printk(KERN_ERR "PAX: bytes at PC: ");
24161 + for (i = 0; i < 20; i++) {
24162 + unsigned char c;
24163 + if (get_user(c, (unsigned char __force_user *)pc+i))
24164 + printk(KERN_CONT "?? ");
24165 + else
24166 + printk(KERN_CONT "%02x ", c);
24167 + }
24168 + printk("\n");
24169 +
24170 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24171 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
24172 + unsigned long c;
24173 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
24174 +#ifdef CONFIG_X86_32
24175 + printk(KERN_CONT "???????? ");
24176 +#else
24177 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24178 + printk(KERN_CONT "???????? ???????? ");
24179 + else
24180 + printk(KERN_CONT "???????????????? ");
24181 +#endif
24182 + } else {
24183 +#ifdef CONFIG_X86_64
24184 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24185 + printk(KERN_CONT "%08x ", (unsigned int)c);
24186 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24187 + } else
24188 +#endif
24189 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24190 + }
24191 + }
24192 + printk("\n");
24193 +}
24194 +#endif
24195 +
24196 +/**
24197 + * probe_kernel_write(): safely attempt to write to a location
24198 + * @dst: address to write to
24199 + * @src: pointer to the data that shall be written
24200 + * @size: size of the data chunk
24201 + *
24202 + * Safely write to address @dst from the buffer at @src. If a kernel fault
24203 + * happens, handle that and return -EFAULT.
24204 + */
24205 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24206 +{
24207 + long ret;
24208 + mm_segment_t old_fs = get_fs();
24209 +
24210 + set_fs(KERNEL_DS);
24211 + pagefault_disable();
24212 + pax_open_kernel();
24213 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24214 + pax_close_kernel();
24215 + pagefault_enable();
24216 + set_fs(old_fs);
24217 +
24218 + return ret ? -EFAULT : 0;
24219 +}
24220 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24221 index dd74e46..7d26398 100644
24222 --- a/arch/x86/mm/gup.c
24223 +++ b/arch/x86/mm/gup.c
24224 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24225 addr = start;
24226 len = (unsigned long) nr_pages << PAGE_SHIFT;
24227 end = start + len;
24228 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24229 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24230 (void __user *)start, len)))
24231 return 0;
24232
24233 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24234 index 6f31ee5..8ee4164 100644
24235 --- a/arch/x86/mm/highmem_32.c
24236 +++ b/arch/x86/mm/highmem_32.c
24237 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
24238 idx = type + KM_TYPE_NR*smp_processor_id();
24239 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24240 BUG_ON(!pte_none(*(kmap_pte-idx)));
24241 +
24242 + pax_open_kernel();
24243 set_pte(kmap_pte-idx, mk_pte(page, prot));
24244 + pax_close_kernel();
24245 +
24246 arch_flush_lazy_mmu_mode();
24247
24248 return (void *)vaddr;
24249 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24250 index f6679a7..8f795a3 100644
24251 --- a/arch/x86/mm/hugetlbpage.c
24252 +++ b/arch/x86/mm/hugetlbpage.c
24253 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24254 struct hstate *h = hstate_file(file);
24255 struct mm_struct *mm = current->mm;
24256 struct vm_area_struct *vma;
24257 - unsigned long start_addr;
24258 + unsigned long start_addr, pax_task_size = TASK_SIZE;
24259 +
24260 +#ifdef CONFIG_PAX_SEGMEXEC
24261 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24262 + pax_task_size = SEGMEXEC_TASK_SIZE;
24263 +#endif
24264 +
24265 + pax_task_size -= PAGE_SIZE;
24266
24267 if (len > mm->cached_hole_size) {
24268 - start_addr = mm->free_area_cache;
24269 + start_addr = mm->free_area_cache;
24270 } else {
24271 - start_addr = TASK_UNMAPPED_BASE;
24272 - mm->cached_hole_size = 0;
24273 + start_addr = mm->mmap_base;
24274 + mm->cached_hole_size = 0;
24275 }
24276
24277 full_search:
24278 @@ -280,26 +287,27 @@ full_search:
24279
24280 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24281 /* At this point: (!vma || addr < vma->vm_end). */
24282 - if (TASK_SIZE - len < addr) {
24283 + if (pax_task_size - len < addr) {
24284 /*
24285 * Start a new search - just in case we missed
24286 * some holes.
24287 */
24288 - if (start_addr != TASK_UNMAPPED_BASE) {
24289 - start_addr = TASK_UNMAPPED_BASE;
24290 + if (start_addr != mm->mmap_base) {
24291 + start_addr = mm->mmap_base;
24292 mm->cached_hole_size = 0;
24293 goto full_search;
24294 }
24295 return -ENOMEM;
24296 }
24297 - if (!vma || addr + len <= vma->vm_start) {
24298 - mm->free_area_cache = addr + len;
24299 - return addr;
24300 - }
24301 + if (check_heap_stack_gap(vma, addr, len))
24302 + break;
24303 if (addr + mm->cached_hole_size < vma->vm_start)
24304 mm->cached_hole_size = vma->vm_start - addr;
24305 addr = ALIGN(vma->vm_end, huge_page_size(h));
24306 }
24307 +
24308 + mm->free_area_cache = addr + len;
24309 + return addr;
24310 }
24311
24312 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24313 @@ -310,9 +318,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24314 struct mm_struct *mm = current->mm;
24315 struct vm_area_struct *vma;
24316 unsigned long base = mm->mmap_base;
24317 - unsigned long addr = addr0;
24318 + unsigned long addr;
24319 unsigned long largest_hole = mm->cached_hole_size;
24320 - unsigned long start_addr;
24321
24322 /* don't allow allocations above current base */
24323 if (mm->free_area_cache > base)
24324 @@ -322,16 +329,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24325 largest_hole = 0;
24326 mm->free_area_cache = base;
24327 }
24328 -try_again:
24329 - start_addr = mm->free_area_cache;
24330
24331 /* make sure it can fit in the remaining address space */
24332 if (mm->free_area_cache < len)
24333 goto fail;
24334
24335 /* either no address requested or can't fit in requested address hole */
24336 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
24337 + addr = mm->free_area_cache - len;
24338 do {
24339 + addr &= huge_page_mask(h);
24340 /*
24341 * Lookup failure means no vma is above this address,
24342 * i.e. return with success:
24343 @@ -340,10 +346,10 @@ try_again:
24344 if (!vma)
24345 return addr;
24346
24347 - if (addr + len <= vma->vm_start) {
24348 + if (check_heap_stack_gap(vma, addr, len)) {
24349 /* remember the address as a hint for next time */
24350 - mm->cached_hole_size = largest_hole;
24351 - return (mm->free_area_cache = addr);
24352 + mm->cached_hole_size = largest_hole;
24353 + return (mm->free_area_cache = addr);
24354 } else if (mm->free_area_cache == vma->vm_end) {
24355 /* pull free_area_cache down to the first hole */
24356 mm->free_area_cache = vma->vm_start;
24357 @@ -352,29 +358,34 @@ try_again:
24358
24359 /* remember the largest hole we saw so far */
24360 if (addr + largest_hole < vma->vm_start)
24361 - largest_hole = vma->vm_start - addr;
24362 + largest_hole = vma->vm_start - addr;
24363
24364 /* try just below the current vma->vm_start */
24365 - addr = (vma->vm_start - len) & huge_page_mask(h);
24366 - } while (len <= vma->vm_start);
24367 + addr = skip_heap_stack_gap(vma, len);
24368 + } while (!IS_ERR_VALUE(addr));
24369
24370 fail:
24371 /*
24372 - * if hint left us with no space for the requested
24373 - * mapping then try again:
24374 - */
24375 - if (start_addr != base) {
24376 - mm->free_area_cache = base;
24377 - largest_hole = 0;
24378 - goto try_again;
24379 - }
24380 - /*
24381 * A failed mmap() very likely causes application failure,
24382 * so fall back to the bottom-up function here. This scenario
24383 * can happen with large stack limits and large mmap()
24384 * allocations.
24385 */
24386 - mm->free_area_cache = TASK_UNMAPPED_BASE;
24387 +
24388 +#ifdef CONFIG_PAX_SEGMEXEC
24389 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24390 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24391 + else
24392 +#endif
24393 +
24394 + mm->mmap_base = TASK_UNMAPPED_BASE;
24395 +
24396 +#ifdef CONFIG_PAX_RANDMMAP
24397 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24398 + mm->mmap_base += mm->delta_mmap;
24399 +#endif
24400 +
24401 + mm->free_area_cache = mm->mmap_base;
24402 mm->cached_hole_size = ~0UL;
24403 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24404 len, pgoff, flags);
24405 @@ -382,6 +393,7 @@ fail:
24406 /*
24407 * Restore the topdown base:
24408 */
24409 + mm->mmap_base = base;
24410 mm->free_area_cache = base;
24411 mm->cached_hole_size = ~0UL;
24412
24413 @@ -395,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24414 struct hstate *h = hstate_file(file);
24415 struct mm_struct *mm = current->mm;
24416 struct vm_area_struct *vma;
24417 + unsigned long pax_task_size = TASK_SIZE;
24418
24419 if (len & ~huge_page_mask(h))
24420 return -EINVAL;
24421 - if (len > TASK_SIZE)
24422 +
24423 +#ifdef CONFIG_PAX_SEGMEXEC
24424 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24425 + pax_task_size = SEGMEXEC_TASK_SIZE;
24426 +#endif
24427 +
24428 + pax_task_size -= PAGE_SIZE;
24429 +
24430 + if (len > pax_task_size)
24431 return -ENOMEM;
24432
24433 if (flags & MAP_FIXED) {
24434 @@ -410,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24435 if (addr) {
24436 addr = ALIGN(addr, huge_page_size(h));
24437 vma = find_vma(mm, addr);
24438 - if (TASK_SIZE - len >= addr &&
24439 - (!vma || addr + len <= vma->vm_start))
24440 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24441 return addr;
24442 }
24443 if (mm->get_unmapped_area == arch_get_unmapped_area)
24444 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24445 index 4f0cec7..00976ce 100644
24446 --- a/arch/x86/mm/init.c
24447 +++ b/arch/x86/mm/init.c
24448 @@ -16,6 +16,8 @@
24449 #include <asm/tlb.h>
24450 #include <asm/proto.h>
24451 #include <asm/dma.h> /* for MAX_DMA_PFN */
24452 +#include <asm/desc.h>
24453 +#include <asm/bios_ebda.h>
24454
24455 unsigned long __initdata pgt_buf_start;
24456 unsigned long __meminitdata pgt_buf_end;
24457 @@ -32,7 +34,7 @@ int direct_gbpages
24458 static void __init find_early_table_space(unsigned long end, int use_pse,
24459 int use_gbpages)
24460 {
24461 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24462 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24463 phys_addr_t base;
24464
24465 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
24466 @@ -311,10 +313,37 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24467 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24468 * mmio resources as well as potential bios/acpi data regions.
24469 */
24470 +
24471 +#ifdef CONFIG_GRKERNSEC_KMEM
24472 +static unsigned int ebda_start __read_only;
24473 +static unsigned int ebda_end __read_only;
24474 +#endif
24475 +
24476 int devmem_is_allowed(unsigned long pagenr)
24477 {
24478 +#ifdef CONFIG_GRKERNSEC_KMEM
24479 + /* allow BDA */
24480 + if (!pagenr)
24481 + return 1;
24482 + /* allow EBDA */
24483 + if (pagenr >= ebda_start && pagenr < ebda_end)
24484 + return 1;
24485 +#else
24486 + if (!pagenr)
24487 + return 1;
24488 +#ifdef CONFIG_VM86
24489 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24490 + return 1;
24491 +#endif
24492 +#endif
24493 +
24494 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24495 + return 1;
24496 +#ifdef CONFIG_GRKERNSEC_KMEM
24497 + /* throw out everything else below 1MB */
24498 if (pagenr <= 256)
24499 - return 1;
24500 + return 0;
24501 +#endif
24502 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24503 return 0;
24504 if (!page_is_ram(pagenr))
24505 @@ -371,8 +400,116 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24506 #endif
24507 }
24508
24509 +#ifdef CONFIG_GRKERNSEC_KMEM
24510 +static inline void gr_init_ebda(void)
24511 +{
24512 + unsigned int ebda_addr;
24513 + unsigned int ebda_size = 0;
24514 +
24515 + ebda_addr = get_bios_ebda();
24516 + if (ebda_addr) {
24517 + ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
24518 + ebda_size <<= 10;
24519 + }
24520 + if (ebda_addr && ebda_size) {
24521 + ebda_start = ebda_addr >> PAGE_SHIFT;
24522 + ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
24523 + } else {
24524 + ebda_start = 0x9f000 >> PAGE_SHIFT;
24525 + ebda_end = 0xa0000 >> PAGE_SHIFT;
24526 + }
24527 +}
24528 +#else
24529 +static inline void gr_init_ebda(void) { }
24530 +#endif
24531 +
24532 void free_initmem(void)
24533 {
24534 +#ifdef CONFIG_PAX_KERNEXEC
24535 +#ifdef CONFIG_X86_32
24536 + /* PaX: limit KERNEL_CS to actual size */
24537 + unsigned long addr, limit;
24538 + struct desc_struct d;
24539 + int cpu;
24540 +#else
24541 + pgd_t *pgd;
24542 + pud_t *pud;
24543 + pmd_t *pmd;
24544 + unsigned long addr, end;
24545 +#endif
24546 +#endif
24547 +
24548 + gr_init_ebda();
24549 +
24550 +#ifdef CONFIG_PAX_KERNEXEC
24551 +#ifdef CONFIG_X86_32
24552 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24553 + limit = (limit - 1UL) >> PAGE_SHIFT;
24554 +
24555 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24556 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24557 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24558 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24559 + }
24560 +
24561 + /* PaX: make KERNEL_CS read-only */
24562 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24563 + if (!paravirt_enabled())
24564 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24565 +/*
24566 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24567 + pgd = pgd_offset_k(addr);
24568 + pud = pud_offset(pgd, addr);
24569 + pmd = pmd_offset(pud, addr);
24570 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24571 + }
24572 +*/
24573 +#ifdef CONFIG_X86_PAE
24574 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24575 +/*
24576 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24577 + pgd = pgd_offset_k(addr);
24578 + pud = pud_offset(pgd, addr);
24579 + pmd = pmd_offset(pud, addr);
24580 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24581 + }
24582 +*/
24583 +#endif
24584 +
24585 +#ifdef CONFIG_MODULES
24586 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24587 +#endif
24588 +
24589 +#else
24590 + /* PaX: make kernel code/rodata read-only, rest non-executable */
24591 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24592 + pgd = pgd_offset_k(addr);
24593 + pud = pud_offset(pgd, addr);
24594 + pmd = pmd_offset(pud, addr);
24595 + if (!pmd_present(*pmd))
24596 + continue;
24597 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24598 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24599 + else
24600 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24601 + }
24602 +
24603 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24604 + end = addr + KERNEL_IMAGE_SIZE;
24605 + for (; addr < end; addr += PMD_SIZE) {
24606 + pgd = pgd_offset_k(addr);
24607 + pud = pud_offset(pgd, addr);
24608 + pmd = pmd_offset(pud, addr);
24609 + if (!pmd_present(*pmd))
24610 + continue;
24611 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24612 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24613 + }
24614 +#endif
24615 +
24616 + flush_tlb_all();
24617 +#endif
24618 +
24619 free_init_pages("unused kernel memory",
24620 (unsigned long)(&__init_begin),
24621 (unsigned long)(&__init_end));
24622 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24623 index 575d86f..4987469 100644
24624 --- a/arch/x86/mm/init_32.c
24625 +++ b/arch/x86/mm/init_32.c
24626 @@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
24627 }
24628
24629 /*
24630 - * Creates a middle page table and puts a pointer to it in the
24631 - * given global directory entry. This only returns the gd entry
24632 - * in non-PAE compilation mode, since the middle layer is folded.
24633 - */
24634 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
24635 -{
24636 - pud_t *pud;
24637 - pmd_t *pmd_table;
24638 -
24639 -#ifdef CONFIG_X86_PAE
24640 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24641 - if (after_bootmem)
24642 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24643 - else
24644 - pmd_table = (pmd_t *)alloc_low_page();
24645 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24646 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24647 - pud = pud_offset(pgd, 0);
24648 - BUG_ON(pmd_table != pmd_offset(pud, 0));
24649 -
24650 - return pmd_table;
24651 - }
24652 -#endif
24653 - pud = pud_offset(pgd, 0);
24654 - pmd_table = pmd_offset(pud, 0);
24655 -
24656 - return pmd_table;
24657 -}
24658 -
24659 -/*
24660 * Create a page table and place a pointer to it in a middle page
24661 * directory entry:
24662 */
24663 @@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24664 page_table = (pte_t *)alloc_low_page();
24665
24666 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24667 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24668 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24669 +#else
24670 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24671 +#endif
24672 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24673 }
24674
24675 return pte_offset_kernel(pmd, 0);
24676 }
24677
24678 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
24679 +{
24680 + pud_t *pud;
24681 + pmd_t *pmd_table;
24682 +
24683 + pud = pud_offset(pgd, 0);
24684 + pmd_table = pmd_offset(pud, 0);
24685 +
24686 + return pmd_table;
24687 +}
24688 +
24689 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24690 {
24691 int pgd_idx = pgd_index(vaddr);
24692 @@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24693 int pgd_idx, pmd_idx;
24694 unsigned long vaddr;
24695 pgd_t *pgd;
24696 + pud_t *pud;
24697 pmd_t *pmd;
24698 pte_t *pte = NULL;
24699
24700 @@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24701 pgd = pgd_base + pgd_idx;
24702
24703 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24704 - pmd = one_md_table_init(pgd);
24705 - pmd = pmd + pmd_index(vaddr);
24706 + pud = pud_offset(pgd, vaddr);
24707 + pmd = pmd_offset(pud, vaddr);
24708 +
24709 +#ifdef CONFIG_X86_PAE
24710 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24711 +#endif
24712 +
24713 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24714 pmd++, pmd_idx++) {
24715 pte = page_table_kmap_check(one_page_table_init(pmd),
24716 @@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24717 }
24718 }
24719
24720 -static inline int is_kernel_text(unsigned long addr)
24721 +static inline int is_kernel_text(unsigned long start, unsigned long end)
24722 {
24723 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24724 - return 1;
24725 - return 0;
24726 + if ((start > ktla_ktva((unsigned long)_etext) ||
24727 + end <= ktla_ktva((unsigned long)_stext)) &&
24728 + (start > ktla_ktva((unsigned long)_einittext) ||
24729 + end <= ktla_ktva((unsigned long)_sinittext)) &&
24730 +
24731 +#ifdef CONFIG_ACPI_SLEEP
24732 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24733 +#endif
24734 +
24735 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24736 + return 0;
24737 + return 1;
24738 }
24739
24740 /*
24741 @@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
24742 unsigned long last_map_addr = end;
24743 unsigned long start_pfn, end_pfn;
24744 pgd_t *pgd_base = swapper_pg_dir;
24745 - int pgd_idx, pmd_idx, pte_ofs;
24746 + unsigned int pgd_idx, pmd_idx, pte_ofs;
24747 unsigned long pfn;
24748 pgd_t *pgd;
24749 + pud_t *pud;
24750 pmd_t *pmd;
24751 pte_t *pte;
24752 unsigned pages_2m, pages_4k;
24753 @@ -280,8 +281,13 @@ repeat:
24754 pfn = start_pfn;
24755 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24756 pgd = pgd_base + pgd_idx;
24757 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24758 - pmd = one_md_table_init(pgd);
24759 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24760 + pud = pud_offset(pgd, 0);
24761 + pmd = pmd_offset(pud, 0);
24762 +
24763 +#ifdef CONFIG_X86_PAE
24764 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24765 +#endif
24766
24767 if (pfn >= end_pfn)
24768 continue;
24769 @@ -293,14 +299,13 @@ repeat:
24770 #endif
24771 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24772 pmd++, pmd_idx++) {
24773 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24774 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24775
24776 /*
24777 * Map with big pages if possible, otherwise
24778 * create normal page tables:
24779 */
24780 if (use_pse) {
24781 - unsigned int addr2;
24782 pgprot_t prot = PAGE_KERNEL_LARGE;
24783 /*
24784 * first pass will use the same initial
24785 @@ -310,11 +315,7 @@ repeat:
24786 __pgprot(PTE_IDENT_ATTR |
24787 _PAGE_PSE);
24788
24789 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24790 - PAGE_OFFSET + PAGE_SIZE-1;
24791 -
24792 - if (is_kernel_text(addr) ||
24793 - is_kernel_text(addr2))
24794 + if (is_kernel_text(address, address + PMD_SIZE))
24795 prot = PAGE_KERNEL_LARGE_EXEC;
24796
24797 pages_2m++;
24798 @@ -331,7 +332,7 @@ repeat:
24799 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24800 pte += pte_ofs;
24801 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24802 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24803 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24804 pgprot_t prot = PAGE_KERNEL;
24805 /*
24806 * first pass will use the same initial
24807 @@ -339,7 +340,7 @@ repeat:
24808 */
24809 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24810
24811 - if (is_kernel_text(addr))
24812 + if (is_kernel_text(address, address + PAGE_SIZE))
24813 prot = PAGE_KERNEL_EXEC;
24814
24815 pages_4k++;
24816 @@ -465,7 +466,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24817
24818 pud = pud_offset(pgd, va);
24819 pmd = pmd_offset(pud, va);
24820 - if (!pmd_present(*pmd))
24821 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
24822 break;
24823
24824 pte = pte_offset_kernel(pmd, va);
24825 @@ -517,12 +518,10 @@ void __init early_ioremap_page_table_range_init(void)
24826
24827 static void __init pagetable_init(void)
24828 {
24829 - pgd_t *pgd_base = swapper_pg_dir;
24830 -
24831 - permanent_kmaps_init(pgd_base);
24832 + permanent_kmaps_init(swapper_pg_dir);
24833 }
24834
24835 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24836 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24837 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24838
24839 /* user-defined highmem size */
24840 @@ -734,6 +733,12 @@ void __init mem_init(void)
24841
24842 pci_iommu_alloc();
24843
24844 +#ifdef CONFIG_PAX_PER_CPU_PGD
24845 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24846 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24847 + KERNEL_PGD_PTRS);
24848 +#endif
24849 +
24850 #ifdef CONFIG_FLATMEM
24851 BUG_ON(!mem_map);
24852 #endif
24853 @@ -760,7 +765,7 @@ void __init mem_init(void)
24854 reservedpages++;
24855
24856 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24857 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24858 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24859 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24860
24861 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24862 @@ -801,10 +806,10 @@ void __init mem_init(void)
24863 ((unsigned long)&__init_end -
24864 (unsigned long)&__init_begin) >> 10,
24865
24866 - (unsigned long)&_etext, (unsigned long)&_edata,
24867 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24868 + (unsigned long)&_sdata, (unsigned long)&_edata,
24869 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24870
24871 - (unsigned long)&_text, (unsigned long)&_etext,
24872 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24873 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24874
24875 /*
24876 @@ -882,6 +887,7 @@ void set_kernel_text_rw(void)
24877 if (!kernel_set_to_readonly)
24878 return;
24879
24880 + start = ktla_ktva(start);
24881 pr_debug("Set kernel text: %lx - %lx for read write\n",
24882 start, start+size);
24883
24884 @@ -896,6 +902,7 @@ void set_kernel_text_ro(void)
24885 if (!kernel_set_to_readonly)
24886 return;
24887
24888 + start = ktla_ktva(start);
24889 pr_debug("Set kernel text: %lx - %lx for read only\n",
24890 start, start+size);
24891
24892 @@ -924,6 +931,7 @@ void mark_rodata_ro(void)
24893 unsigned long start = PFN_ALIGN(_text);
24894 unsigned long size = PFN_ALIGN(_etext) - start;
24895
24896 + start = ktla_ktva(start);
24897 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
24898 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
24899 size >> 10);
24900 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
24901 index fc18be0..e539653 100644
24902 --- a/arch/x86/mm/init_64.c
24903 +++ b/arch/x86/mm/init_64.c
24904 @@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
24905 * around without checking the pgd every time.
24906 */
24907
24908 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
24909 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
24910 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24911
24912 int force_personality32;
24913 @@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24914
24915 for (address = start; address <= end; address += PGDIR_SIZE) {
24916 const pgd_t *pgd_ref = pgd_offset_k(address);
24917 +
24918 +#ifdef CONFIG_PAX_PER_CPU_PGD
24919 + unsigned long cpu;
24920 +#else
24921 struct page *page;
24922 +#endif
24923
24924 if (pgd_none(*pgd_ref))
24925 continue;
24926
24927 spin_lock(&pgd_lock);
24928 +
24929 +#ifdef CONFIG_PAX_PER_CPU_PGD
24930 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24931 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
24932 +#else
24933 list_for_each_entry(page, &pgd_list, lru) {
24934 pgd_t *pgd;
24935 spinlock_t *pgt_lock;
24936 @@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24937 /* the pgt_lock only for Xen */
24938 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
24939 spin_lock(pgt_lock);
24940 +#endif
24941
24942 if (pgd_none(*pgd))
24943 set_pgd(pgd, *pgd_ref);
24944 @@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24945 BUG_ON(pgd_page_vaddr(*pgd)
24946 != pgd_page_vaddr(*pgd_ref));
24947
24948 +#ifndef CONFIG_PAX_PER_CPU_PGD
24949 spin_unlock(pgt_lock);
24950 +#endif
24951 +
24952 }
24953 spin_unlock(&pgd_lock);
24954 }
24955 @@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
24956 {
24957 if (pgd_none(*pgd)) {
24958 pud_t *pud = (pud_t *)spp_getpage();
24959 - pgd_populate(&init_mm, pgd, pud);
24960 + pgd_populate_kernel(&init_mm, pgd, pud);
24961 if (pud != pud_offset(pgd, 0))
24962 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
24963 pud, pud_offset(pgd, 0));
24964 @@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
24965 {
24966 if (pud_none(*pud)) {
24967 pmd_t *pmd = (pmd_t *) spp_getpage();
24968 - pud_populate(&init_mm, pud, pmd);
24969 + pud_populate_kernel(&init_mm, pud, pmd);
24970 if (pmd != pmd_offset(pud, 0))
24971 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
24972 pmd, pmd_offset(pud, 0));
24973 @@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
24974 pmd = fill_pmd(pud, vaddr);
24975 pte = fill_pte(pmd, vaddr);
24976
24977 + pax_open_kernel();
24978 set_pte(pte, new_pte);
24979 + pax_close_kernel();
24980
24981 /*
24982 * It's enough to flush this one mapping.
24983 @@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
24984 pgd = pgd_offset_k((unsigned long)__va(phys));
24985 if (pgd_none(*pgd)) {
24986 pud = (pud_t *) spp_getpage();
24987 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
24988 - _PAGE_USER));
24989 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
24990 }
24991 pud = pud_offset(pgd, (unsigned long)__va(phys));
24992 if (pud_none(*pud)) {
24993 pmd = (pmd_t *) spp_getpage();
24994 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
24995 - _PAGE_USER));
24996 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
24997 }
24998 pmd = pmd_offset(pud, phys);
24999 BUG_ON(!pmd_none(*pmd));
25000 @@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
25001 if (pfn >= pgt_buf_top)
25002 panic("alloc_low_page: ran out of memory");
25003
25004 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25005 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25006 clear_page(adr);
25007 *phys = pfn * PAGE_SIZE;
25008 return adr;
25009 @@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
25010
25011 phys = __pa(virt);
25012 left = phys & (PAGE_SIZE - 1);
25013 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25014 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25015 adr = (void *)(((unsigned long)adr) | left);
25016
25017 return adr;
25018 @@ -545,7 +559,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
25019 unmap_low_page(pmd);
25020
25021 spin_lock(&init_mm.page_table_lock);
25022 - pud_populate(&init_mm, pud, __va(pmd_phys));
25023 + pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
25024 spin_unlock(&init_mm.page_table_lock);
25025 }
25026 __flush_tlb_all();
25027 @@ -591,7 +605,7 @@ kernel_physical_mapping_init(unsigned long start,
25028 unmap_low_page(pud);
25029
25030 spin_lock(&init_mm.page_table_lock);
25031 - pgd_populate(&init_mm, pgd, __va(pud_phys));
25032 + pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
25033 spin_unlock(&init_mm.page_table_lock);
25034 pgd_changed = true;
25035 }
25036 @@ -683,6 +697,12 @@ void __init mem_init(void)
25037
25038 pci_iommu_alloc();
25039
25040 +#ifdef CONFIG_PAX_PER_CPU_PGD
25041 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25042 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25043 + KERNEL_PGD_PTRS);
25044 +#endif
25045 +
25046 /* clear_bss() already clear the empty_zero_page */
25047
25048 reservedpages = 0;
25049 @@ -843,8 +863,8 @@ int kern_addr_valid(unsigned long addr)
25050 static struct vm_area_struct gate_vma = {
25051 .vm_start = VSYSCALL_START,
25052 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25053 - .vm_page_prot = PAGE_READONLY_EXEC,
25054 - .vm_flags = VM_READ | VM_EXEC
25055 + .vm_page_prot = PAGE_READONLY,
25056 + .vm_flags = VM_READ
25057 };
25058
25059 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
25060 @@ -878,7 +898,7 @@ int in_gate_area_no_mm(unsigned long addr)
25061
25062 const char *arch_vma_name(struct vm_area_struct *vma)
25063 {
25064 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25065 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25066 return "[vdso]";
25067 if (vma == &gate_vma)
25068 return "[vsyscall]";
25069 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25070 index 7b179b4..6bd1777 100644
25071 --- a/arch/x86/mm/iomap_32.c
25072 +++ b/arch/x86/mm/iomap_32.c
25073 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
25074 type = kmap_atomic_idx_push();
25075 idx = type + KM_TYPE_NR * smp_processor_id();
25076 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25077 +
25078 + pax_open_kernel();
25079 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25080 + pax_close_kernel();
25081 +
25082 arch_flush_lazy_mmu_mode();
25083
25084 return (void *)vaddr;
25085 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25086 index be1ef57..55f0160 100644
25087 --- a/arch/x86/mm/ioremap.c
25088 +++ b/arch/x86/mm/ioremap.c
25089 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25090 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
25091 int is_ram = page_is_ram(pfn);
25092
25093 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25094 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25095 return NULL;
25096 WARN_ON_ONCE(is_ram);
25097 }
25098 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25099
25100 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25101 if (page_is_ram(start >> PAGE_SHIFT))
25102 +#ifdef CONFIG_HIGHMEM
25103 + if ((start >> PAGE_SHIFT) < max_low_pfn)
25104 +#endif
25105 return __va(phys);
25106
25107 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
25108 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
25109 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25110
25111 static __initdata int after_paging_init;
25112 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25113 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25114
25115 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25116 {
25117 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
25118 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25119
25120 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25121 - memset(bm_pte, 0, sizeof(bm_pte));
25122 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
25123 + pmd_populate_user(&init_mm, pmd, bm_pte);
25124
25125 /*
25126 * The boot-ioremap range spans multiple pmds, for which
25127 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25128 index d87dd6d..bf3fa66 100644
25129 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
25130 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25131 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25132 * memory (e.g. tracked pages)? For now, we need this to avoid
25133 * invoking kmemcheck for PnP BIOS calls.
25134 */
25135 - if (regs->flags & X86_VM_MASK)
25136 + if (v8086_mode(regs))
25137 return false;
25138 - if (regs->cs != __KERNEL_CS)
25139 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25140 return false;
25141
25142 pte = kmemcheck_pte_lookup(address);
25143 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25144 index 845df68..1d8d29f 100644
25145 --- a/arch/x86/mm/mmap.c
25146 +++ b/arch/x86/mm/mmap.c
25147 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
25148 * Leave an at least ~128 MB hole with possible stack randomization.
25149 */
25150 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25151 -#define MAX_GAP (TASK_SIZE/6*5)
25152 +#define MAX_GAP (pax_task_size/6*5)
25153
25154 static int mmap_is_legacy(void)
25155 {
25156 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
25157 return rnd << PAGE_SHIFT;
25158 }
25159
25160 -static unsigned long mmap_base(void)
25161 +static unsigned long mmap_base(struct mm_struct *mm)
25162 {
25163 unsigned long gap = rlimit(RLIMIT_STACK);
25164 + unsigned long pax_task_size = TASK_SIZE;
25165 +
25166 +#ifdef CONFIG_PAX_SEGMEXEC
25167 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25168 + pax_task_size = SEGMEXEC_TASK_SIZE;
25169 +#endif
25170
25171 if (gap < MIN_GAP)
25172 gap = MIN_GAP;
25173 else if (gap > MAX_GAP)
25174 gap = MAX_GAP;
25175
25176 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25177 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25178 }
25179
25180 /*
25181 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25182 * does, but not when emulating X86_32
25183 */
25184 -static unsigned long mmap_legacy_base(void)
25185 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
25186 {
25187 - if (mmap_is_ia32())
25188 + if (mmap_is_ia32()) {
25189 +
25190 +#ifdef CONFIG_PAX_SEGMEXEC
25191 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25192 + return SEGMEXEC_TASK_UNMAPPED_BASE;
25193 + else
25194 +#endif
25195 +
25196 return TASK_UNMAPPED_BASE;
25197 - else
25198 + } else
25199 return TASK_UNMAPPED_BASE + mmap_rnd();
25200 }
25201
25202 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
25203 void arch_pick_mmap_layout(struct mm_struct *mm)
25204 {
25205 if (mmap_is_legacy()) {
25206 - mm->mmap_base = mmap_legacy_base();
25207 + mm->mmap_base = mmap_legacy_base(mm);
25208 +
25209 +#ifdef CONFIG_PAX_RANDMMAP
25210 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25211 + mm->mmap_base += mm->delta_mmap;
25212 +#endif
25213 +
25214 mm->get_unmapped_area = arch_get_unmapped_area;
25215 mm->unmap_area = arch_unmap_area;
25216 } else {
25217 - mm->mmap_base = mmap_base();
25218 + mm->mmap_base = mmap_base(mm);
25219 +
25220 +#ifdef CONFIG_PAX_RANDMMAP
25221 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25222 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25223 +#endif
25224 +
25225 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25226 mm->unmap_area = arch_unmap_area_topdown;
25227 }
25228 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25229 index dc0b727..dc9d71a 100644
25230 --- a/arch/x86/mm/mmio-mod.c
25231 +++ b/arch/x86/mm/mmio-mod.c
25232 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25233 break;
25234 default:
25235 {
25236 - unsigned char *ip = (unsigned char *)instptr;
25237 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25238 my_trace->opcode = MMIO_UNKNOWN_OP;
25239 my_trace->width = 0;
25240 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25241 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25242 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25243 void __iomem *addr)
25244 {
25245 - static atomic_t next_id;
25246 + static atomic_unchecked_t next_id;
25247 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25248 /* These are page-unaligned. */
25249 struct mmiotrace_map map = {
25250 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25251 .private = trace
25252 },
25253 .phys = offset,
25254 - .id = atomic_inc_return(&next_id)
25255 + .id = atomic_inc_return_unchecked(&next_id)
25256 };
25257 map.map_id = trace->id;
25258
25259 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25260 index b008656..773eac2 100644
25261 --- a/arch/x86/mm/pageattr-test.c
25262 +++ b/arch/x86/mm/pageattr-test.c
25263 @@ -36,7 +36,7 @@ enum {
25264
25265 static int pte_testbit(pte_t pte)
25266 {
25267 - return pte_flags(pte) & _PAGE_UNUSED1;
25268 + return pte_flags(pte) & _PAGE_CPA_TEST;
25269 }
25270
25271 struct split_state {
25272 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25273 index e1ebde3..b1e1db38 100644
25274 --- a/arch/x86/mm/pageattr.c
25275 +++ b/arch/x86/mm/pageattr.c
25276 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25277 */
25278 #ifdef CONFIG_PCI_BIOS
25279 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25280 - pgprot_val(forbidden) |= _PAGE_NX;
25281 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25282 #endif
25283
25284 /*
25285 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25286 * Does not cover __inittext since that is gone later on. On
25287 * 64bit we do not enforce !NX on the low mapping
25288 */
25289 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
25290 - pgprot_val(forbidden) |= _PAGE_NX;
25291 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25292 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25293
25294 +#ifdef CONFIG_DEBUG_RODATA
25295 /*
25296 * The .rodata section needs to be read-only. Using the pfn
25297 * catches all aliases.
25298 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25299 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25300 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25301 pgprot_val(forbidden) |= _PAGE_RW;
25302 +#endif
25303
25304 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25305 /*
25306 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25307 }
25308 #endif
25309
25310 +#ifdef CONFIG_PAX_KERNEXEC
25311 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25312 + pgprot_val(forbidden) |= _PAGE_RW;
25313 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25314 + }
25315 +#endif
25316 +
25317 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25318
25319 return prot;
25320 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25321 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25322 {
25323 /* change init_mm */
25324 + pax_open_kernel();
25325 set_pte_atomic(kpte, pte);
25326 +
25327 #ifdef CONFIG_X86_32
25328 if (!SHARED_KERNEL_PMD) {
25329 +
25330 +#ifdef CONFIG_PAX_PER_CPU_PGD
25331 + unsigned long cpu;
25332 +#else
25333 struct page *page;
25334 +#endif
25335
25336 +#ifdef CONFIG_PAX_PER_CPU_PGD
25337 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25338 + pgd_t *pgd = get_cpu_pgd(cpu);
25339 +#else
25340 list_for_each_entry(page, &pgd_list, lru) {
25341 - pgd_t *pgd;
25342 + pgd_t *pgd = (pgd_t *)page_address(page);
25343 +#endif
25344 +
25345 pud_t *pud;
25346 pmd_t *pmd;
25347
25348 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
25349 + pgd += pgd_index(address);
25350 pud = pud_offset(pgd, address);
25351 pmd = pmd_offset(pud, address);
25352 set_pte_atomic((pte_t *)pmd, pte);
25353 }
25354 }
25355 #endif
25356 + pax_close_kernel();
25357 }
25358
25359 static int
25360 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25361 index f6ff57b..481690f 100644
25362 --- a/arch/x86/mm/pat.c
25363 +++ b/arch/x86/mm/pat.c
25364 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
25365
25366 if (!entry) {
25367 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25368 - current->comm, current->pid, start, end);
25369 + current->comm, task_pid_nr(current), start, end);
25370 return -EINVAL;
25371 }
25372
25373 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25374 while (cursor < to) {
25375 if (!devmem_is_allowed(pfn)) {
25376 printk(KERN_INFO
25377 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25378 - current->comm, from, to);
25379 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25380 + current->comm, from, to, cursor);
25381 return 0;
25382 }
25383 cursor += PAGE_SIZE;
25384 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25385 printk(KERN_INFO
25386 "%s:%d ioremap_change_attr failed %s "
25387 "for %Lx-%Lx\n",
25388 - current->comm, current->pid,
25389 + current->comm, task_pid_nr(current),
25390 cattr_name(flags),
25391 base, (unsigned long long)(base + size));
25392 return -EINVAL;
25393 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25394 if (want_flags != flags) {
25395 printk(KERN_WARNING
25396 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
25397 - current->comm, current->pid,
25398 + current->comm, task_pid_nr(current),
25399 cattr_name(want_flags),
25400 (unsigned long long)paddr,
25401 (unsigned long long)(paddr + size),
25402 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25403 free_memtype(paddr, paddr + size);
25404 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25405 " for %Lx-%Lx, got %s\n",
25406 - current->comm, current->pid,
25407 + current->comm, task_pid_nr(current),
25408 cattr_name(want_flags),
25409 (unsigned long long)paddr,
25410 (unsigned long long)(paddr + size),
25411 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25412 index 9f0614d..92ae64a 100644
25413 --- a/arch/x86/mm/pf_in.c
25414 +++ b/arch/x86/mm/pf_in.c
25415 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25416 int i;
25417 enum reason_type rv = OTHERS;
25418
25419 - p = (unsigned char *)ins_addr;
25420 + p = (unsigned char *)ktla_ktva(ins_addr);
25421 p += skip_prefix(p, &prf);
25422 p += get_opcode(p, &opcode);
25423
25424 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25425 struct prefix_bits prf;
25426 int i;
25427
25428 - p = (unsigned char *)ins_addr;
25429 + p = (unsigned char *)ktla_ktva(ins_addr);
25430 p += skip_prefix(p, &prf);
25431 p += get_opcode(p, &opcode);
25432
25433 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25434 struct prefix_bits prf;
25435 int i;
25436
25437 - p = (unsigned char *)ins_addr;
25438 + p = (unsigned char *)ktla_ktva(ins_addr);
25439 p += skip_prefix(p, &prf);
25440 p += get_opcode(p, &opcode);
25441
25442 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25443 struct prefix_bits prf;
25444 int i;
25445
25446 - p = (unsigned char *)ins_addr;
25447 + p = (unsigned char *)ktla_ktva(ins_addr);
25448 p += skip_prefix(p, &prf);
25449 p += get_opcode(p, &opcode);
25450 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25451 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25452 struct prefix_bits prf;
25453 int i;
25454
25455 - p = (unsigned char *)ins_addr;
25456 + p = (unsigned char *)ktla_ktva(ins_addr);
25457 p += skip_prefix(p, &prf);
25458 p += get_opcode(p, &opcode);
25459 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25460 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25461 index 8573b83..4f3ed7e 100644
25462 --- a/arch/x86/mm/pgtable.c
25463 +++ b/arch/x86/mm/pgtable.c
25464 @@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
25465 list_del(&page->lru);
25466 }
25467
25468 -#define UNSHARED_PTRS_PER_PGD \
25469 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25470 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25471 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25472
25473 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
25474 +{
25475 + unsigned int count = USER_PGD_PTRS;
25476
25477 + while (count--)
25478 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25479 +}
25480 +#endif
25481 +
25482 +#ifdef CONFIG_PAX_PER_CPU_PGD
25483 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
25484 +{
25485 + unsigned int count = USER_PGD_PTRS;
25486 +
25487 + while (count--) {
25488 + pgd_t pgd;
25489 +
25490 +#ifdef CONFIG_X86_64
25491 + pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
25492 +#else
25493 + pgd = *src++;
25494 +#endif
25495 +
25496 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25497 + pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
25498 +#endif
25499 +
25500 + *dst++ = pgd;
25501 + }
25502 +
25503 +}
25504 +#endif
25505 +
25506 +#ifdef CONFIG_X86_64
25507 +#define pxd_t pud_t
25508 +#define pyd_t pgd_t
25509 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25510 +#define pxd_free(mm, pud) pud_free((mm), (pud))
25511 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25512 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
25513 +#define PYD_SIZE PGDIR_SIZE
25514 +#else
25515 +#define pxd_t pmd_t
25516 +#define pyd_t pud_t
25517 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25518 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
25519 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25520 +#define pyd_offset(mm, address) pud_offset((mm), (address))
25521 +#define PYD_SIZE PUD_SIZE
25522 +#endif
25523 +
25524 +#ifdef CONFIG_PAX_PER_CPU_PGD
25525 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25526 +static inline void pgd_dtor(pgd_t *pgd) {}
25527 +#else
25528 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
25529 {
25530 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
25531 @@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
25532 pgd_list_del(pgd);
25533 spin_unlock(&pgd_lock);
25534 }
25535 +#endif
25536
25537 /*
25538 * List of all pgd's needed for non-PAE so it can invalidate entries
25539 @@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
25540 * -- wli
25541 */
25542
25543 -#ifdef CONFIG_X86_PAE
25544 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25545 /*
25546 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25547 * updating the top-level pagetable entries to guarantee the
25548 @@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
25549 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25550 * and initialize the kernel pmds here.
25551 */
25552 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25553 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25554
25555 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25556 {
25557 @@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25558 */
25559 flush_tlb_mm(mm);
25560 }
25561 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25562 +#define PREALLOCATED_PXDS USER_PGD_PTRS
25563 #else /* !CONFIG_X86_PAE */
25564
25565 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25566 -#define PREALLOCATED_PMDS 0
25567 +#define PREALLOCATED_PXDS 0
25568
25569 #endif /* CONFIG_X86_PAE */
25570
25571 -static void free_pmds(pmd_t *pmds[])
25572 +static void free_pxds(pxd_t *pxds[])
25573 {
25574 int i;
25575
25576 - for(i = 0; i < PREALLOCATED_PMDS; i++)
25577 - if (pmds[i])
25578 - free_page((unsigned long)pmds[i]);
25579 + for(i = 0; i < PREALLOCATED_PXDS; i++)
25580 + if (pxds[i])
25581 + free_page((unsigned long)pxds[i]);
25582 }
25583
25584 -static int preallocate_pmds(pmd_t *pmds[])
25585 +static int preallocate_pxds(pxd_t *pxds[])
25586 {
25587 int i;
25588 bool failed = false;
25589
25590 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25591 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25592 - if (pmd == NULL)
25593 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25594 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25595 + if (pxd == NULL)
25596 failed = true;
25597 - pmds[i] = pmd;
25598 + pxds[i] = pxd;
25599 }
25600
25601 if (failed) {
25602 - free_pmds(pmds);
25603 + free_pxds(pxds);
25604 return -ENOMEM;
25605 }
25606
25607 @@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
25608 * preallocate which never got a corresponding vma will need to be
25609 * freed manually.
25610 */
25611 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25612 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25613 {
25614 int i;
25615
25616 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25617 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25618 pgd_t pgd = pgdp[i];
25619
25620 if (pgd_val(pgd) != 0) {
25621 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25622 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25623
25624 - pgdp[i] = native_make_pgd(0);
25625 + set_pgd(pgdp + i, native_make_pgd(0));
25626
25627 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25628 - pmd_free(mm, pmd);
25629 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25630 + pxd_free(mm, pxd);
25631 }
25632 }
25633 }
25634
25635 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25636 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25637 {
25638 - pud_t *pud;
25639 + pyd_t *pyd;
25640 unsigned long addr;
25641 int i;
25642
25643 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25644 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25645 return;
25646
25647 - pud = pud_offset(pgd, 0);
25648 +#ifdef CONFIG_X86_64
25649 + pyd = pyd_offset(mm, 0L);
25650 +#else
25651 + pyd = pyd_offset(pgd, 0L);
25652 +#endif
25653
25654 - for (addr = i = 0; i < PREALLOCATED_PMDS;
25655 - i++, pud++, addr += PUD_SIZE) {
25656 - pmd_t *pmd = pmds[i];
25657 + for (addr = i = 0; i < PREALLOCATED_PXDS;
25658 + i++, pyd++, addr += PYD_SIZE) {
25659 + pxd_t *pxd = pxds[i];
25660
25661 if (i >= KERNEL_PGD_BOUNDARY)
25662 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25663 - sizeof(pmd_t) * PTRS_PER_PMD);
25664 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25665 + sizeof(pxd_t) * PTRS_PER_PMD);
25666
25667 - pud_populate(mm, pud, pmd);
25668 + pyd_populate(mm, pyd, pxd);
25669 }
25670 }
25671
25672 pgd_t *pgd_alloc(struct mm_struct *mm)
25673 {
25674 pgd_t *pgd;
25675 - pmd_t *pmds[PREALLOCATED_PMDS];
25676 + pxd_t *pxds[PREALLOCATED_PXDS];
25677
25678 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25679
25680 @@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25681
25682 mm->pgd = pgd;
25683
25684 - if (preallocate_pmds(pmds) != 0)
25685 + if (preallocate_pxds(pxds) != 0)
25686 goto out_free_pgd;
25687
25688 if (paravirt_pgd_alloc(mm) != 0)
25689 - goto out_free_pmds;
25690 + goto out_free_pxds;
25691
25692 /*
25693 * Make sure that pre-populating the pmds is atomic with
25694 @@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25695 spin_lock(&pgd_lock);
25696
25697 pgd_ctor(mm, pgd);
25698 - pgd_prepopulate_pmd(mm, pgd, pmds);
25699 + pgd_prepopulate_pxd(mm, pgd, pxds);
25700
25701 spin_unlock(&pgd_lock);
25702
25703 return pgd;
25704
25705 -out_free_pmds:
25706 - free_pmds(pmds);
25707 +out_free_pxds:
25708 + free_pxds(pxds);
25709 out_free_pgd:
25710 free_page((unsigned long)pgd);
25711 out:
25712 @@ -295,7 +356,7 @@ out:
25713
25714 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25715 {
25716 - pgd_mop_up_pmds(mm, pgd);
25717 + pgd_mop_up_pxds(mm, pgd);
25718 pgd_dtor(pgd);
25719 paravirt_pgd_free(mm, pgd);
25720 free_page((unsigned long)pgd);
25721 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25722 index a69bcb8..19068ab 100644
25723 --- a/arch/x86/mm/pgtable_32.c
25724 +++ b/arch/x86/mm/pgtable_32.c
25725 @@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25726 return;
25727 }
25728 pte = pte_offset_kernel(pmd, vaddr);
25729 +
25730 + pax_open_kernel();
25731 if (pte_val(pteval))
25732 set_pte_at(&init_mm, vaddr, pte, pteval);
25733 else
25734 pte_clear(&init_mm, vaddr, pte);
25735 + pax_close_kernel();
25736
25737 /*
25738 * It's enough to flush this one mapping.
25739 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25740 index 410531d..0f16030 100644
25741 --- a/arch/x86/mm/setup_nx.c
25742 +++ b/arch/x86/mm/setup_nx.c
25743 @@ -5,8 +5,10 @@
25744 #include <asm/pgtable.h>
25745 #include <asm/proto.h>
25746
25747 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25748 static int disable_nx __cpuinitdata;
25749
25750 +#ifndef CONFIG_PAX_PAGEEXEC
25751 /*
25752 * noexec = on|off
25753 *
25754 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25755 return 0;
25756 }
25757 early_param("noexec", noexec_setup);
25758 +#endif
25759 +
25760 +#endif
25761
25762 void __cpuinit x86_configure_nx(void)
25763 {
25764 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25765 if (cpu_has_nx && !disable_nx)
25766 __supported_pte_mask |= _PAGE_NX;
25767 else
25768 +#endif
25769 __supported_pte_mask &= ~_PAGE_NX;
25770 }
25771
25772 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25773 index d6c0418..06a0ad5 100644
25774 --- a/arch/x86/mm/tlb.c
25775 +++ b/arch/x86/mm/tlb.c
25776 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
25777 BUG();
25778 cpumask_clear_cpu(cpu,
25779 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25780 +
25781 +#ifndef CONFIG_PAX_PER_CPU_PGD
25782 load_cr3(swapper_pg_dir);
25783 +#endif
25784 +
25785 }
25786 EXPORT_SYMBOL_GPL(leave_mm);
25787
25788 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25789 index 877b9a1..a8ecf42 100644
25790 --- a/arch/x86/net/bpf_jit.S
25791 +++ b/arch/x86/net/bpf_jit.S
25792 @@ -9,6 +9,7 @@
25793 */
25794 #include <linux/linkage.h>
25795 #include <asm/dwarf2.h>
25796 +#include <asm/alternative-asm.h>
25797
25798 /*
25799 * Calling convention :
25800 @@ -35,6 +36,7 @@ sk_load_word_positive_offset:
25801 jle bpf_slow_path_word
25802 mov (SKBDATA,%rsi),%eax
25803 bswap %eax /* ntohl() */
25804 + pax_force_retaddr
25805 ret
25806
25807 sk_load_half:
25808 @@ -52,6 +54,7 @@ sk_load_half_positive_offset:
25809 jle bpf_slow_path_half
25810 movzwl (SKBDATA,%rsi),%eax
25811 rol $8,%ax # ntohs()
25812 + pax_force_retaddr
25813 ret
25814
25815 sk_load_byte:
25816 @@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
25817 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25818 jle bpf_slow_path_byte
25819 movzbl (SKBDATA,%rsi),%eax
25820 + pax_force_retaddr
25821 ret
25822
25823 /**
25824 @@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
25825 movzbl (SKBDATA,%rsi),%ebx
25826 and $15,%bl
25827 shl $2,%bl
25828 + pax_force_retaddr
25829 ret
25830
25831 /* rsi contains offset and can be scratched */
25832 @@ -109,6 +114,7 @@ bpf_slow_path_word:
25833 js bpf_error
25834 mov -12(%rbp),%eax
25835 bswap %eax
25836 + pax_force_retaddr
25837 ret
25838
25839 bpf_slow_path_half:
25840 @@ -117,12 +123,14 @@ bpf_slow_path_half:
25841 mov -12(%rbp),%ax
25842 rol $8,%ax
25843 movzwl %ax,%eax
25844 + pax_force_retaddr
25845 ret
25846
25847 bpf_slow_path_byte:
25848 bpf_slow_path_common(1)
25849 js bpf_error
25850 movzbl -12(%rbp),%eax
25851 + pax_force_retaddr
25852 ret
25853
25854 bpf_slow_path_byte_msh:
25855 @@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
25856 and $15,%al
25857 shl $2,%al
25858 xchg %eax,%ebx
25859 + pax_force_retaddr
25860 ret
25861
25862 #define sk_negative_common(SIZE) \
25863 @@ -157,6 +166,7 @@ sk_load_word_negative_offset:
25864 sk_negative_common(4)
25865 mov (%rax), %eax
25866 bswap %eax
25867 + pax_force_retaddr
25868 ret
25869
25870 bpf_slow_path_half_neg:
25871 @@ -168,6 +178,7 @@ sk_load_half_negative_offset:
25872 mov (%rax),%ax
25873 rol $8,%ax
25874 movzwl %ax,%eax
25875 + pax_force_retaddr
25876 ret
25877
25878 bpf_slow_path_byte_neg:
25879 @@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
25880 .globl sk_load_byte_negative_offset
25881 sk_negative_common(1)
25882 movzbl (%rax), %eax
25883 + pax_force_retaddr
25884 ret
25885
25886 bpf_slow_path_byte_msh_neg:
25887 @@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
25888 and $15,%al
25889 shl $2,%al
25890 xchg %eax,%ebx
25891 + pax_force_retaddr
25892 ret
25893
25894 bpf_error:
25895 @@ -197,4 +210,5 @@ bpf_error:
25896 xor %eax,%eax
25897 mov -8(%rbp),%rbx
25898 leaveq
25899 + pax_force_retaddr
25900 ret
25901 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
25902 index 0597f95..a12c36e 100644
25903 --- a/arch/x86/net/bpf_jit_comp.c
25904 +++ b/arch/x86/net/bpf_jit_comp.c
25905 @@ -120,6 +120,11 @@ static inline void bpf_flush_icache(void *start, void *end)
25906 set_fs(old_fs);
25907 }
25908
25909 +struct bpf_jit_work {
25910 + struct work_struct work;
25911 + void *image;
25912 +};
25913 +
25914 #define CHOOSE_LOAD_FUNC(K, func) \
25915 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
25916
25917 @@ -146,6 +151,10 @@ void bpf_jit_compile(struct sk_filter *fp)
25918 if (addrs == NULL)
25919 return;
25920
25921 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
25922 + if (!fp->work)
25923 + goto out;
25924 +
25925 /* Before first pass, make a rough estimation of addrs[]
25926 * each bpf instruction is translated to less than 64 bytes
25927 */
25928 @@ -589,17 +598,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25929 break;
25930 default:
25931 /* hmm, too complex filter, give up with jit compiler */
25932 - goto out;
25933 + goto error;
25934 }
25935 ilen = prog - temp;
25936 if (image) {
25937 if (unlikely(proglen + ilen > oldproglen)) {
25938 pr_err("bpb_jit_compile fatal error\n");
25939 - kfree(addrs);
25940 - module_free(NULL, image);
25941 - return;
25942 + module_free_exec(NULL, image);
25943 + goto error;
25944 }
25945 + pax_open_kernel();
25946 memcpy(image + proglen, temp, ilen);
25947 + pax_close_kernel();
25948 }
25949 proglen += ilen;
25950 addrs[i] = proglen;
25951 @@ -620,11 +630,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25952 break;
25953 }
25954 if (proglen == oldproglen) {
25955 - image = module_alloc(max_t(unsigned int,
25956 - proglen,
25957 - sizeof(struct work_struct)));
25958 + image = module_alloc_exec(proglen);
25959 if (!image)
25960 - goto out;
25961 + goto error;
25962 }
25963 oldproglen = proglen;
25964 }
25965 @@ -640,7 +648,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25966 bpf_flush_icache(image, image + proglen);
25967
25968 fp->bpf_func = (void *)image;
25969 - }
25970 + } else
25971 +error:
25972 + kfree(fp->work);
25973 +
25974 out:
25975 kfree(addrs);
25976 return;
25977 @@ -648,18 +659,20 @@ out:
25978
25979 static void jit_free_defer(struct work_struct *arg)
25980 {
25981 - module_free(NULL, arg);
25982 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
25983 + kfree(arg);
25984 }
25985
25986 /* run from softirq, we must use a work_struct to call
25987 - * module_free() from process context
25988 + * module_free_exec() from process context
25989 */
25990 void bpf_jit_free(struct sk_filter *fp)
25991 {
25992 if (fp->bpf_func != sk_run_filter) {
25993 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
25994 + struct work_struct *work = &fp->work->work;
25995
25996 INIT_WORK(work, jit_free_defer);
25997 + fp->work->image = fp->bpf_func;
25998 schedule_work(work);
25999 }
26000 }
26001 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26002 index d6aa6e8..266395a 100644
26003 --- a/arch/x86/oprofile/backtrace.c
26004 +++ b/arch/x86/oprofile/backtrace.c
26005 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
26006 struct stack_frame_ia32 *fp;
26007 unsigned long bytes;
26008
26009 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26010 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26011 if (bytes != sizeof(bufhead))
26012 return NULL;
26013
26014 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
26015 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
26016
26017 oprofile_add_trace(bufhead[0].return_address);
26018
26019 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
26020 struct stack_frame bufhead[2];
26021 unsigned long bytes;
26022
26023 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26024 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26025 if (bytes != sizeof(bufhead))
26026 return NULL;
26027
26028 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26029 {
26030 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
26031
26032 - if (!user_mode_vm(regs)) {
26033 + if (!user_mode(regs)) {
26034 unsigned long stack = kernel_stack_pointer(regs);
26035 if (depth)
26036 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26037 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
26038 index 140942f..8a5cc55 100644
26039 --- a/arch/x86/pci/mrst.c
26040 +++ b/arch/x86/pci/mrst.c
26041 @@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
26042 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
26043 pci_mmcfg_late_init();
26044 pcibios_enable_irq = mrst_pci_irq_enable;
26045 - pci_root_ops = pci_mrst_ops;
26046 + pax_open_kernel();
26047 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
26048 + pax_close_kernel();
26049 pci_soc_mode = 1;
26050 /* Continue with standard init */
26051 return 1;
26052 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26053 index da8fe05..7ee6704 100644
26054 --- a/arch/x86/pci/pcbios.c
26055 +++ b/arch/x86/pci/pcbios.c
26056 @@ -79,50 +79,93 @@ union bios32 {
26057 static struct {
26058 unsigned long address;
26059 unsigned short segment;
26060 -} bios32_indirect = { 0, __KERNEL_CS };
26061 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26062
26063 /*
26064 * Returns the entry point for the given service, NULL on error
26065 */
26066
26067 -static unsigned long bios32_service(unsigned long service)
26068 +static unsigned long __devinit bios32_service(unsigned long service)
26069 {
26070 unsigned char return_code; /* %al */
26071 unsigned long address; /* %ebx */
26072 unsigned long length; /* %ecx */
26073 unsigned long entry; /* %edx */
26074 unsigned long flags;
26075 + struct desc_struct d, *gdt;
26076
26077 local_irq_save(flags);
26078 - __asm__("lcall *(%%edi); cld"
26079 +
26080 + gdt = get_cpu_gdt_table(smp_processor_id());
26081 +
26082 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26083 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26084 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26085 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26086 +
26087 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26088 : "=a" (return_code),
26089 "=b" (address),
26090 "=c" (length),
26091 "=d" (entry)
26092 : "0" (service),
26093 "1" (0),
26094 - "D" (&bios32_indirect));
26095 + "D" (&bios32_indirect),
26096 + "r"(__PCIBIOS_DS)
26097 + : "memory");
26098 +
26099 + pax_open_kernel();
26100 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26101 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26102 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26103 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26104 + pax_close_kernel();
26105 +
26106 local_irq_restore(flags);
26107
26108 switch (return_code) {
26109 - case 0:
26110 - return address + entry;
26111 - case 0x80: /* Not present */
26112 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26113 - return 0;
26114 - default: /* Shouldn't happen */
26115 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26116 - service, return_code);
26117 + case 0: {
26118 + int cpu;
26119 + unsigned char flags;
26120 +
26121 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26122 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26123 + printk(KERN_WARNING "bios32_service: not valid\n");
26124 return 0;
26125 + }
26126 + address = address + PAGE_OFFSET;
26127 + length += 16UL; /* some BIOSs underreport this... */
26128 + flags = 4;
26129 + if (length >= 64*1024*1024) {
26130 + length >>= PAGE_SHIFT;
26131 + flags |= 8;
26132 + }
26133 +
26134 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26135 + gdt = get_cpu_gdt_table(cpu);
26136 + pack_descriptor(&d, address, length, 0x9b, flags);
26137 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26138 + pack_descriptor(&d, address, length, 0x93, flags);
26139 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26140 + }
26141 + return entry;
26142 + }
26143 + case 0x80: /* Not present */
26144 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26145 + return 0;
26146 + default: /* Shouldn't happen */
26147 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26148 + service, return_code);
26149 + return 0;
26150 }
26151 }
26152
26153 static struct {
26154 unsigned long address;
26155 unsigned short segment;
26156 -} pci_indirect = { 0, __KERNEL_CS };
26157 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26158
26159 -static int pci_bios_present;
26160 +static int pci_bios_present __read_only;
26161
26162 static int __devinit check_pcibios(void)
26163 {
26164 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
26165 unsigned long flags, pcibios_entry;
26166
26167 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26168 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26169 + pci_indirect.address = pcibios_entry;
26170
26171 local_irq_save(flags);
26172 - __asm__(
26173 - "lcall *(%%edi); cld\n\t"
26174 + __asm__("movw %w6, %%ds\n\t"
26175 + "lcall *%%ss:(%%edi); cld\n\t"
26176 + "push %%ss\n\t"
26177 + "pop %%ds\n\t"
26178 "jc 1f\n\t"
26179 "xor %%ah, %%ah\n"
26180 "1:"
26181 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
26182 "=b" (ebx),
26183 "=c" (ecx)
26184 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26185 - "D" (&pci_indirect)
26186 + "D" (&pci_indirect),
26187 + "r" (__PCIBIOS_DS)
26188 : "memory");
26189 local_irq_restore(flags);
26190
26191 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26192
26193 switch (len) {
26194 case 1:
26195 - __asm__("lcall *(%%esi); cld\n\t"
26196 + __asm__("movw %w6, %%ds\n\t"
26197 + "lcall *%%ss:(%%esi); cld\n\t"
26198 + "push %%ss\n\t"
26199 + "pop %%ds\n\t"
26200 "jc 1f\n\t"
26201 "xor %%ah, %%ah\n"
26202 "1:"
26203 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26204 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26205 "b" (bx),
26206 "D" ((long)reg),
26207 - "S" (&pci_indirect));
26208 + "S" (&pci_indirect),
26209 + "r" (__PCIBIOS_DS));
26210 /*
26211 * Zero-extend the result beyond 8 bits, do not trust the
26212 * BIOS having done it:
26213 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26214 *value &= 0xff;
26215 break;
26216 case 2:
26217 - __asm__("lcall *(%%esi); cld\n\t"
26218 + __asm__("movw %w6, %%ds\n\t"
26219 + "lcall *%%ss:(%%esi); cld\n\t"
26220 + "push %%ss\n\t"
26221 + "pop %%ds\n\t"
26222 "jc 1f\n\t"
26223 "xor %%ah, %%ah\n"
26224 "1:"
26225 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26226 : "1" (PCIBIOS_READ_CONFIG_WORD),
26227 "b" (bx),
26228 "D" ((long)reg),
26229 - "S" (&pci_indirect));
26230 + "S" (&pci_indirect),
26231 + "r" (__PCIBIOS_DS));
26232 /*
26233 * Zero-extend the result beyond 16 bits, do not trust the
26234 * BIOS having done it:
26235 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26236 *value &= 0xffff;
26237 break;
26238 case 4:
26239 - __asm__("lcall *(%%esi); cld\n\t"
26240 + __asm__("movw %w6, %%ds\n\t"
26241 + "lcall *%%ss:(%%esi); cld\n\t"
26242 + "push %%ss\n\t"
26243 + "pop %%ds\n\t"
26244 "jc 1f\n\t"
26245 "xor %%ah, %%ah\n"
26246 "1:"
26247 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26248 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26249 "b" (bx),
26250 "D" ((long)reg),
26251 - "S" (&pci_indirect));
26252 + "S" (&pci_indirect),
26253 + "r" (__PCIBIOS_DS));
26254 break;
26255 }
26256
26257 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26258
26259 switch (len) {
26260 case 1:
26261 - __asm__("lcall *(%%esi); cld\n\t"
26262 + __asm__("movw %w6, %%ds\n\t"
26263 + "lcall *%%ss:(%%esi); cld\n\t"
26264 + "push %%ss\n\t"
26265 + "pop %%ds\n\t"
26266 "jc 1f\n\t"
26267 "xor %%ah, %%ah\n"
26268 "1:"
26269 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26270 "c" (value),
26271 "b" (bx),
26272 "D" ((long)reg),
26273 - "S" (&pci_indirect));
26274 + "S" (&pci_indirect),
26275 + "r" (__PCIBIOS_DS));
26276 break;
26277 case 2:
26278 - __asm__("lcall *(%%esi); cld\n\t"
26279 + __asm__("movw %w6, %%ds\n\t"
26280 + "lcall *%%ss:(%%esi); cld\n\t"
26281 + "push %%ss\n\t"
26282 + "pop %%ds\n\t"
26283 "jc 1f\n\t"
26284 "xor %%ah, %%ah\n"
26285 "1:"
26286 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26287 "c" (value),
26288 "b" (bx),
26289 "D" ((long)reg),
26290 - "S" (&pci_indirect));
26291 + "S" (&pci_indirect),
26292 + "r" (__PCIBIOS_DS));
26293 break;
26294 case 4:
26295 - __asm__("lcall *(%%esi); cld\n\t"
26296 + __asm__("movw %w6, %%ds\n\t"
26297 + "lcall *%%ss:(%%esi); cld\n\t"
26298 + "push %%ss\n\t"
26299 + "pop %%ds\n\t"
26300 "jc 1f\n\t"
26301 "xor %%ah, %%ah\n"
26302 "1:"
26303 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26304 "c" (value),
26305 "b" (bx),
26306 "D" ((long)reg),
26307 - "S" (&pci_indirect));
26308 + "S" (&pci_indirect),
26309 + "r" (__PCIBIOS_DS));
26310 break;
26311 }
26312
26313 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26314
26315 DBG("PCI: Fetching IRQ routing table... ");
26316 __asm__("push %%es\n\t"
26317 + "movw %w8, %%ds\n\t"
26318 "push %%ds\n\t"
26319 "pop %%es\n\t"
26320 - "lcall *(%%esi); cld\n\t"
26321 + "lcall *%%ss:(%%esi); cld\n\t"
26322 "pop %%es\n\t"
26323 + "push %%ss\n\t"
26324 + "pop %%ds\n"
26325 "jc 1f\n\t"
26326 "xor %%ah, %%ah\n"
26327 "1:"
26328 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26329 "1" (0),
26330 "D" ((long) &opt),
26331 "S" (&pci_indirect),
26332 - "m" (opt)
26333 + "m" (opt),
26334 + "r" (__PCIBIOS_DS)
26335 : "memory");
26336 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26337 if (ret & 0xff00)
26338 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26339 {
26340 int ret;
26341
26342 - __asm__("lcall *(%%esi); cld\n\t"
26343 + __asm__("movw %w5, %%ds\n\t"
26344 + "lcall *%%ss:(%%esi); cld\n\t"
26345 + "push %%ss\n\t"
26346 + "pop %%ds\n"
26347 "jc 1f\n\t"
26348 "xor %%ah, %%ah\n"
26349 "1:"
26350 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26351 : "0" (PCIBIOS_SET_PCI_HW_INT),
26352 "b" ((dev->bus->number << 8) | dev->devfn),
26353 "c" ((irq << 8) | (pin + 10)),
26354 - "S" (&pci_indirect));
26355 + "S" (&pci_indirect),
26356 + "r" (__PCIBIOS_DS));
26357 return !(ret & 0xff00);
26358 }
26359 EXPORT_SYMBOL(pcibios_set_irq_routing);
26360 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
26361 index 40e4469..1ab536e 100644
26362 --- a/arch/x86/platform/efi/efi_32.c
26363 +++ b/arch/x86/platform/efi/efi_32.c
26364 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
26365 {
26366 struct desc_ptr gdt_descr;
26367
26368 +#ifdef CONFIG_PAX_KERNEXEC
26369 + struct desc_struct d;
26370 +#endif
26371 +
26372 local_irq_save(efi_rt_eflags);
26373
26374 load_cr3(initial_page_table);
26375 __flush_tlb_all();
26376
26377 +#ifdef CONFIG_PAX_KERNEXEC
26378 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
26379 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26380 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
26381 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26382 +#endif
26383 +
26384 gdt_descr.address = __pa(get_cpu_gdt_table(0));
26385 gdt_descr.size = GDT_SIZE - 1;
26386 load_gdt(&gdt_descr);
26387 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
26388 {
26389 struct desc_ptr gdt_descr;
26390
26391 +#ifdef CONFIG_PAX_KERNEXEC
26392 + struct desc_struct d;
26393 +
26394 + memset(&d, 0, sizeof d);
26395 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26396 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26397 +#endif
26398 +
26399 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
26400 gdt_descr.size = GDT_SIZE - 1;
26401 load_gdt(&gdt_descr);
26402 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26403 index fbe66e6..c5c0dd2 100644
26404 --- a/arch/x86/platform/efi/efi_stub_32.S
26405 +++ b/arch/x86/platform/efi/efi_stub_32.S
26406 @@ -6,7 +6,9 @@
26407 */
26408
26409 #include <linux/linkage.h>
26410 +#include <linux/init.h>
26411 #include <asm/page_types.h>
26412 +#include <asm/segment.h>
26413
26414 /*
26415 * efi_call_phys(void *, ...) is a function with variable parameters.
26416 @@ -20,7 +22,7 @@
26417 * service functions will comply with gcc calling convention, too.
26418 */
26419
26420 -.text
26421 +__INIT
26422 ENTRY(efi_call_phys)
26423 /*
26424 * 0. The function can only be called in Linux kernel. So CS has been
26425 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
26426 * The mapping of lower virtual memory has been created in prelog and
26427 * epilog.
26428 */
26429 - movl $1f, %edx
26430 - subl $__PAGE_OFFSET, %edx
26431 - jmp *%edx
26432 + movl $(__KERNEXEC_EFI_DS), %edx
26433 + mov %edx, %ds
26434 + mov %edx, %es
26435 + mov %edx, %ss
26436 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
26437 1:
26438
26439 /*
26440 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
26441 * parameter 2, ..., param n. To make things easy, we save the return
26442 * address of efi_call_phys in a global variable.
26443 */
26444 - popl %edx
26445 - movl %edx, saved_return_addr
26446 - /* get the function pointer into ECX*/
26447 - popl %ecx
26448 - movl %ecx, efi_rt_function_ptr
26449 - movl $2f, %edx
26450 - subl $__PAGE_OFFSET, %edx
26451 - pushl %edx
26452 + popl (saved_return_addr)
26453 + popl (efi_rt_function_ptr)
26454
26455 /*
26456 * 3. Clear PG bit in %CR0.
26457 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
26458 /*
26459 * 5. Call the physical function.
26460 */
26461 - jmp *%ecx
26462 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
26463
26464 -2:
26465 /*
26466 * 6. After EFI runtime service returns, control will return to
26467 * following instruction. We'd better readjust stack pointer first.
26468 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
26469 movl %cr0, %edx
26470 orl $0x80000000, %edx
26471 movl %edx, %cr0
26472 - jmp 1f
26473 -1:
26474 +
26475 /*
26476 * 8. Now restore the virtual mode from flat mode by
26477 * adding EIP with PAGE_OFFSET.
26478 */
26479 - movl $1f, %edx
26480 - jmp *%edx
26481 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
26482 1:
26483 + movl $(__KERNEL_DS), %edx
26484 + mov %edx, %ds
26485 + mov %edx, %es
26486 + mov %edx, %ss
26487
26488 /*
26489 * 9. Balance the stack. And because EAX contain the return value,
26490 * we'd better not clobber it.
26491 */
26492 - leal efi_rt_function_ptr, %edx
26493 - movl (%edx), %ecx
26494 - pushl %ecx
26495 + pushl (efi_rt_function_ptr)
26496
26497 /*
26498 - * 10. Push the saved return address onto the stack and return.
26499 + * 10. Return to the saved return address.
26500 */
26501 - leal saved_return_addr, %edx
26502 - movl (%edx), %ecx
26503 - pushl %ecx
26504 - ret
26505 + jmpl *(saved_return_addr)
26506 ENDPROC(efi_call_phys)
26507 .previous
26508
26509 -.data
26510 +__INITDATA
26511 saved_return_addr:
26512 .long 0
26513 efi_rt_function_ptr:
26514 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26515 index 4c07cca..2c8427d 100644
26516 --- a/arch/x86/platform/efi/efi_stub_64.S
26517 +++ b/arch/x86/platform/efi/efi_stub_64.S
26518 @@ -7,6 +7,7 @@
26519 */
26520
26521 #include <linux/linkage.h>
26522 +#include <asm/alternative-asm.h>
26523
26524 #define SAVE_XMM \
26525 mov %rsp, %rax; \
26526 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
26527 call *%rdi
26528 addq $32, %rsp
26529 RESTORE_XMM
26530 + pax_force_retaddr 0, 1
26531 ret
26532 ENDPROC(efi_call0)
26533
26534 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
26535 call *%rdi
26536 addq $32, %rsp
26537 RESTORE_XMM
26538 + pax_force_retaddr 0, 1
26539 ret
26540 ENDPROC(efi_call1)
26541
26542 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
26543 call *%rdi
26544 addq $32, %rsp
26545 RESTORE_XMM
26546 + pax_force_retaddr 0, 1
26547 ret
26548 ENDPROC(efi_call2)
26549
26550 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
26551 call *%rdi
26552 addq $32, %rsp
26553 RESTORE_XMM
26554 + pax_force_retaddr 0, 1
26555 ret
26556 ENDPROC(efi_call3)
26557
26558 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
26559 call *%rdi
26560 addq $32, %rsp
26561 RESTORE_XMM
26562 + pax_force_retaddr 0, 1
26563 ret
26564 ENDPROC(efi_call4)
26565
26566 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
26567 call *%rdi
26568 addq $48, %rsp
26569 RESTORE_XMM
26570 + pax_force_retaddr 0, 1
26571 ret
26572 ENDPROC(efi_call5)
26573
26574 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
26575 call *%rdi
26576 addq $48, %rsp
26577 RESTORE_XMM
26578 + pax_force_retaddr 0, 1
26579 ret
26580 ENDPROC(efi_call6)
26581 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
26582 index e31bcd8..f12dc46 100644
26583 --- a/arch/x86/platform/mrst/mrst.c
26584 +++ b/arch/x86/platform/mrst/mrst.c
26585 @@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
26586 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
26587 int sfi_mrtc_num;
26588
26589 -static void mrst_power_off(void)
26590 +static __noreturn void mrst_power_off(void)
26591 {
26592 + BUG();
26593 }
26594
26595 -static void mrst_reboot(void)
26596 +static __noreturn void mrst_reboot(void)
26597 {
26598 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
26599 + BUG();
26600 }
26601
26602 /* parse all the mtimer info to a static mtimer array */
26603 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26604 index 218cdb1..fd55c08 100644
26605 --- a/arch/x86/power/cpu.c
26606 +++ b/arch/x86/power/cpu.c
26607 @@ -132,7 +132,7 @@ static void do_fpu_end(void)
26608 static void fix_processor_context(void)
26609 {
26610 int cpu = smp_processor_id();
26611 - struct tss_struct *t = &per_cpu(init_tss, cpu);
26612 + struct tss_struct *t = init_tss + cpu;
26613
26614 set_tss_desc(cpu, t); /*
26615 * This just modifies memory; should not be
26616 @@ -142,7 +142,9 @@ static void fix_processor_context(void)
26617 */
26618
26619 #ifdef CONFIG_X86_64
26620 + pax_open_kernel();
26621 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26622 + pax_close_kernel();
26623
26624 syscall_init(); /* This sets MSR_*STAR and related */
26625 #endif
26626 diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
26627 index b685296..0180fa9 100644
26628 --- a/arch/x86/tools/relocs.c
26629 +++ b/arch/x86/tools/relocs.c
26630 @@ -12,10 +12,13 @@
26631 #include <regex.h>
26632 #include <tools/le_byteshift.h>
26633
26634 +#include "../../../include/generated/autoconf.h"
26635 +
26636 static void die(char *fmt, ...);
26637
26638 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
26639 static Elf32_Ehdr ehdr;
26640 +static Elf32_Phdr *phdr;
26641 static unsigned long reloc_count, reloc_idx;
26642 static unsigned long *relocs;
26643 static unsigned long reloc16_count, reloc16_idx;
26644 @@ -323,9 +326,39 @@ static void read_ehdr(FILE *fp)
26645 }
26646 }
26647
26648 +static void read_phdrs(FILE *fp)
26649 +{
26650 + unsigned int i;
26651 +
26652 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
26653 + if (!phdr) {
26654 + die("Unable to allocate %d program headers\n",
26655 + ehdr.e_phnum);
26656 + }
26657 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
26658 + die("Seek to %d failed: %s\n",
26659 + ehdr.e_phoff, strerror(errno));
26660 + }
26661 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
26662 + die("Cannot read ELF program headers: %s\n",
26663 + strerror(errno));
26664 + }
26665 + for(i = 0; i < ehdr.e_phnum; i++) {
26666 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
26667 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
26668 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
26669 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
26670 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
26671 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
26672 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
26673 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
26674 + }
26675 +
26676 +}
26677 +
26678 static void read_shdrs(FILE *fp)
26679 {
26680 - int i;
26681 + unsigned int i;
26682 Elf32_Shdr shdr;
26683
26684 secs = calloc(ehdr.e_shnum, sizeof(struct section));
26685 @@ -360,7 +393,7 @@ static void read_shdrs(FILE *fp)
26686
26687 static void read_strtabs(FILE *fp)
26688 {
26689 - int i;
26690 + unsigned int i;
26691 for (i = 0; i < ehdr.e_shnum; i++) {
26692 struct section *sec = &secs[i];
26693 if (sec->shdr.sh_type != SHT_STRTAB) {
26694 @@ -385,7 +418,7 @@ static void read_strtabs(FILE *fp)
26695
26696 static void read_symtabs(FILE *fp)
26697 {
26698 - int i,j;
26699 + unsigned int i,j;
26700 for (i = 0; i < ehdr.e_shnum; i++) {
26701 struct section *sec = &secs[i];
26702 if (sec->shdr.sh_type != SHT_SYMTAB) {
26703 @@ -418,7 +451,9 @@ static void read_symtabs(FILE *fp)
26704
26705 static void read_relocs(FILE *fp)
26706 {
26707 - int i,j;
26708 + unsigned int i,j;
26709 + uint32_t base;
26710 +
26711 for (i = 0; i < ehdr.e_shnum; i++) {
26712 struct section *sec = &secs[i];
26713 if (sec->shdr.sh_type != SHT_REL) {
26714 @@ -438,9 +473,22 @@ static void read_relocs(FILE *fp)
26715 die("Cannot read symbol table: %s\n",
26716 strerror(errno));
26717 }
26718 + base = 0;
26719 +
26720 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26721 + for (j = 0; j < ehdr.e_phnum; j++) {
26722 + if (phdr[j].p_type != PT_LOAD )
26723 + continue;
26724 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
26725 + continue;
26726 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
26727 + break;
26728 + }
26729 +#endif
26730 +
26731 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
26732 Elf32_Rel *rel = &sec->reltab[j];
26733 - rel->r_offset = elf32_to_cpu(rel->r_offset);
26734 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
26735 rel->r_info = elf32_to_cpu(rel->r_info);
26736 }
26737 }
26738 @@ -449,13 +497,13 @@ static void read_relocs(FILE *fp)
26739
26740 static void print_absolute_symbols(void)
26741 {
26742 - int i;
26743 + unsigned int i;
26744 printf("Absolute symbols\n");
26745 printf(" Num: Value Size Type Bind Visibility Name\n");
26746 for (i = 0; i < ehdr.e_shnum; i++) {
26747 struct section *sec = &secs[i];
26748 char *sym_strtab;
26749 - int j;
26750 + unsigned int j;
26751
26752 if (sec->shdr.sh_type != SHT_SYMTAB) {
26753 continue;
26754 @@ -482,7 +530,7 @@ static void print_absolute_symbols(void)
26755
26756 static void print_absolute_relocs(void)
26757 {
26758 - int i, printed = 0;
26759 + unsigned int i, printed = 0;
26760
26761 for (i = 0; i < ehdr.e_shnum; i++) {
26762 struct section *sec = &secs[i];
26763 @@ -551,7 +599,7 @@ static void print_absolute_relocs(void)
26764 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26765 int use_real_mode)
26766 {
26767 - int i;
26768 + unsigned int i;
26769 /* Walk through the relocations */
26770 for (i = 0; i < ehdr.e_shnum; i++) {
26771 char *sym_strtab;
26772 @@ -581,6 +629,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26773 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
26774 r_type = ELF32_R_TYPE(rel->r_info);
26775
26776 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
26777 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
26778 + continue;
26779 +
26780 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26781 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
26782 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
26783 + continue;
26784 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
26785 + continue;
26786 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
26787 + continue;
26788 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
26789 + continue;
26790 +#endif
26791 +
26792 shn_abs = sym->st_shndx == SHN_ABS;
26793
26794 switch (r_type) {
26795 @@ -674,7 +738,7 @@ static int write32(unsigned int v, FILE *f)
26796
26797 static void emit_relocs(int as_text, int use_real_mode)
26798 {
26799 - int i;
26800 + unsigned int i;
26801 /* Count how many relocations I have and allocate space for them. */
26802 reloc_count = 0;
26803 walk_relocs(count_reloc, use_real_mode);
26804 @@ -801,6 +865,7 @@ int main(int argc, char **argv)
26805 fname, strerror(errno));
26806 }
26807 read_ehdr(fp);
26808 + read_phdrs(fp);
26809 read_shdrs(fp);
26810 read_strtabs(fp);
26811 read_symtabs(fp);
26812 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26813 index fd14be1..e3c79c0 100644
26814 --- a/arch/x86/vdso/Makefile
26815 +++ b/arch/x86/vdso/Makefile
26816 @@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
26817 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
26818 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
26819
26820 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26821 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26822 GCOV_PROFILE := n
26823
26824 #
26825 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26826 index 66e6d93..587f435 100644
26827 --- a/arch/x86/vdso/vdso32-setup.c
26828 +++ b/arch/x86/vdso/vdso32-setup.c
26829 @@ -25,6 +25,7 @@
26830 #include <asm/tlbflush.h>
26831 #include <asm/vdso.h>
26832 #include <asm/proto.h>
26833 +#include <asm/mman.h>
26834
26835 enum {
26836 VDSO_DISABLED = 0,
26837 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26838 void enable_sep_cpu(void)
26839 {
26840 int cpu = get_cpu();
26841 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26842 + struct tss_struct *tss = init_tss + cpu;
26843
26844 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26845 put_cpu();
26846 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26847 gate_vma.vm_start = FIXADDR_USER_START;
26848 gate_vma.vm_end = FIXADDR_USER_END;
26849 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26850 - gate_vma.vm_page_prot = __P101;
26851 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26852
26853 return 0;
26854 }
26855 @@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26856 if (compat)
26857 addr = VDSO_HIGH_BASE;
26858 else {
26859 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26860 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26861 if (IS_ERR_VALUE(addr)) {
26862 ret = addr;
26863 goto up_fail;
26864 }
26865 }
26866
26867 - current->mm->context.vdso = (void *)addr;
26868 + current->mm->context.vdso = addr;
26869
26870 if (compat_uses_vma || !compat) {
26871 /*
26872 @@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26873 }
26874
26875 current_thread_info()->sysenter_return =
26876 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26877 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26878
26879 up_fail:
26880 if (ret)
26881 - current->mm->context.vdso = NULL;
26882 + current->mm->context.vdso = 0;
26883
26884 up_write(&mm->mmap_sem);
26885
26886 @@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
26887
26888 const char *arch_vma_name(struct vm_area_struct *vma)
26889 {
26890 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26891 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26892 return "[vdso]";
26893 +
26894 +#ifdef CONFIG_PAX_SEGMEXEC
26895 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26896 + return "[vdso]";
26897 +#endif
26898 +
26899 return NULL;
26900 }
26901
26902 @@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
26903 * Check to see if the corresponding task was created in compat vdso
26904 * mode.
26905 */
26906 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26907 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26908 return &gate_vma;
26909 return NULL;
26910 }
26911 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26912 index 00aaf04..4a26505 100644
26913 --- a/arch/x86/vdso/vma.c
26914 +++ b/arch/x86/vdso/vma.c
26915 @@ -16,8 +16,6 @@
26916 #include <asm/vdso.h>
26917 #include <asm/page.h>
26918
26919 -unsigned int __read_mostly vdso_enabled = 1;
26920 -
26921 extern char vdso_start[], vdso_end[];
26922 extern unsigned short vdso_sync_cpuid;
26923
26924 @@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26925 * unaligned here as a result of stack start randomization.
26926 */
26927 addr = PAGE_ALIGN(addr);
26928 - addr = align_addr(addr, NULL, ALIGN_VDSO);
26929
26930 return addr;
26931 }
26932 @@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
26933 unsigned size)
26934 {
26935 struct mm_struct *mm = current->mm;
26936 - unsigned long addr;
26937 + unsigned long addr = 0;
26938 int ret;
26939
26940 - if (!vdso_enabled)
26941 - return 0;
26942 -
26943 down_write(&mm->mmap_sem);
26944 +
26945 +#ifdef CONFIG_PAX_RANDMMAP
26946 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26947 +#endif
26948 +
26949 addr = vdso_addr(mm->start_stack, size);
26950 + addr = align_addr(addr, NULL, ALIGN_VDSO);
26951 addr = get_unmapped_area(NULL, addr, size, 0, 0);
26952 if (IS_ERR_VALUE(addr)) {
26953 ret = addr;
26954 goto up_fail;
26955 }
26956
26957 - current->mm->context.vdso = (void *)addr;
26958 + mm->context.vdso = addr;
26959
26960 ret = install_special_mapping(mm, addr, size,
26961 VM_READ|VM_EXEC|
26962 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
26963 pages);
26964 - if (ret) {
26965 - current->mm->context.vdso = NULL;
26966 - goto up_fail;
26967 - }
26968 + if (ret)
26969 + mm->context.vdso = 0;
26970
26971 up_fail:
26972 up_write(&mm->mmap_sem);
26973 @@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26974 vdsox32_size);
26975 }
26976 #endif
26977 -
26978 -static __init int vdso_setup(char *s)
26979 -{
26980 - vdso_enabled = simple_strtoul(s, NULL, 0);
26981 - return 0;
26982 -}
26983 -__setup("vdso=", vdso_setup);
26984 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26985 index 6c7f1e8..de96944 100644
26986 --- a/arch/x86/xen/enlighten.c
26987 +++ b/arch/x86/xen/enlighten.c
26988 @@ -95,8 +95,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26989
26990 struct shared_info xen_dummy_shared_info;
26991
26992 -void *xen_initial_gdt;
26993 -
26994 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
26995 __read_mostly int xen_have_vector_callback;
26996 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
26997 @@ -1157,30 +1155,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
26998 #endif
26999 };
27000
27001 -static void xen_reboot(int reason)
27002 +static __noreturn void xen_reboot(int reason)
27003 {
27004 struct sched_shutdown r = { .reason = reason };
27005
27006 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
27007 - BUG();
27008 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
27009 + BUG();
27010 }
27011
27012 -static void xen_restart(char *msg)
27013 +static __noreturn void xen_restart(char *msg)
27014 {
27015 xen_reboot(SHUTDOWN_reboot);
27016 }
27017
27018 -static void xen_emergency_restart(void)
27019 +static __noreturn void xen_emergency_restart(void)
27020 {
27021 xen_reboot(SHUTDOWN_reboot);
27022 }
27023
27024 -static void xen_machine_halt(void)
27025 +static __noreturn void xen_machine_halt(void)
27026 {
27027 xen_reboot(SHUTDOWN_poweroff);
27028 }
27029
27030 -static void xen_machine_power_off(void)
27031 +static __noreturn void xen_machine_power_off(void)
27032 {
27033 if (pm_power_off)
27034 pm_power_off();
27035 @@ -1283,7 +1281,17 @@ asmlinkage void __init xen_start_kernel(void)
27036 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27037
27038 /* Work out if we support NX */
27039 - x86_configure_nx();
27040 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27041 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27042 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27043 + unsigned l, h;
27044 +
27045 + __supported_pte_mask |= _PAGE_NX;
27046 + rdmsr(MSR_EFER, l, h);
27047 + l |= EFER_NX;
27048 + wrmsr(MSR_EFER, l, h);
27049 + }
27050 +#endif
27051
27052 xen_setup_features();
27053
27054 @@ -1314,13 +1322,6 @@ asmlinkage void __init xen_start_kernel(void)
27055
27056 machine_ops = xen_machine_ops;
27057
27058 - /*
27059 - * The only reliable way to retain the initial address of the
27060 - * percpu gdt_page is to remember it here, so we can go and
27061 - * mark it RW later, when the initial percpu area is freed.
27062 - */
27063 - xen_initial_gdt = &per_cpu(gdt_page, 0);
27064 -
27065 xen_smp_init();
27066
27067 #ifdef CONFIG_ACPI_NUMA
27068 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27069 index 69f5857..0699dc5 100644
27070 --- a/arch/x86/xen/mmu.c
27071 +++ b/arch/x86/xen/mmu.c
27072 @@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27073 convert_pfn_mfn(init_level4_pgt);
27074 convert_pfn_mfn(level3_ident_pgt);
27075 convert_pfn_mfn(level3_kernel_pgt);
27076 + convert_pfn_mfn(level3_vmalloc_start_pgt);
27077 + convert_pfn_mfn(level3_vmalloc_end_pgt);
27078 + convert_pfn_mfn(level3_vmemmap_pgt);
27079
27080 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27081 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27082 @@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27083 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27084 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27085 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27086 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27087 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27088 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27089 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27090 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27091 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27092 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27093
27094 @@ -1964,6 +1971,7 @@ static void __init xen_post_allocator_init(void)
27095 pv_mmu_ops.set_pud = xen_set_pud;
27096 #if PAGETABLE_LEVELS == 4
27097 pv_mmu_ops.set_pgd = xen_set_pgd;
27098 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27099 #endif
27100
27101 /* This will work as long as patching hasn't happened yet
27102 @@ -2045,6 +2053,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
27103 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27104 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27105 .set_pgd = xen_set_pgd_hyper,
27106 + .set_pgd_batched = xen_set_pgd_hyper,
27107
27108 .alloc_pud = xen_alloc_pmd_init,
27109 .release_pud = xen_release_pmd_init,
27110 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27111 index 0503c0c..ceb2d16 100644
27112 --- a/arch/x86/xen/smp.c
27113 +++ b/arch/x86/xen/smp.c
27114 @@ -215,11 +215,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27115 {
27116 BUG_ON(smp_processor_id() != 0);
27117 native_smp_prepare_boot_cpu();
27118 -
27119 - /* We've switched to the "real" per-cpu gdt, so make sure the
27120 - old memory can be recycled */
27121 - make_lowmem_page_readwrite(xen_initial_gdt);
27122 -
27123 xen_filter_cpu_maps();
27124 xen_setup_vcpu_info_placement();
27125 }
27126 @@ -296,12 +291,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27127 gdt = get_cpu_gdt_table(cpu);
27128
27129 ctxt->flags = VGCF_IN_KERNEL;
27130 - ctxt->user_regs.ds = __USER_DS;
27131 - ctxt->user_regs.es = __USER_DS;
27132 + ctxt->user_regs.ds = __KERNEL_DS;
27133 + ctxt->user_regs.es = __KERNEL_DS;
27134 ctxt->user_regs.ss = __KERNEL_DS;
27135 #ifdef CONFIG_X86_32
27136 ctxt->user_regs.fs = __KERNEL_PERCPU;
27137 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27138 + savesegment(gs, ctxt->user_regs.gs);
27139 #else
27140 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27141 #endif
27142 @@ -352,13 +347,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27143 int rc;
27144
27145 per_cpu(current_task, cpu) = idle;
27146 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
27147 #ifdef CONFIG_X86_32
27148 irq_ctx_init(cpu);
27149 #else
27150 clear_tsk_thread_flag(idle, TIF_FORK);
27151 - per_cpu(kernel_stack, cpu) =
27152 - (unsigned long)task_stack_page(idle) -
27153 - KERNEL_STACK_OFFSET + THREAD_SIZE;
27154 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27155 #endif
27156 xen_setup_runstate_info(cpu);
27157 xen_setup_timer(cpu);
27158 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27159 index b040b0e..8cc4fe0 100644
27160 --- a/arch/x86/xen/xen-asm_32.S
27161 +++ b/arch/x86/xen/xen-asm_32.S
27162 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
27163 ESP_OFFSET=4 # bytes pushed onto stack
27164
27165 /*
27166 - * Store vcpu_info pointer for easy access. Do it this way to
27167 - * avoid having to reload %fs
27168 + * Store vcpu_info pointer for easy access.
27169 */
27170 #ifdef CONFIG_SMP
27171 - GET_THREAD_INFO(%eax)
27172 - movl TI_cpu(%eax), %eax
27173 - movl __per_cpu_offset(,%eax,4), %eax
27174 - mov xen_vcpu(%eax), %eax
27175 + push %fs
27176 + mov $(__KERNEL_PERCPU), %eax
27177 + mov %eax, %fs
27178 + mov PER_CPU_VAR(xen_vcpu), %eax
27179 + pop %fs
27180 #else
27181 movl xen_vcpu, %eax
27182 #endif
27183 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27184 index aaa7291..3f77960 100644
27185 --- a/arch/x86/xen/xen-head.S
27186 +++ b/arch/x86/xen/xen-head.S
27187 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
27188 #ifdef CONFIG_X86_32
27189 mov %esi,xen_start_info
27190 mov $init_thread_union+THREAD_SIZE,%esp
27191 +#ifdef CONFIG_SMP
27192 + movl $cpu_gdt_table,%edi
27193 + movl $__per_cpu_load,%eax
27194 + movw %ax,__KERNEL_PERCPU + 2(%edi)
27195 + rorl $16,%eax
27196 + movb %al,__KERNEL_PERCPU + 4(%edi)
27197 + movb %ah,__KERNEL_PERCPU + 7(%edi)
27198 + movl $__per_cpu_end - 1,%eax
27199 + subl $__per_cpu_start,%eax
27200 + movw %ax,__KERNEL_PERCPU + 0(%edi)
27201 +#endif
27202 #else
27203 mov %rsi,xen_start_info
27204 mov $init_thread_union+THREAD_SIZE,%rsp
27205 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27206 index b095739..8c17bcd 100644
27207 --- a/arch/x86/xen/xen-ops.h
27208 +++ b/arch/x86/xen/xen-ops.h
27209 @@ -10,8 +10,6 @@
27210 extern const char xen_hypervisor_callback[];
27211 extern const char xen_failsafe_callback[];
27212
27213 -extern void *xen_initial_gdt;
27214 -
27215 struct trap_info;
27216 void xen_copy_trap_info(struct trap_info *traps);
27217
27218 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
27219 index 525bd3d..ef888b1 100644
27220 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
27221 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
27222 @@ -119,9 +119,9 @@
27223 ----------------------------------------------------------------------*/
27224
27225 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
27226 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
27227 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
27228 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
27229 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27230
27231 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
27232 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
27233 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
27234 index 2f33760..835e50a 100644
27235 --- a/arch/xtensa/variants/fsf/include/variant/core.h
27236 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
27237 @@ -11,6 +11,7 @@
27238 #ifndef _XTENSA_CORE_H
27239 #define _XTENSA_CORE_H
27240
27241 +#include <linux/const.h>
27242
27243 /****************************************************************************
27244 Parameters Useful for Any Code, USER or PRIVILEGED
27245 @@ -112,9 +113,9 @@
27246 ----------------------------------------------------------------------*/
27247
27248 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27249 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27250 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27251 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27252 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27253
27254 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
27255 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
27256 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
27257 index af00795..2bb8105 100644
27258 --- a/arch/xtensa/variants/s6000/include/variant/core.h
27259 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
27260 @@ -11,6 +11,7 @@
27261 #ifndef _XTENSA_CORE_CONFIGURATION_H
27262 #define _XTENSA_CORE_CONFIGURATION_H
27263
27264 +#include <linux/const.h>
27265
27266 /****************************************************************************
27267 Parameters Useful for Any Code, USER or PRIVILEGED
27268 @@ -118,9 +119,9 @@
27269 ----------------------------------------------------------------------*/
27270
27271 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27272 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27273 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27274 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27275 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27276
27277 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
27278 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
27279 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27280 index 58916af..9cb880b 100644
27281 --- a/block/blk-iopoll.c
27282 +++ b/block/blk-iopoll.c
27283 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27284 }
27285 EXPORT_SYMBOL(blk_iopoll_complete);
27286
27287 -static void blk_iopoll_softirq(struct softirq_action *h)
27288 +static void blk_iopoll_softirq(void)
27289 {
27290 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27291 int rearm = 0, budget = blk_iopoll_budget;
27292 diff --git a/block/blk-map.c b/block/blk-map.c
27293 index 623e1cd..ca1e109 100644
27294 --- a/block/blk-map.c
27295 +++ b/block/blk-map.c
27296 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27297 if (!len || !kbuf)
27298 return -EINVAL;
27299
27300 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
27301 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
27302 if (do_copy)
27303 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27304 else
27305 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27306 index 467c8de..4bddc6d 100644
27307 --- a/block/blk-softirq.c
27308 +++ b/block/blk-softirq.c
27309 @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27310 * Softirq action handler - move entries to local list and loop over them
27311 * while passing them to the queue registered handler.
27312 */
27313 -static void blk_done_softirq(struct softirq_action *h)
27314 +static void blk_done_softirq(void)
27315 {
27316 struct list_head *cpu_list, local_list;
27317
27318 diff --git a/block/bsg.c b/block/bsg.c
27319 index ff64ae3..593560c 100644
27320 --- a/block/bsg.c
27321 +++ b/block/bsg.c
27322 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27323 struct sg_io_v4 *hdr, struct bsg_device *bd,
27324 fmode_t has_write_perm)
27325 {
27326 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27327 + unsigned char *cmdptr;
27328 +
27329 if (hdr->request_len > BLK_MAX_CDB) {
27330 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27331 if (!rq->cmd)
27332 return -ENOMEM;
27333 - }
27334 + cmdptr = rq->cmd;
27335 + } else
27336 + cmdptr = tmpcmd;
27337
27338 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
27339 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27340 hdr->request_len))
27341 return -EFAULT;
27342
27343 + if (cmdptr != rq->cmd)
27344 + memcpy(rq->cmd, cmdptr, hdr->request_len);
27345 +
27346 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27347 if (blk_verify_command(rq->cmd, has_write_perm))
27348 return -EPERM;
27349 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27350 index 7c668c8..db3521c 100644
27351 --- a/block/compat_ioctl.c
27352 +++ b/block/compat_ioctl.c
27353 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27354 err |= __get_user(f->spec1, &uf->spec1);
27355 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27356 err |= __get_user(name, &uf->name);
27357 - f->name = compat_ptr(name);
27358 + f->name = (void __force_kernel *)compat_ptr(name);
27359 if (err) {
27360 err = -EFAULT;
27361 goto out;
27362 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
27363 index 6296b40..417c00f 100644
27364 --- a/block/partitions/efi.c
27365 +++ b/block/partitions/efi.c
27366 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
27367 if (!gpt)
27368 return NULL;
27369
27370 + if (!le32_to_cpu(gpt->num_partition_entries))
27371 + return NULL;
27372 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
27373 + if (!pte)
27374 + return NULL;
27375 +
27376 count = le32_to_cpu(gpt->num_partition_entries) *
27377 le32_to_cpu(gpt->sizeof_partition_entry);
27378 - if (!count)
27379 - return NULL;
27380 - pte = kzalloc(count, GFP_KERNEL);
27381 - if (!pte)
27382 - return NULL;
27383 -
27384 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
27385 (u8 *) pte,
27386 count) < count) {
27387 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27388 index 260fa80..e8f3caf 100644
27389 --- a/block/scsi_ioctl.c
27390 +++ b/block/scsi_ioctl.c
27391 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
27392 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27393 struct sg_io_hdr *hdr, fmode_t mode)
27394 {
27395 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27396 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27397 + unsigned char *cmdptr;
27398 +
27399 + if (rq->cmd != rq->__cmd)
27400 + cmdptr = rq->cmd;
27401 + else
27402 + cmdptr = tmpcmd;
27403 +
27404 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27405 return -EFAULT;
27406 +
27407 + if (cmdptr != rq->cmd)
27408 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27409 +
27410 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27411 return -EPERM;
27412
27413 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27414 int err;
27415 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27416 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27417 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27418 + unsigned char *cmdptr;
27419
27420 if (!sic)
27421 return -EINVAL;
27422 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27423 */
27424 err = -EFAULT;
27425 rq->cmd_len = cmdlen;
27426 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
27427 +
27428 + if (rq->cmd != rq->__cmd)
27429 + cmdptr = rq->cmd;
27430 + else
27431 + cmdptr = tmpcmd;
27432 +
27433 + if (copy_from_user(cmdptr, sic->data, cmdlen))
27434 goto error;
27435
27436 + if (rq->cmd != cmdptr)
27437 + memcpy(rq->cmd, cmdptr, cmdlen);
27438 +
27439 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27440 goto error;
27441
27442 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27443 index 671d4d6..5f24030 100644
27444 --- a/crypto/cryptd.c
27445 +++ b/crypto/cryptd.c
27446 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
27447
27448 struct cryptd_blkcipher_request_ctx {
27449 crypto_completion_t complete;
27450 -};
27451 +} __no_const;
27452
27453 struct cryptd_hash_ctx {
27454 struct crypto_shash *child;
27455 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
27456
27457 struct cryptd_aead_request_ctx {
27458 crypto_completion_t complete;
27459 -};
27460 +} __no_const;
27461
27462 static void cryptd_queue_worker(struct work_struct *work);
27463
27464 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
27465 index e6defd8..c26a225 100644
27466 --- a/drivers/acpi/apei/cper.c
27467 +++ b/drivers/acpi/apei/cper.c
27468 @@ -38,12 +38,12 @@
27469 */
27470 u64 cper_next_record_id(void)
27471 {
27472 - static atomic64_t seq;
27473 + static atomic64_unchecked_t seq;
27474
27475 - if (!atomic64_read(&seq))
27476 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
27477 + if (!atomic64_read_unchecked(&seq))
27478 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
27479
27480 - return atomic64_inc_return(&seq);
27481 + return atomic64_inc_return_unchecked(&seq);
27482 }
27483 EXPORT_SYMBOL_GPL(cper_next_record_id);
27484
27485 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
27486 index 7586544..636a2f0 100644
27487 --- a/drivers/acpi/ec_sys.c
27488 +++ b/drivers/acpi/ec_sys.c
27489 @@ -12,6 +12,7 @@
27490 #include <linux/acpi.h>
27491 #include <linux/debugfs.h>
27492 #include <linux/module.h>
27493 +#include <linux/uaccess.h>
27494 #include "internal.h"
27495
27496 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
27497 @@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27498 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
27499 */
27500 unsigned int size = EC_SPACE_SIZE;
27501 - u8 *data = (u8 *) buf;
27502 + u8 data;
27503 loff_t init_off = *off;
27504 int err = 0;
27505
27506 @@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27507 size = count;
27508
27509 while (size) {
27510 - err = ec_read(*off, &data[*off - init_off]);
27511 + err = ec_read(*off, &data);
27512 if (err)
27513 return err;
27514 + if (put_user(data, &buf[*off - init_off]))
27515 + return -EFAULT;
27516 *off += 1;
27517 size--;
27518 }
27519 @@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27520
27521 unsigned int size = count;
27522 loff_t init_off = *off;
27523 - u8 *data = (u8 *) buf;
27524 int err = 0;
27525
27526 if (*off >= EC_SPACE_SIZE)
27527 @@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27528 }
27529
27530 while (size) {
27531 - u8 byte_write = data[*off - init_off];
27532 + u8 byte_write;
27533 + if (get_user(byte_write, &buf[*off - init_off]))
27534 + return -EFAULT;
27535 err = ec_write(*off, byte_write);
27536 if (err)
27537 return err;
27538 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27539 index 251c7b62..000462d 100644
27540 --- a/drivers/acpi/proc.c
27541 +++ b/drivers/acpi/proc.c
27542 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
27543 size_t count, loff_t * ppos)
27544 {
27545 struct list_head *node, *next;
27546 - char strbuf[5];
27547 - char str[5] = "";
27548 - unsigned int len = count;
27549 + char strbuf[5] = {0};
27550
27551 - if (len > 4)
27552 - len = 4;
27553 - if (len < 0)
27554 + if (count > 4)
27555 + count = 4;
27556 + if (copy_from_user(strbuf, buffer, count))
27557 return -EFAULT;
27558 -
27559 - if (copy_from_user(strbuf, buffer, len))
27560 - return -EFAULT;
27561 - strbuf[len] = '\0';
27562 - sscanf(strbuf, "%s", str);
27563 + strbuf[count] = '\0';
27564
27565 mutex_lock(&acpi_device_lock);
27566 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27567 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
27568 if (!dev->wakeup.flags.valid)
27569 continue;
27570
27571 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
27572 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27573 if (device_can_wakeup(&dev->dev)) {
27574 bool enable = !device_may_wakeup(&dev->dev);
27575 device_set_wakeup_enable(&dev->dev, enable);
27576 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
27577 index 0734086..3ad3e4c 100644
27578 --- a/drivers/acpi/processor_driver.c
27579 +++ b/drivers/acpi/processor_driver.c
27580 @@ -556,7 +556,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27581 return 0;
27582 #endif
27583
27584 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27585 + BUG_ON(pr->id >= nr_cpu_ids);
27586
27587 /*
27588 * Buggy BIOS check
27589 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27590 index 23763a1..6375e67 100644
27591 --- a/drivers/ata/libata-core.c
27592 +++ b/drivers/ata/libata-core.c
27593 @@ -4736,7 +4736,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27594 struct ata_port *ap;
27595 unsigned int tag;
27596
27597 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27598 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27599 ap = qc->ap;
27600
27601 qc->flags = 0;
27602 @@ -4752,7 +4752,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27603 struct ata_port *ap;
27604 struct ata_link *link;
27605
27606 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27607 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27608 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27609 ap = qc->ap;
27610 link = qc->dev->link;
27611 @@ -5816,6 +5816,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27612 return;
27613
27614 spin_lock(&lock);
27615 + pax_open_kernel();
27616
27617 for (cur = ops->inherits; cur; cur = cur->inherits) {
27618 void **inherit = (void **)cur;
27619 @@ -5829,8 +5830,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27620 if (IS_ERR(*pp))
27621 *pp = NULL;
27622
27623 - ops->inherits = NULL;
27624 + *(struct ata_port_operations **)&ops->inherits = NULL;
27625
27626 + pax_close_kernel();
27627 spin_unlock(&lock);
27628 }
27629
27630 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
27631 index 3239517..343b5f6 100644
27632 --- a/drivers/ata/pata_arasan_cf.c
27633 +++ b/drivers/ata/pata_arasan_cf.c
27634 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
27635 /* Handle platform specific quirks */
27636 if (pdata->quirk) {
27637 if (pdata->quirk & CF_BROKEN_PIO) {
27638 - ap->ops->set_piomode = NULL;
27639 + pax_open_kernel();
27640 + *(void **)&ap->ops->set_piomode = NULL;
27641 + pax_close_kernel();
27642 ap->pio_mask = 0;
27643 }
27644 if (pdata->quirk & CF_BROKEN_MWDMA)
27645 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
27646 index f9b983a..887b9d8 100644
27647 --- a/drivers/atm/adummy.c
27648 +++ b/drivers/atm/adummy.c
27649 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
27650 vcc->pop(vcc, skb);
27651 else
27652 dev_kfree_skb_any(skb);
27653 - atomic_inc(&vcc->stats->tx);
27654 + atomic_inc_unchecked(&vcc->stats->tx);
27655
27656 return 0;
27657 }
27658 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
27659 index f8f41e0..1f987dd 100644
27660 --- a/drivers/atm/ambassador.c
27661 +++ b/drivers/atm/ambassador.c
27662 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
27663 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
27664
27665 // VC layer stats
27666 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27667 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27668
27669 // free the descriptor
27670 kfree (tx_descr);
27671 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27672 dump_skb ("<<<", vc, skb);
27673
27674 // VC layer stats
27675 - atomic_inc(&atm_vcc->stats->rx);
27676 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27677 __net_timestamp(skb);
27678 // end of our responsibility
27679 atm_vcc->push (atm_vcc, skb);
27680 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27681 } else {
27682 PRINTK (KERN_INFO, "dropped over-size frame");
27683 // should we count this?
27684 - atomic_inc(&atm_vcc->stats->rx_drop);
27685 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27686 }
27687
27688 } else {
27689 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
27690 }
27691
27692 if (check_area (skb->data, skb->len)) {
27693 - atomic_inc(&atm_vcc->stats->tx_err);
27694 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
27695 return -ENOMEM; // ?
27696 }
27697
27698 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
27699 index b22d71c..d6e1049 100644
27700 --- a/drivers/atm/atmtcp.c
27701 +++ b/drivers/atm/atmtcp.c
27702 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27703 if (vcc->pop) vcc->pop(vcc,skb);
27704 else dev_kfree_skb(skb);
27705 if (dev_data) return 0;
27706 - atomic_inc(&vcc->stats->tx_err);
27707 + atomic_inc_unchecked(&vcc->stats->tx_err);
27708 return -ENOLINK;
27709 }
27710 size = skb->len+sizeof(struct atmtcp_hdr);
27711 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27712 if (!new_skb) {
27713 if (vcc->pop) vcc->pop(vcc,skb);
27714 else dev_kfree_skb(skb);
27715 - atomic_inc(&vcc->stats->tx_err);
27716 + atomic_inc_unchecked(&vcc->stats->tx_err);
27717 return -ENOBUFS;
27718 }
27719 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
27720 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27721 if (vcc->pop) vcc->pop(vcc,skb);
27722 else dev_kfree_skb(skb);
27723 out_vcc->push(out_vcc,new_skb);
27724 - atomic_inc(&vcc->stats->tx);
27725 - atomic_inc(&out_vcc->stats->rx);
27726 + atomic_inc_unchecked(&vcc->stats->tx);
27727 + atomic_inc_unchecked(&out_vcc->stats->rx);
27728 return 0;
27729 }
27730
27731 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27732 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
27733 read_unlock(&vcc_sklist_lock);
27734 if (!out_vcc) {
27735 - atomic_inc(&vcc->stats->tx_err);
27736 + atomic_inc_unchecked(&vcc->stats->tx_err);
27737 goto done;
27738 }
27739 skb_pull(skb,sizeof(struct atmtcp_hdr));
27740 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27741 __net_timestamp(new_skb);
27742 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
27743 out_vcc->push(out_vcc,new_skb);
27744 - atomic_inc(&vcc->stats->tx);
27745 - atomic_inc(&out_vcc->stats->rx);
27746 + atomic_inc_unchecked(&vcc->stats->tx);
27747 + atomic_inc_unchecked(&out_vcc->stats->rx);
27748 done:
27749 if (vcc->pop) vcc->pop(vcc,skb);
27750 else dev_kfree_skb(skb);
27751 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
27752 index 2059ee4..faf51c7 100644
27753 --- a/drivers/atm/eni.c
27754 +++ b/drivers/atm/eni.c
27755 @@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
27756 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
27757 vcc->dev->number);
27758 length = 0;
27759 - atomic_inc(&vcc->stats->rx_err);
27760 + atomic_inc_unchecked(&vcc->stats->rx_err);
27761 }
27762 else {
27763 length = ATM_CELL_SIZE-1; /* no HEC */
27764 @@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27765 size);
27766 }
27767 eff = length = 0;
27768 - atomic_inc(&vcc->stats->rx_err);
27769 + atomic_inc_unchecked(&vcc->stats->rx_err);
27770 }
27771 else {
27772 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
27773 @@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27774 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
27775 vcc->dev->number,vcc->vci,length,size << 2,descr);
27776 length = eff = 0;
27777 - atomic_inc(&vcc->stats->rx_err);
27778 + atomic_inc_unchecked(&vcc->stats->rx_err);
27779 }
27780 }
27781 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
27782 @@ -767,7 +767,7 @@ rx_dequeued++;
27783 vcc->push(vcc,skb);
27784 pushed++;
27785 }
27786 - atomic_inc(&vcc->stats->rx);
27787 + atomic_inc_unchecked(&vcc->stats->rx);
27788 }
27789 wake_up(&eni_dev->rx_wait);
27790 }
27791 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
27792 PCI_DMA_TODEVICE);
27793 if (vcc->pop) vcc->pop(vcc,skb);
27794 else dev_kfree_skb_irq(skb);
27795 - atomic_inc(&vcc->stats->tx);
27796 + atomic_inc_unchecked(&vcc->stats->tx);
27797 wake_up(&eni_dev->tx_wait);
27798 dma_complete++;
27799 }
27800 @@ -1567,7 +1567,7 @@ tx_complete++;
27801 /*--------------------------------- entries ---------------------------------*/
27802
27803
27804 -static const char *media_name[] __devinitdata = {
27805 +static const char *media_name[] __devinitconst = {
27806 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
27807 "UTP", "05?", "06?", "07?", /* 4- 7 */
27808 "TAXI","09?", "10?", "11?", /* 8-11 */
27809 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
27810 index 86fed1b..6dc4721 100644
27811 --- a/drivers/atm/firestream.c
27812 +++ b/drivers/atm/firestream.c
27813 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
27814 }
27815 }
27816
27817 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27818 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27819
27820 fs_dprintk (FS_DEBUG_TXMEM, "i");
27821 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
27822 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27823 #endif
27824 skb_put (skb, qe->p1 & 0xffff);
27825 ATM_SKB(skb)->vcc = atm_vcc;
27826 - atomic_inc(&atm_vcc->stats->rx);
27827 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27828 __net_timestamp(skb);
27829 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
27830 atm_vcc->push (atm_vcc, skb);
27831 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27832 kfree (pe);
27833 }
27834 if (atm_vcc)
27835 - atomic_inc(&atm_vcc->stats->rx_drop);
27836 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27837 break;
27838 case 0x1f: /* Reassembly abort: no buffers. */
27839 /* Silently increment error counter. */
27840 if (atm_vcc)
27841 - atomic_inc(&atm_vcc->stats->rx_drop);
27842 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27843 break;
27844 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
27845 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
27846 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
27847 index 361f5ae..7fc552d 100644
27848 --- a/drivers/atm/fore200e.c
27849 +++ b/drivers/atm/fore200e.c
27850 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
27851 #endif
27852 /* check error condition */
27853 if (*entry->status & STATUS_ERROR)
27854 - atomic_inc(&vcc->stats->tx_err);
27855 + atomic_inc_unchecked(&vcc->stats->tx_err);
27856 else
27857 - atomic_inc(&vcc->stats->tx);
27858 + atomic_inc_unchecked(&vcc->stats->tx);
27859 }
27860 }
27861
27862 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27863 if (skb == NULL) {
27864 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
27865
27866 - atomic_inc(&vcc->stats->rx_drop);
27867 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27868 return -ENOMEM;
27869 }
27870
27871 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27872
27873 dev_kfree_skb_any(skb);
27874
27875 - atomic_inc(&vcc->stats->rx_drop);
27876 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27877 return -ENOMEM;
27878 }
27879
27880 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27881
27882 vcc->push(vcc, skb);
27883 - atomic_inc(&vcc->stats->rx);
27884 + atomic_inc_unchecked(&vcc->stats->rx);
27885
27886 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27887
27888 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
27889 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
27890 fore200e->atm_dev->number,
27891 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
27892 - atomic_inc(&vcc->stats->rx_err);
27893 + atomic_inc_unchecked(&vcc->stats->rx_err);
27894 }
27895 }
27896
27897 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
27898 goto retry_here;
27899 }
27900
27901 - atomic_inc(&vcc->stats->tx_err);
27902 + atomic_inc_unchecked(&vcc->stats->tx_err);
27903
27904 fore200e->tx_sat++;
27905 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
27906 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
27907 index b182c2f..1c6fa8a 100644
27908 --- a/drivers/atm/he.c
27909 +++ b/drivers/atm/he.c
27910 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27911
27912 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
27913 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
27914 - atomic_inc(&vcc->stats->rx_drop);
27915 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27916 goto return_host_buffers;
27917 }
27918
27919 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27920 RBRQ_LEN_ERR(he_dev->rbrq_head)
27921 ? "LEN_ERR" : "",
27922 vcc->vpi, vcc->vci);
27923 - atomic_inc(&vcc->stats->rx_err);
27924 + atomic_inc_unchecked(&vcc->stats->rx_err);
27925 goto return_host_buffers;
27926 }
27927
27928 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27929 vcc->push(vcc, skb);
27930 spin_lock(&he_dev->global_lock);
27931
27932 - atomic_inc(&vcc->stats->rx);
27933 + atomic_inc_unchecked(&vcc->stats->rx);
27934
27935 return_host_buffers:
27936 ++pdus_assembled;
27937 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
27938 tpd->vcc->pop(tpd->vcc, tpd->skb);
27939 else
27940 dev_kfree_skb_any(tpd->skb);
27941 - atomic_inc(&tpd->vcc->stats->tx_err);
27942 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
27943 }
27944 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
27945 return;
27946 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27947 vcc->pop(vcc, skb);
27948 else
27949 dev_kfree_skb_any(skb);
27950 - atomic_inc(&vcc->stats->tx_err);
27951 + atomic_inc_unchecked(&vcc->stats->tx_err);
27952 return -EINVAL;
27953 }
27954
27955 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27956 vcc->pop(vcc, skb);
27957 else
27958 dev_kfree_skb_any(skb);
27959 - atomic_inc(&vcc->stats->tx_err);
27960 + atomic_inc_unchecked(&vcc->stats->tx_err);
27961 return -EINVAL;
27962 }
27963 #endif
27964 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27965 vcc->pop(vcc, skb);
27966 else
27967 dev_kfree_skb_any(skb);
27968 - atomic_inc(&vcc->stats->tx_err);
27969 + atomic_inc_unchecked(&vcc->stats->tx_err);
27970 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27971 return -ENOMEM;
27972 }
27973 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27974 vcc->pop(vcc, skb);
27975 else
27976 dev_kfree_skb_any(skb);
27977 - atomic_inc(&vcc->stats->tx_err);
27978 + atomic_inc_unchecked(&vcc->stats->tx_err);
27979 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27980 return -ENOMEM;
27981 }
27982 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27983 __enqueue_tpd(he_dev, tpd, cid);
27984 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27985
27986 - atomic_inc(&vcc->stats->tx);
27987 + atomic_inc_unchecked(&vcc->stats->tx);
27988
27989 return 0;
27990 }
27991 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
27992 index 75fd691..2d20b14 100644
27993 --- a/drivers/atm/horizon.c
27994 +++ b/drivers/atm/horizon.c
27995 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
27996 {
27997 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
27998 // VC layer stats
27999 - atomic_inc(&vcc->stats->rx);
28000 + atomic_inc_unchecked(&vcc->stats->rx);
28001 __net_timestamp(skb);
28002 // end of our responsibility
28003 vcc->push (vcc, skb);
28004 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
28005 dev->tx_iovec = NULL;
28006
28007 // VC layer stats
28008 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28009 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28010
28011 // free the skb
28012 hrz_kfree_skb (skb);
28013 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
28014 index 1c05212..c28e200 100644
28015 --- a/drivers/atm/idt77252.c
28016 +++ b/drivers/atm/idt77252.c
28017 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
28018 else
28019 dev_kfree_skb(skb);
28020
28021 - atomic_inc(&vcc->stats->tx);
28022 + atomic_inc_unchecked(&vcc->stats->tx);
28023 }
28024
28025 atomic_dec(&scq->used);
28026 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28027 if ((sb = dev_alloc_skb(64)) == NULL) {
28028 printk("%s: Can't allocate buffers for aal0.\n",
28029 card->name);
28030 - atomic_add(i, &vcc->stats->rx_drop);
28031 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28032 break;
28033 }
28034 if (!atm_charge(vcc, sb->truesize)) {
28035 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
28036 card->name);
28037 - atomic_add(i - 1, &vcc->stats->rx_drop);
28038 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
28039 dev_kfree_skb(sb);
28040 break;
28041 }
28042 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28043 ATM_SKB(sb)->vcc = vcc;
28044 __net_timestamp(sb);
28045 vcc->push(vcc, sb);
28046 - atomic_inc(&vcc->stats->rx);
28047 + atomic_inc_unchecked(&vcc->stats->rx);
28048
28049 cell += ATM_CELL_PAYLOAD;
28050 }
28051 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28052 "(CDC: %08x)\n",
28053 card->name, len, rpp->len, readl(SAR_REG_CDC));
28054 recycle_rx_pool_skb(card, rpp);
28055 - atomic_inc(&vcc->stats->rx_err);
28056 + atomic_inc_unchecked(&vcc->stats->rx_err);
28057 return;
28058 }
28059 if (stat & SAR_RSQE_CRC) {
28060 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
28061 recycle_rx_pool_skb(card, rpp);
28062 - atomic_inc(&vcc->stats->rx_err);
28063 + atomic_inc_unchecked(&vcc->stats->rx_err);
28064 return;
28065 }
28066 if (skb_queue_len(&rpp->queue) > 1) {
28067 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28068 RXPRINTK("%s: Can't alloc RX skb.\n",
28069 card->name);
28070 recycle_rx_pool_skb(card, rpp);
28071 - atomic_inc(&vcc->stats->rx_err);
28072 + atomic_inc_unchecked(&vcc->stats->rx_err);
28073 return;
28074 }
28075 if (!atm_charge(vcc, skb->truesize)) {
28076 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28077 __net_timestamp(skb);
28078
28079 vcc->push(vcc, skb);
28080 - atomic_inc(&vcc->stats->rx);
28081 + atomic_inc_unchecked(&vcc->stats->rx);
28082
28083 return;
28084 }
28085 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28086 __net_timestamp(skb);
28087
28088 vcc->push(vcc, skb);
28089 - atomic_inc(&vcc->stats->rx);
28090 + atomic_inc_unchecked(&vcc->stats->rx);
28091
28092 if (skb->truesize > SAR_FB_SIZE_3)
28093 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
28094 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
28095 if (vcc->qos.aal != ATM_AAL0) {
28096 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
28097 card->name, vpi, vci);
28098 - atomic_inc(&vcc->stats->rx_drop);
28099 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28100 goto drop;
28101 }
28102
28103 if ((sb = dev_alloc_skb(64)) == NULL) {
28104 printk("%s: Can't allocate buffers for AAL0.\n",
28105 card->name);
28106 - atomic_inc(&vcc->stats->rx_err);
28107 + atomic_inc_unchecked(&vcc->stats->rx_err);
28108 goto drop;
28109 }
28110
28111 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
28112 ATM_SKB(sb)->vcc = vcc;
28113 __net_timestamp(sb);
28114 vcc->push(vcc, sb);
28115 - atomic_inc(&vcc->stats->rx);
28116 + atomic_inc_unchecked(&vcc->stats->rx);
28117
28118 drop:
28119 skb_pull(queue, 64);
28120 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28121
28122 if (vc == NULL) {
28123 printk("%s: NULL connection in send().\n", card->name);
28124 - atomic_inc(&vcc->stats->tx_err);
28125 + atomic_inc_unchecked(&vcc->stats->tx_err);
28126 dev_kfree_skb(skb);
28127 return -EINVAL;
28128 }
28129 if (!test_bit(VCF_TX, &vc->flags)) {
28130 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
28131 - atomic_inc(&vcc->stats->tx_err);
28132 + atomic_inc_unchecked(&vcc->stats->tx_err);
28133 dev_kfree_skb(skb);
28134 return -EINVAL;
28135 }
28136 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28137 break;
28138 default:
28139 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
28140 - atomic_inc(&vcc->stats->tx_err);
28141 + atomic_inc_unchecked(&vcc->stats->tx_err);
28142 dev_kfree_skb(skb);
28143 return -EINVAL;
28144 }
28145
28146 if (skb_shinfo(skb)->nr_frags != 0) {
28147 printk("%s: No scatter-gather yet.\n", card->name);
28148 - atomic_inc(&vcc->stats->tx_err);
28149 + atomic_inc_unchecked(&vcc->stats->tx_err);
28150 dev_kfree_skb(skb);
28151 return -EINVAL;
28152 }
28153 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28154
28155 err = queue_skb(card, vc, skb, oam);
28156 if (err) {
28157 - atomic_inc(&vcc->stats->tx_err);
28158 + atomic_inc_unchecked(&vcc->stats->tx_err);
28159 dev_kfree_skb(skb);
28160 return err;
28161 }
28162 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
28163 skb = dev_alloc_skb(64);
28164 if (!skb) {
28165 printk("%s: Out of memory in send_oam().\n", card->name);
28166 - atomic_inc(&vcc->stats->tx_err);
28167 + atomic_inc_unchecked(&vcc->stats->tx_err);
28168 return -ENOMEM;
28169 }
28170 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
28171 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
28172 index d438601..8b98495 100644
28173 --- a/drivers/atm/iphase.c
28174 +++ b/drivers/atm/iphase.c
28175 @@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
28176 status = (u_short) (buf_desc_ptr->desc_mode);
28177 if (status & (RX_CER | RX_PTE | RX_OFL))
28178 {
28179 - atomic_inc(&vcc->stats->rx_err);
28180 + atomic_inc_unchecked(&vcc->stats->rx_err);
28181 IF_ERR(printk("IA: bad packet, dropping it");)
28182 if (status & RX_CER) {
28183 IF_ERR(printk(" cause: packet CRC error\n");)
28184 @@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
28185 len = dma_addr - buf_addr;
28186 if (len > iadev->rx_buf_sz) {
28187 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
28188 - atomic_inc(&vcc->stats->rx_err);
28189 + atomic_inc_unchecked(&vcc->stats->rx_err);
28190 goto out_free_desc;
28191 }
28192
28193 @@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28194 ia_vcc = INPH_IA_VCC(vcc);
28195 if (ia_vcc == NULL)
28196 {
28197 - atomic_inc(&vcc->stats->rx_err);
28198 + atomic_inc_unchecked(&vcc->stats->rx_err);
28199 atm_return(vcc, skb->truesize);
28200 dev_kfree_skb_any(skb);
28201 goto INCR_DLE;
28202 @@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28203 if ((length > iadev->rx_buf_sz) || (length >
28204 (skb->len - sizeof(struct cpcs_trailer))))
28205 {
28206 - atomic_inc(&vcc->stats->rx_err);
28207 + atomic_inc_unchecked(&vcc->stats->rx_err);
28208 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
28209 length, skb->len);)
28210 atm_return(vcc, skb->truesize);
28211 @@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28212
28213 IF_RX(printk("rx_dle_intr: skb push");)
28214 vcc->push(vcc,skb);
28215 - atomic_inc(&vcc->stats->rx);
28216 + atomic_inc_unchecked(&vcc->stats->rx);
28217 iadev->rx_pkt_cnt++;
28218 }
28219 INCR_DLE:
28220 @@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
28221 {
28222 struct k_sonet_stats *stats;
28223 stats = &PRIV(_ia_dev[board])->sonet_stats;
28224 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
28225 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
28226 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
28227 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
28228 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
28229 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
28230 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
28231 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
28232 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
28233 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
28234 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
28235 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
28236 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
28237 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
28238 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
28239 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
28240 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
28241 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
28242 }
28243 ia_cmds.status = 0;
28244 break;
28245 @@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28246 if ((desc == 0) || (desc > iadev->num_tx_desc))
28247 {
28248 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
28249 - atomic_inc(&vcc->stats->tx);
28250 + atomic_inc_unchecked(&vcc->stats->tx);
28251 if (vcc->pop)
28252 vcc->pop(vcc, skb);
28253 else
28254 @@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28255 ATM_DESC(skb) = vcc->vci;
28256 skb_queue_tail(&iadev->tx_dma_q, skb);
28257
28258 - atomic_inc(&vcc->stats->tx);
28259 + atomic_inc_unchecked(&vcc->stats->tx);
28260 iadev->tx_pkt_cnt++;
28261 /* Increment transaction counter */
28262 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
28263
28264 #if 0
28265 /* add flow control logic */
28266 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
28267 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
28268 if (iavcc->vc_desc_cnt > 10) {
28269 vcc->tx_quota = vcc->tx_quota * 3 / 4;
28270 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
28271 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
28272 index 68c7588..7036683 100644
28273 --- a/drivers/atm/lanai.c
28274 +++ b/drivers/atm/lanai.c
28275 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
28276 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
28277 lanai_endtx(lanai, lvcc);
28278 lanai_free_skb(lvcc->tx.atmvcc, skb);
28279 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
28280 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
28281 }
28282
28283 /* Try to fill the buffer - don't call unless there is backlog */
28284 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
28285 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28286 __net_timestamp(skb);
28287 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28288 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28289 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28290 out:
28291 lvcc->rx.buf.ptr = end;
28292 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
28293 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28294 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28295 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28296 lanai->stats.service_rxnotaal5++;
28297 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28298 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28299 return 0;
28300 }
28301 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
28302 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28303 int bytes;
28304 read_unlock(&vcc_sklist_lock);
28305 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28306 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28307 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28308 lvcc->stats.x.aal5.service_trash++;
28309 bytes = (SERVICE_GET_END(s) * 16) -
28310 (((unsigned long) lvcc->rx.buf.ptr) -
28311 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28312 }
28313 if (s & SERVICE_STREAM) {
28314 read_unlock(&vcc_sklist_lock);
28315 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28316 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28317 lvcc->stats.x.aal5.service_stream++;
28318 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28319 "PDU on VCI %d!\n", lanai->number, vci);
28320 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28321 return 0;
28322 }
28323 DPRINTK("got rx crc error on vci %d\n", vci);
28324 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28325 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28326 lvcc->stats.x.aal5.service_rxcrc++;
28327 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28328 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
28329 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28330 index 1c70c45..300718d 100644
28331 --- a/drivers/atm/nicstar.c
28332 +++ b/drivers/atm/nicstar.c
28333 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28334 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28335 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28336 card->index);
28337 - atomic_inc(&vcc->stats->tx_err);
28338 + atomic_inc_unchecked(&vcc->stats->tx_err);
28339 dev_kfree_skb_any(skb);
28340 return -EINVAL;
28341 }
28342 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28343 if (!vc->tx) {
28344 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28345 card->index);
28346 - atomic_inc(&vcc->stats->tx_err);
28347 + atomic_inc_unchecked(&vcc->stats->tx_err);
28348 dev_kfree_skb_any(skb);
28349 return -EINVAL;
28350 }
28351 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28352 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28353 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28354 card->index);
28355 - atomic_inc(&vcc->stats->tx_err);
28356 + atomic_inc_unchecked(&vcc->stats->tx_err);
28357 dev_kfree_skb_any(skb);
28358 return -EINVAL;
28359 }
28360
28361 if (skb_shinfo(skb)->nr_frags != 0) {
28362 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28363 - atomic_inc(&vcc->stats->tx_err);
28364 + atomic_inc_unchecked(&vcc->stats->tx_err);
28365 dev_kfree_skb_any(skb);
28366 return -EINVAL;
28367 }
28368 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28369 }
28370
28371 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28372 - atomic_inc(&vcc->stats->tx_err);
28373 + atomic_inc_unchecked(&vcc->stats->tx_err);
28374 dev_kfree_skb_any(skb);
28375 return -EIO;
28376 }
28377 - atomic_inc(&vcc->stats->tx);
28378 + atomic_inc_unchecked(&vcc->stats->tx);
28379
28380 return 0;
28381 }
28382 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28383 printk
28384 ("nicstar%d: Can't allocate buffers for aal0.\n",
28385 card->index);
28386 - atomic_add(i, &vcc->stats->rx_drop);
28387 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28388 break;
28389 }
28390 if (!atm_charge(vcc, sb->truesize)) {
28391 RXPRINTK
28392 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28393 card->index);
28394 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28395 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28396 dev_kfree_skb_any(sb);
28397 break;
28398 }
28399 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28400 ATM_SKB(sb)->vcc = vcc;
28401 __net_timestamp(sb);
28402 vcc->push(vcc, sb);
28403 - atomic_inc(&vcc->stats->rx);
28404 + atomic_inc_unchecked(&vcc->stats->rx);
28405 cell += ATM_CELL_PAYLOAD;
28406 }
28407
28408 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28409 if (iovb == NULL) {
28410 printk("nicstar%d: Out of iovec buffers.\n",
28411 card->index);
28412 - atomic_inc(&vcc->stats->rx_drop);
28413 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28414 recycle_rx_buf(card, skb);
28415 return;
28416 }
28417 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28418 small or large buffer itself. */
28419 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28420 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28421 - atomic_inc(&vcc->stats->rx_err);
28422 + atomic_inc_unchecked(&vcc->stats->rx_err);
28423 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28424 NS_MAX_IOVECS);
28425 NS_PRV_IOVCNT(iovb) = 0;
28426 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28427 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28428 card->index);
28429 which_list(card, skb);
28430 - atomic_inc(&vcc->stats->rx_err);
28431 + atomic_inc_unchecked(&vcc->stats->rx_err);
28432 recycle_rx_buf(card, skb);
28433 vc->rx_iov = NULL;
28434 recycle_iov_buf(card, iovb);
28435 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28436 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28437 card->index);
28438 which_list(card, skb);
28439 - atomic_inc(&vcc->stats->rx_err);
28440 + atomic_inc_unchecked(&vcc->stats->rx_err);
28441 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28442 NS_PRV_IOVCNT(iovb));
28443 vc->rx_iov = NULL;
28444 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28445 printk(" - PDU size mismatch.\n");
28446 else
28447 printk(".\n");
28448 - atomic_inc(&vcc->stats->rx_err);
28449 + atomic_inc_unchecked(&vcc->stats->rx_err);
28450 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28451 NS_PRV_IOVCNT(iovb));
28452 vc->rx_iov = NULL;
28453 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28454 /* skb points to a small buffer */
28455 if (!atm_charge(vcc, skb->truesize)) {
28456 push_rxbufs(card, skb);
28457 - atomic_inc(&vcc->stats->rx_drop);
28458 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28459 } else {
28460 skb_put(skb, len);
28461 dequeue_sm_buf(card, skb);
28462 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28463 ATM_SKB(skb)->vcc = vcc;
28464 __net_timestamp(skb);
28465 vcc->push(vcc, skb);
28466 - atomic_inc(&vcc->stats->rx);
28467 + atomic_inc_unchecked(&vcc->stats->rx);
28468 }
28469 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28470 struct sk_buff *sb;
28471 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28472 if (len <= NS_SMBUFSIZE) {
28473 if (!atm_charge(vcc, sb->truesize)) {
28474 push_rxbufs(card, sb);
28475 - atomic_inc(&vcc->stats->rx_drop);
28476 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28477 } else {
28478 skb_put(sb, len);
28479 dequeue_sm_buf(card, sb);
28480 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28481 ATM_SKB(sb)->vcc = vcc;
28482 __net_timestamp(sb);
28483 vcc->push(vcc, sb);
28484 - atomic_inc(&vcc->stats->rx);
28485 + atomic_inc_unchecked(&vcc->stats->rx);
28486 }
28487
28488 push_rxbufs(card, skb);
28489 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28490
28491 if (!atm_charge(vcc, skb->truesize)) {
28492 push_rxbufs(card, skb);
28493 - atomic_inc(&vcc->stats->rx_drop);
28494 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28495 } else {
28496 dequeue_lg_buf(card, skb);
28497 #ifdef NS_USE_DESTRUCTORS
28498 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28499 ATM_SKB(skb)->vcc = vcc;
28500 __net_timestamp(skb);
28501 vcc->push(vcc, skb);
28502 - atomic_inc(&vcc->stats->rx);
28503 + atomic_inc_unchecked(&vcc->stats->rx);
28504 }
28505
28506 push_rxbufs(card, sb);
28507 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28508 printk
28509 ("nicstar%d: Out of huge buffers.\n",
28510 card->index);
28511 - atomic_inc(&vcc->stats->rx_drop);
28512 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28513 recycle_iovec_rx_bufs(card,
28514 (struct iovec *)
28515 iovb->data,
28516 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28517 card->hbpool.count++;
28518 } else
28519 dev_kfree_skb_any(hb);
28520 - atomic_inc(&vcc->stats->rx_drop);
28521 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28522 } else {
28523 /* Copy the small buffer to the huge buffer */
28524 sb = (struct sk_buff *)iov->iov_base;
28525 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28526 #endif /* NS_USE_DESTRUCTORS */
28527 __net_timestamp(hb);
28528 vcc->push(vcc, hb);
28529 - atomic_inc(&vcc->stats->rx);
28530 + atomic_inc_unchecked(&vcc->stats->rx);
28531 }
28532 }
28533
28534 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
28535 index 9851093..adb2b1e 100644
28536 --- a/drivers/atm/solos-pci.c
28537 +++ b/drivers/atm/solos-pci.c
28538 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
28539 }
28540 atm_charge(vcc, skb->truesize);
28541 vcc->push(vcc, skb);
28542 - atomic_inc(&vcc->stats->rx);
28543 + atomic_inc_unchecked(&vcc->stats->rx);
28544 break;
28545
28546 case PKT_STATUS:
28547 @@ -1009,7 +1009,7 @@ static uint32_t fpga_tx(struct solos_card *card)
28548 vcc = SKB_CB(oldskb)->vcc;
28549
28550 if (vcc) {
28551 - atomic_inc(&vcc->stats->tx);
28552 + atomic_inc_unchecked(&vcc->stats->tx);
28553 solos_pop(vcc, oldskb);
28554 } else
28555 dev_kfree_skb_irq(oldskb);
28556 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
28557 index 0215934..ce9f5b1 100644
28558 --- a/drivers/atm/suni.c
28559 +++ b/drivers/atm/suni.c
28560 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
28561
28562
28563 #define ADD_LIMITED(s,v) \
28564 - atomic_add((v),&stats->s); \
28565 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28566 + atomic_add_unchecked((v),&stats->s); \
28567 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28568
28569
28570 static void suni_hz(unsigned long from_timer)
28571 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28572 index 5120a96..e2572bd 100644
28573 --- a/drivers/atm/uPD98402.c
28574 +++ b/drivers/atm/uPD98402.c
28575 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
28576 struct sonet_stats tmp;
28577 int error = 0;
28578
28579 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28580 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28581 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28582 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28583 if (zero && !error) {
28584 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
28585
28586
28587 #define ADD_LIMITED(s,v) \
28588 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28589 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28590 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28591 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28592 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28593 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28594
28595
28596 static void stat_event(struct atm_dev *dev)
28597 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
28598 if (reason & uPD98402_INT_PFM) stat_event(dev);
28599 if (reason & uPD98402_INT_PCO) {
28600 (void) GET(PCOCR); /* clear interrupt cause */
28601 - atomic_add(GET(HECCT),
28602 + atomic_add_unchecked(GET(HECCT),
28603 &PRIV(dev)->sonet_stats.uncorr_hcs);
28604 }
28605 if ((reason & uPD98402_INT_RFO) &&
28606 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
28607 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
28608 uPD98402_INT_LOS),PIMR); /* enable them */
28609 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
28610 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28611 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
28612 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
28613 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28614 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
28615 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
28616 return 0;
28617 }
28618
28619 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
28620 index abe4e20..83c4727 100644
28621 --- a/drivers/atm/zatm.c
28622 +++ b/drivers/atm/zatm.c
28623 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28624 }
28625 if (!size) {
28626 dev_kfree_skb_irq(skb);
28627 - if (vcc) atomic_inc(&vcc->stats->rx_err);
28628 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
28629 continue;
28630 }
28631 if (!atm_charge(vcc,skb->truesize)) {
28632 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28633 skb->len = size;
28634 ATM_SKB(skb)->vcc = vcc;
28635 vcc->push(vcc,skb);
28636 - atomic_inc(&vcc->stats->rx);
28637 + atomic_inc_unchecked(&vcc->stats->rx);
28638 }
28639 zout(pos & 0xffff,MTA(mbx));
28640 #if 0 /* probably a stupid idea */
28641 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
28642 skb_queue_head(&zatm_vcc->backlog,skb);
28643 break;
28644 }
28645 - atomic_inc(&vcc->stats->tx);
28646 + atomic_inc_unchecked(&vcc->stats->tx);
28647 wake_up(&zatm_vcc->tx_wait);
28648 }
28649
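
Note on the ATM hunks above (solos-pci, suni, uPD98402, zatm): they all apply one pattern. Per-VCC and per-device statistics counters, whose wraparound is harmless, move from atomic_t to atomic_unchecked_t so that the patch's atomic overflow instrumentation (PAX_REFCOUNT) stays focused on real reference counts. A minimal sketch of what the unchecked variant amounts to when that instrumentation is compiled out; this is an illustration using compiler builtins, not the patch's actual per-architecture definitions:

/* sketch: an unchecked counter is a plain wrapper with no overflow trap */
typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED); /* wraps silently */
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
        return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
{
        __atomic_store_n(&v->counter, i, __ATOMIC_RELAXED);
}
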
28650 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
28651 index 8493536..31adee0 100644
28652 --- a/drivers/base/devtmpfs.c
28653 +++ b/drivers/base/devtmpfs.c
28654 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
28655 if (!thread)
28656 return 0;
28657
28658 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
28659 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
28660 if (err)
28661 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
28662 else
28663 diff --git a/drivers/base/node.c b/drivers/base/node.c
28664 index 90aa2a1..af1a177 100644
28665 --- a/drivers/base/node.c
28666 +++ b/drivers/base/node.c
28667 @@ -592,11 +592,9 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
28668 {
28669 int n;
28670
28671 - n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
28672 - if (n > 0 && PAGE_SIZE > n + 1) {
28673 - *(buf + n++) = '\n';
28674 - *(buf + n++) = '\0';
28675 - }
28676 + n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
28677 + buf[n++] = '\n';
28678 + buf[n] = '\0';
28679 return n;
28680 }
28681
28682 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
28683 index 2a3e581..3d6a73f 100644
28684 --- a/drivers/base/power/wakeup.c
28685 +++ b/drivers/base/power/wakeup.c
28686 @@ -30,14 +30,14 @@ bool events_check_enabled;
28687 * They need to be modified together atomically, so it's better to use one
28688 * atomic variable to hold them both.
28689 */
28690 -static atomic_t combined_event_count = ATOMIC_INIT(0);
28691 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
28692
28693 #define IN_PROGRESS_BITS (sizeof(int) * 4)
28694 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
28695
28696 static void split_counters(unsigned int *cnt, unsigned int *inpr)
28697 {
28698 - unsigned int comb = atomic_read(&combined_event_count);
28699 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
28700
28701 *cnt = (comb >> IN_PROGRESS_BITS);
28702 *inpr = comb & MAX_IN_PROGRESS;
28703 @@ -379,7 +379,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
28704 ws->last_time = ktime_get();
28705
28706 /* Increment the counter of events in progress. */
28707 - atomic_inc(&combined_event_count);
28708 + atomic_inc_unchecked(&combined_event_count);
28709 }
28710
28711 /**
28712 @@ -475,7 +475,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
28713 * Increment the counter of registered wakeup events and decrement the
28714 * couter of wakeup events in progress simultaneously.
28715 */
28716 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
28717 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
28718 }
28719
28720 /**
28721 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
28722 index b0f553b..77b928b 100644
28723 --- a/drivers/block/cciss.c
28724 +++ b/drivers/block/cciss.c
28725 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
28726 int err;
28727 u32 cp;
28728
28729 + memset(&arg64, 0, sizeof(arg64));
28730 +
28731 err = 0;
28732 err |=
28733 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
28734 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
28735 while (!list_empty(&h->reqQ)) {
28736 c = list_entry(h->reqQ.next, CommandList_struct, list);
28737 /* can't do anything if fifo is full */
28738 - if ((h->access.fifo_full(h))) {
28739 + if ((h->access->fifo_full(h))) {
28740 dev_warn(&h->pdev->dev, "fifo full\n");
28741 break;
28742 }
28743 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
28744 h->Qdepth--;
28745
28746 /* Tell the controller execute command */
28747 - h->access.submit_command(h, c);
28748 + h->access->submit_command(h, c);
28749
28750 /* Put job onto the completed Q */
28751 addQ(&h->cmpQ, c);
28752 @@ -3443,17 +3445,17 @@ startio:
28753
28754 static inline unsigned long get_next_completion(ctlr_info_t *h)
28755 {
28756 - return h->access.command_completed(h);
28757 + return h->access->command_completed(h);
28758 }
28759
28760 static inline int interrupt_pending(ctlr_info_t *h)
28761 {
28762 - return h->access.intr_pending(h);
28763 + return h->access->intr_pending(h);
28764 }
28765
28766 static inline long interrupt_not_for_us(ctlr_info_t *h)
28767 {
28768 - return ((h->access.intr_pending(h) == 0) ||
28769 + return ((h->access->intr_pending(h) == 0) ||
28770 (h->interrupts_enabled == 0));
28771 }
28772
28773 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
28774 u32 a;
28775
28776 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
28777 - return h->access.command_completed(h);
28778 + return h->access->command_completed(h);
28779
28780 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
28781 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
28782 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
28783 trans_support & CFGTBL_Trans_use_short_tags);
28784
28785 /* Change the access methods to the performant access methods */
28786 - h->access = SA5_performant_access;
28787 + h->access = &SA5_performant_access;
28788 h->transMethod = CFGTBL_Trans_Performant;
28789
28790 return;
28791 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
28792 if (prod_index < 0)
28793 return -ENODEV;
28794 h->product_name = products[prod_index].product_name;
28795 - h->access = *(products[prod_index].access);
28796 + h->access = products[prod_index].access;
28797
28798 if (cciss_board_disabled(h)) {
28799 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
28800 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
28801 }
28802
28803 /* make sure the board interrupts are off */
28804 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28805 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28806 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
28807 if (rc)
28808 goto clean2;
28809 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
28810 * fake ones to scoop up any residual completions.
28811 */
28812 spin_lock_irqsave(&h->lock, flags);
28813 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28814 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28815 spin_unlock_irqrestore(&h->lock, flags);
28816 free_irq(h->intr[h->intr_mode], h);
28817 rc = cciss_request_irq(h, cciss_msix_discard_completions,
28818 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
28819 dev_info(&h->pdev->dev, "Board READY.\n");
28820 dev_info(&h->pdev->dev,
28821 "Waiting for stale completions to drain.\n");
28822 - h->access.set_intr_mask(h, CCISS_INTR_ON);
28823 + h->access->set_intr_mask(h, CCISS_INTR_ON);
28824 msleep(10000);
28825 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28826 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28827
28828 rc = controller_reset_failed(h->cfgtable);
28829 if (rc)
28830 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
28831 cciss_scsi_setup(h);
28832
28833 /* Turn the interrupts on so we can service requests */
28834 - h->access.set_intr_mask(h, CCISS_INTR_ON);
28835 + h->access->set_intr_mask(h, CCISS_INTR_ON);
28836
28837 /* Get the firmware version */
28838 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
28839 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
28840 kfree(flush_buf);
28841 if (return_code != IO_OK)
28842 dev_warn(&h->pdev->dev, "Error flushing cache\n");
28843 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28844 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28845 free_irq(h->intr[h->intr_mode], h);
28846 }
28847
28848 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
28849 index 7fda30e..eb5dfe0 100644
28850 --- a/drivers/block/cciss.h
28851 +++ b/drivers/block/cciss.h
28852 @@ -101,7 +101,7 @@ struct ctlr_info
28853 /* information about each logical volume */
28854 drive_info_struct *drv[CISS_MAX_LUN];
28855
28856 - struct access_method access;
28857 + struct access_method *access;
28858
28859 /* queue and queue Info */
28860 struct list_head reqQ;
28861 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
28862 index 9125bbe..eede5c8 100644
28863 --- a/drivers/block/cpqarray.c
28864 +++ b/drivers/block/cpqarray.c
28865 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28866 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
28867 goto Enomem4;
28868 }
28869 - hba[i]->access.set_intr_mask(hba[i], 0);
28870 + hba[i]->access->set_intr_mask(hba[i], 0);
28871 if (request_irq(hba[i]->intr, do_ida_intr,
28872 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
28873 {
28874 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28875 add_timer(&hba[i]->timer);
28876
28877 /* Enable IRQ now that spinlock and rate limit timer are set up */
28878 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28879 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28880
28881 for(j=0; j<NWD; j++) {
28882 struct gendisk *disk = ida_gendisk[i][j];
28883 @@ -694,7 +694,7 @@ DBGINFO(
28884 for(i=0; i<NR_PRODUCTS; i++) {
28885 if (board_id == products[i].board_id) {
28886 c->product_name = products[i].product_name;
28887 - c->access = *(products[i].access);
28888 + c->access = products[i].access;
28889 break;
28890 }
28891 }
28892 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
28893 hba[ctlr]->intr = intr;
28894 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
28895 hba[ctlr]->product_name = products[j].product_name;
28896 - hba[ctlr]->access = *(products[j].access);
28897 + hba[ctlr]->access = products[j].access;
28898 hba[ctlr]->ctlr = ctlr;
28899 hba[ctlr]->board_id = board_id;
28900 hba[ctlr]->pci_dev = NULL; /* not PCI */
28901 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
28902
28903 while((c = h->reqQ) != NULL) {
28904 /* Can't do anything if we're busy */
28905 - if (h->access.fifo_full(h) == 0)
28906 + if (h->access->fifo_full(h) == 0)
28907 return;
28908
28909 /* Get the first entry from the request Q */
28910 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
28911 h->Qdepth--;
28912
28913 /* Tell the controller to do our bidding */
28914 - h->access.submit_command(h, c);
28915 + h->access->submit_command(h, c);
28916
28917 /* Get onto the completion Q */
28918 addQ(&h->cmpQ, c);
28919 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28920 unsigned long flags;
28921 __u32 a,a1;
28922
28923 - istat = h->access.intr_pending(h);
28924 + istat = h->access->intr_pending(h);
28925 /* Is this interrupt for us? */
28926 if (istat == 0)
28927 return IRQ_NONE;
28928 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28929 */
28930 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
28931 if (istat & FIFO_NOT_EMPTY) {
28932 - while((a = h->access.command_completed(h))) {
28933 + while((a = h->access->command_completed(h))) {
28934 a1 = a; a &= ~3;
28935 if ((c = h->cmpQ) == NULL)
28936 {
28937 @@ -1449,11 +1449,11 @@ static int sendcmd(
28938 /*
28939 * Disable interrupt
28940 */
28941 - info_p->access.set_intr_mask(info_p, 0);
28942 + info_p->access->set_intr_mask(info_p, 0);
28943 /* Make sure there is room in the command FIFO */
28944 /* Actually it should be completely empty at this time. */
28945 for (i = 200000; i > 0; i--) {
28946 - temp = info_p->access.fifo_full(info_p);
28947 + temp = info_p->access->fifo_full(info_p);
28948 if (temp != 0) {
28949 break;
28950 }
28951 @@ -1466,7 +1466,7 @@ DBG(
28952 /*
28953 * Send the cmd
28954 */
28955 - info_p->access.submit_command(info_p, c);
28956 + info_p->access->submit_command(info_p, c);
28957 complete = pollcomplete(ctlr);
28958
28959 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
28960 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
28961 * we check the new geometry. Then turn interrupts back on when
28962 * we're done.
28963 */
28964 - host->access.set_intr_mask(host, 0);
28965 + host->access->set_intr_mask(host, 0);
28966 getgeometry(ctlr);
28967 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
28968 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
28969
28970 for(i=0; i<NWD; i++) {
28971 struct gendisk *disk = ida_gendisk[ctlr][i];
28972 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
28973 /* Wait (up to 2 seconds) for a command to complete */
28974
28975 for (i = 200000; i > 0; i--) {
28976 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
28977 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
28978 if (done == 0) {
28979 udelay(10); /* a short fixed delay */
28980 } else
28981 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
28982 index be73e9d..7fbf140 100644
28983 --- a/drivers/block/cpqarray.h
28984 +++ b/drivers/block/cpqarray.h
28985 @@ -99,7 +99,7 @@ struct ctlr_info {
28986 drv_info_t drv[NWD];
28987 struct proc_dir_entry *proc;
28988
28989 - struct access_method access;
28990 + struct access_method *access;
28991
28992 cmdlist_t *reqQ;
28993 cmdlist_t *cmpQ;
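
The cciss and cpqarray hunks above change the controller structure's access member from an embedded struct access_method (a writable per-controller copy of a function-pointer table) to a pointer at the single table supplied in the products[] array, so call sites become h->access->fn(...) and the shared ops table no longer has to live in writable per-device memory. A hedged before/after sketch with abbreviated, illustrative member names (the real struct carries more ops than shown):

struct ctlr_info;

struct access_method {
        void (*submit_command)(struct ctlr_info *h, void *cmd);
        void (*set_intr_mask)(struct ctlr_info *h, unsigned long mask);
        unsigned long (*fifo_full)(struct ctlr_info *h);
};

/* before:  struct ctlr_info { struct access_method access; ... };
 *          h->access = *(products[i].access);
 *          h->access.submit_command(h, c);                        */

/* after: every controller points at the one shared ops table */
struct ctlr_info {
        struct access_method *access;
        /* ... queues, locks, etc. ... */
};
/*          h->access = products[i].access;
 *          h->access->submit_command(h, c);                       */
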
28994 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
28995 index 8d68056..e67050f 100644
28996 --- a/drivers/block/drbd/drbd_int.h
28997 +++ b/drivers/block/drbd/drbd_int.h
28998 @@ -736,7 +736,7 @@ struct drbd_request;
28999 struct drbd_epoch {
29000 struct list_head list;
29001 unsigned int barrier_nr;
29002 - atomic_t epoch_size; /* increased on every request added. */
29003 + atomic_unchecked_t epoch_size; /* increased on every request added. */
29004 atomic_t active; /* increased on every req. added, and dec on every finished. */
29005 unsigned long flags;
29006 };
29007 @@ -1108,7 +1108,7 @@ struct drbd_conf {
29008 void *int_dig_in;
29009 void *int_dig_vv;
29010 wait_queue_head_t seq_wait;
29011 - atomic_t packet_seq;
29012 + atomic_unchecked_t packet_seq;
29013 unsigned int peer_seq;
29014 spinlock_t peer_seq_lock;
29015 unsigned int minor;
29016 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
29017
29018 static inline void drbd_tcp_cork(struct socket *sock)
29019 {
29020 - int __user val = 1;
29021 + int val = 1;
29022 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29023 - (char __user *)&val, sizeof(val));
29024 + (char __force_user *)&val, sizeof(val));
29025 }
29026
29027 static inline void drbd_tcp_uncork(struct socket *sock)
29028 {
29029 - int __user val = 0;
29030 + int val = 0;
29031 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29032 - (char __user *)&val, sizeof(val));
29033 + (char __force_user *)&val, sizeof(val));
29034 }
29035
29036 static inline void drbd_tcp_nodelay(struct socket *sock)
29037 {
29038 - int __user val = 1;
29039 + int val = 1;
29040 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
29041 - (char __user *)&val, sizeof(val));
29042 + (char __force_user *)&val, sizeof(val));
29043 }
29044
29045 static inline void drbd_tcp_quickack(struct socket *sock)
29046 {
29047 - int __user val = 2;
29048 + int val = 2;
29049 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
29050 - (char __user *)&val, sizeof(val));
29051 + (char __force_user *)&val, sizeof(val));
29052 }
29053
29054 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
29055 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
29056 index 211fc44..c5116f1 100644
29057 --- a/drivers/block/drbd/drbd_main.c
29058 +++ b/drivers/block/drbd/drbd_main.c
29059 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
29060 p.sector = sector;
29061 p.block_id = block_id;
29062 p.blksize = blksize;
29063 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29064 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29065
29066 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
29067 return false;
29068 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
29069 p.sector = cpu_to_be64(req->sector);
29070 p.block_id = (unsigned long)req;
29071 p.seq_num = cpu_to_be32(req->seq_num =
29072 - atomic_add_return(1, &mdev->packet_seq));
29073 + atomic_add_return_unchecked(1, &mdev->packet_seq));
29074
29075 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
29076
29077 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
29078 atomic_set(&mdev->unacked_cnt, 0);
29079 atomic_set(&mdev->local_cnt, 0);
29080 atomic_set(&mdev->net_cnt, 0);
29081 - atomic_set(&mdev->packet_seq, 0);
29082 + atomic_set_unchecked(&mdev->packet_seq, 0);
29083 atomic_set(&mdev->pp_in_use, 0);
29084 atomic_set(&mdev->pp_in_use_by_net, 0);
29085 atomic_set(&mdev->rs_sect_in, 0);
29086 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
29087 mdev->receiver.t_state);
29088
29089 /* no need to lock it, I'm the only thread alive */
29090 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
29091 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
29092 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
29093 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
29094 mdev->al_writ_cnt =
29095 mdev->bm_writ_cnt =
29096 mdev->read_cnt =
29097 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
29098 index 946166e..356b39a 100644
29099 --- a/drivers/block/drbd/drbd_nl.c
29100 +++ b/drivers/block/drbd/drbd_nl.c
29101 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
29102 module_put(THIS_MODULE);
29103 }
29104
29105 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29106 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29107
29108 static unsigned short *
29109 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
29110 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
29111 cn_reply->id.idx = CN_IDX_DRBD;
29112 cn_reply->id.val = CN_VAL_DRBD;
29113
29114 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29115 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29116 cn_reply->ack = 0; /* not used here. */
29117 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29118 (int)((char *)tl - (char *)reply->tag_list);
29119 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
29120 cn_reply->id.idx = CN_IDX_DRBD;
29121 cn_reply->id.val = CN_VAL_DRBD;
29122
29123 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29124 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29125 cn_reply->ack = 0; /* not used here. */
29126 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29127 (int)((char *)tl - (char *)reply->tag_list);
29128 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
29129 cn_reply->id.idx = CN_IDX_DRBD;
29130 cn_reply->id.val = CN_VAL_DRBD;
29131
29132 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
29133 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
29134 cn_reply->ack = 0; // not used here.
29135 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29136 (int)((char*)tl - (char*)reply->tag_list);
29137 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
29138 cn_reply->id.idx = CN_IDX_DRBD;
29139 cn_reply->id.val = CN_VAL_DRBD;
29140
29141 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29142 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29143 cn_reply->ack = 0; /* not used here. */
29144 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29145 (int)((char *)tl - (char *)reply->tag_list);
29146 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
29147 index 43beaca..4a5b1dd 100644
29148 --- a/drivers/block/drbd/drbd_receiver.c
29149 +++ b/drivers/block/drbd/drbd_receiver.c
29150 @@ -894,7 +894,7 @@ retry:
29151 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
29152 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
29153
29154 - atomic_set(&mdev->packet_seq, 0);
29155 + atomic_set_unchecked(&mdev->packet_seq, 0);
29156 mdev->peer_seq = 0;
29157
29158 drbd_thread_start(&mdev->asender);
29159 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29160 do {
29161 next_epoch = NULL;
29162
29163 - epoch_size = atomic_read(&epoch->epoch_size);
29164 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
29165
29166 switch (ev & ~EV_CLEANUP) {
29167 case EV_PUT:
29168 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29169 rv = FE_DESTROYED;
29170 } else {
29171 epoch->flags = 0;
29172 - atomic_set(&epoch->epoch_size, 0);
29173 + atomic_set_unchecked(&epoch->epoch_size, 0);
29174 /* atomic_set(&epoch->active, 0); is already zero */
29175 if (rv == FE_STILL_LIVE)
29176 rv = FE_RECYCLED;
29177 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29178 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
29179 drbd_flush(mdev);
29180
29181 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29182 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29183 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
29184 if (epoch)
29185 break;
29186 }
29187
29188 epoch = mdev->current_epoch;
29189 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
29190 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
29191
29192 D_ASSERT(atomic_read(&epoch->active) == 0);
29193 D_ASSERT(epoch->flags == 0);
29194 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29195 }
29196
29197 epoch->flags = 0;
29198 - atomic_set(&epoch->epoch_size, 0);
29199 + atomic_set_unchecked(&epoch->epoch_size, 0);
29200 atomic_set(&epoch->active, 0);
29201
29202 spin_lock(&mdev->epoch_lock);
29203 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29204 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29205 list_add(&epoch->list, &mdev->current_epoch->list);
29206 mdev->current_epoch = epoch;
29207 mdev->epochs++;
29208 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29209 spin_unlock(&mdev->peer_seq_lock);
29210
29211 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
29212 - atomic_inc(&mdev->current_epoch->epoch_size);
29213 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
29214 return drbd_drain_block(mdev, data_size);
29215 }
29216
29217 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29218
29219 spin_lock(&mdev->epoch_lock);
29220 e->epoch = mdev->current_epoch;
29221 - atomic_inc(&e->epoch->epoch_size);
29222 + atomic_inc_unchecked(&e->epoch->epoch_size);
29223 atomic_inc(&e->epoch->active);
29224 spin_unlock(&mdev->epoch_lock);
29225
29226 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
29227 D_ASSERT(list_empty(&mdev->done_ee));
29228
29229 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
29230 - atomic_set(&mdev->current_epoch->epoch_size, 0);
29231 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
29232 D_ASSERT(list_empty(&mdev->current_epoch->list));
29233 }
29234
29235 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
29236 index bbca966..65e37dd 100644
29237 --- a/drivers/block/loop.c
29238 +++ b/drivers/block/loop.c
29239 @@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
29240 mm_segment_t old_fs = get_fs();
29241
29242 set_fs(get_ds());
29243 - bw = file->f_op->write(file, buf, len, &pos);
29244 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
29245 set_fs(old_fs);
29246 if (likely(bw == len))
29247 return 0;
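
The loop.c hunk above, like the devtmpfs and drbd ones earlier in this section, adds a __force_user cast where a kernel buffer is intentionally handed to an interface declared to take a __user pointer. The call is only legitimate because the caller has already widened the address limit with set_fs(get_ds()); the cast is a sparse-only annotation that records the deliberate address-space crossing and compiles away. Condensed from the hunk, with comments added (old set_fs()-era kernel API, not self-contained):

        mm_segment_t old_fs = get_fs();
        ssize_t bw;

        set_fs(get_ds());       /* "user" accessors may now reach kernel memory */
        bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
        set_fs(old_fs);         /* always restore the previous limit */
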
29248 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
29249 index ee94686..3e09ad3 100644
29250 --- a/drivers/char/Kconfig
29251 +++ b/drivers/char/Kconfig
29252 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
29253
29254 config DEVKMEM
29255 bool "/dev/kmem virtual device support"
29256 - default y
29257 + default n
29258 + depends on !GRKERNSEC_KMEM
29259 help
29260 Say Y here if you want to support the /dev/kmem device. The
29261 /dev/kmem device is rarely used, but can be used for certain
29262 @@ -581,6 +582,7 @@ config DEVPORT
29263 bool
29264 depends on !M68K
29265 depends on ISA || PCI
29266 + depends on !GRKERNSEC_KMEM
29267 default y
29268
29269 source "drivers/s390/char/Kconfig"
29270 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
29271 index 2e04433..22afc64 100644
29272 --- a/drivers/char/agp/frontend.c
29273 +++ b/drivers/char/agp/frontend.c
29274 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
29275 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
29276 return -EFAULT;
29277
29278 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
29279 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
29280 return -EFAULT;
29281
29282 client = agp_find_client_by_pid(reserve.pid);
29283 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
29284 index 21cb980..f15107c 100644
29285 --- a/drivers/char/genrtc.c
29286 +++ b/drivers/char/genrtc.c
29287 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
29288 switch (cmd) {
29289
29290 case RTC_PLL_GET:
29291 + memset(&pll, 0, sizeof(pll));
29292 if (get_rtc_pll(&pll))
29293 return -EINVAL;
29294 else
29295 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
29296 index dfd7876..c0b0885 100644
29297 --- a/drivers/char/hpet.c
29298 +++ b/drivers/char/hpet.c
29299 @@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
29300 }
29301
29302 static int
29303 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29304 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29305 struct hpet_info *info)
29306 {
29307 struct hpet_timer __iomem *timer;
29308 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
29309 index 2c29942..604c5ba 100644
29310 --- a/drivers/char/ipmi/ipmi_msghandler.c
29311 +++ b/drivers/char/ipmi/ipmi_msghandler.c
29312 @@ -420,7 +420,7 @@ struct ipmi_smi {
29313 struct proc_dir_entry *proc_dir;
29314 char proc_dir_name[10];
29315
29316 - atomic_t stats[IPMI_NUM_STATS];
29317 + atomic_unchecked_t stats[IPMI_NUM_STATS];
29318
29319 /*
29320 * run_to_completion duplicate of smb_info, smi_info
29321 @@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
29322
29323
29324 #define ipmi_inc_stat(intf, stat) \
29325 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29326 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29327 #define ipmi_get_stat(intf, stat) \
29328 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29329 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29330
29331 static int is_lan_addr(struct ipmi_addr *addr)
29332 {
29333 @@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29334 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29335 init_waitqueue_head(&intf->waitq);
29336 for (i = 0; i < IPMI_NUM_STATS; i++)
29337 - atomic_set(&intf->stats[i], 0);
29338 + atomic_set_unchecked(&intf->stats[i], 0);
29339
29340 intf->proc_dir = NULL;
29341
29342 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
29343 index 1e638ff..a869ef5 100644
29344 --- a/drivers/char/ipmi/ipmi_si_intf.c
29345 +++ b/drivers/char/ipmi/ipmi_si_intf.c
29346 @@ -275,7 +275,7 @@ struct smi_info {
29347 unsigned char slave_addr;
29348
29349 /* Counters and things for the proc filesystem. */
29350 - atomic_t stats[SI_NUM_STATS];
29351 + atomic_unchecked_t stats[SI_NUM_STATS];
29352
29353 struct task_struct *thread;
29354
29355 @@ -284,9 +284,9 @@ struct smi_info {
29356 };
29357
29358 #define smi_inc_stat(smi, stat) \
29359 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29360 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29361 #define smi_get_stat(smi, stat) \
29362 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29363 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29364
29365 #define SI_MAX_PARMS 4
29366
29367 @@ -3209,7 +3209,7 @@ static int try_smi_init(struct smi_info *new_smi)
29368 atomic_set(&new_smi->req_events, 0);
29369 new_smi->run_to_completion = 0;
29370 for (i = 0; i < SI_NUM_STATS; i++)
29371 - atomic_set(&new_smi->stats[i], 0);
29372 + atomic_set_unchecked(&new_smi->stats[i], 0);
29373
29374 new_smi->interrupt_disabled = 1;
29375 atomic_set(&new_smi->stop_operation, 0);
29376 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
29377 index 47ff7e4..0c7d340 100644
29378 --- a/drivers/char/mbcs.c
29379 +++ b/drivers/char/mbcs.c
29380 @@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
29381 return 0;
29382 }
29383
29384 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29385 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29386 {
29387 .part_num = MBCS_PART_NUM,
29388 .mfg_num = MBCS_MFG_NUM,
29389 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
29390 index d6e9d08..4493e89 100644
29391 --- a/drivers/char/mem.c
29392 +++ b/drivers/char/mem.c
29393 @@ -18,6 +18,7 @@
29394 #include <linux/raw.h>
29395 #include <linux/tty.h>
29396 #include <linux/capability.h>
29397 +#include <linux/security.h>
29398 #include <linux/ptrace.h>
29399 #include <linux/device.h>
29400 #include <linux/highmem.h>
29401 @@ -35,6 +36,10 @@
29402 # include <linux/efi.h>
29403 #endif
29404
29405 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29406 +extern const struct file_operations grsec_fops;
29407 +#endif
29408 +
29409 static inline unsigned long size_inside_page(unsigned long start,
29410 unsigned long size)
29411 {
29412 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29413
29414 while (cursor < to) {
29415 if (!devmem_is_allowed(pfn)) {
29416 +#ifdef CONFIG_GRKERNSEC_KMEM
29417 + gr_handle_mem_readwrite(from, to);
29418 +#else
29419 printk(KERN_INFO
29420 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29421 current->comm, from, to);
29422 +#endif
29423 return 0;
29424 }
29425 cursor += PAGE_SIZE;
29426 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29427 }
29428 return 1;
29429 }
29430 +#elif defined(CONFIG_GRKERNSEC_KMEM)
29431 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29432 +{
29433 + return 0;
29434 +}
29435 #else
29436 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29437 {
29438 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29439
29440 while (count > 0) {
29441 unsigned long remaining;
29442 + char *temp;
29443
29444 sz = size_inside_page(p, count);
29445
29446 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29447 if (!ptr)
29448 return -EFAULT;
29449
29450 - remaining = copy_to_user(buf, ptr, sz);
29451 +#ifdef CONFIG_PAX_USERCOPY
29452 + temp = kmalloc(sz, GFP_KERNEL);
29453 + if (!temp) {
29454 + unxlate_dev_mem_ptr(p, ptr);
29455 + return -ENOMEM;
29456 + }
29457 + memcpy(temp, ptr, sz);
29458 +#else
29459 + temp = ptr;
29460 +#endif
29461 +
29462 + remaining = copy_to_user(buf, temp, sz);
29463 +
29464 +#ifdef CONFIG_PAX_USERCOPY
29465 + kfree(temp);
29466 +#endif
29467 +
29468 unxlate_dev_mem_ptr(p, ptr);
29469 if (remaining)
29470 return -EFAULT;
29471 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29472 size_t count, loff_t *ppos)
29473 {
29474 unsigned long p = *ppos;
29475 - ssize_t low_count, read, sz;
29476 + ssize_t low_count, read, sz, err = 0;
29477 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29478 - int err = 0;
29479
29480 read = 0;
29481 if (p < (unsigned long) high_memory) {
29482 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29483 }
29484 #endif
29485 while (low_count > 0) {
29486 + char *temp;
29487 +
29488 sz = size_inside_page(p, low_count);
29489
29490 /*
29491 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29492 */
29493 kbuf = xlate_dev_kmem_ptr((char *)p);
29494
29495 - if (copy_to_user(buf, kbuf, sz))
29496 +#ifdef CONFIG_PAX_USERCOPY
29497 + temp = kmalloc(sz, GFP_KERNEL);
29498 + if (!temp)
29499 + return -ENOMEM;
29500 + memcpy(temp, kbuf, sz);
29501 +#else
29502 + temp = kbuf;
29503 +#endif
29504 +
29505 + err = copy_to_user(buf, temp, sz);
29506 +
29507 +#ifdef CONFIG_PAX_USERCOPY
29508 + kfree(temp);
29509 +#endif
29510 +
29511 + if (err)
29512 return -EFAULT;
29513 buf += sz;
29514 p += sz;
29515 @@ -867,6 +914,9 @@ static const struct memdev {
29516 #ifdef CONFIG_CRASH_DUMP
29517 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29518 #endif
29519 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29520 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29521 +#endif
29522 };
29523
29524 static int memory_open(struct inode *inode, struct file *filp)
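
Both read_mem() and read_kmem() above gain the same CONFIG_PAX_USERCOPY bounce: rather than copy_to_user() straight from the translated /dev/mem or /dev/kmem pointer, the data is first staged in a kmalloc'd buffer of exactly sz bytes, giving the USERCOPY heap-object check a real slab object to validate. The pattern condensed from the hunk (error paths trimmed, same variable names as above, not self-contained):

#ifdef CONFIG_PAX_USERCOPY
        char *temp = kmalloc(sz, GFP_KERNEL); /* bounce buffer sized to this copy */
        if (!temp)
                return -ENOMEM;
        memcpy(temp, ptr, sz);                /* stage the raw memory */
        remaining = copy_to_user(buf, temp, sz);
        kfree(temp);
#else
        remaining = copy_to_user(buf, ptr, sz);
#endif
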
29525 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
29526 index 9df78e2..01ba9ae 100644
29527 --- a/drivers/char/nvram.c
29528 +++ b/drivers/char/nvram.c
29529 @@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
29530
29531 spin_unlock_irq(&rtc_lock);
29532
29533 - if (copy_to_user(buf, contents, tmp - contents))
29534 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29535 return -EFAULT;
29536
29537 *ppos = i;
29538 diff --git a/drivers/char/random.c b/drivers/char/random.c
29539 index 4ec04a7..4a092ed 100644
29540 --- a/drivers/char/random.c
29541 +++ b/drivers/char/random.c
29542 @@ -261,8 +261,13 @@
29543 /*
29544 * Configuration information
29545 */
29546 +#ifdef CONFIG_GRKERNSEC_RANDNET
29547 +#define INPUT_POOL_WORDS 512
29548 +#define OUTPUT_POOL_WORDS 128
29549 +#else
29550 #define INPUT_POOL_WORDS 128
29551 #define OUTPUT_POOL_WORDS 32
29552 +#endif
29553 #define SEC_XFER_SIZE 512
29554 #define EXTRACT_SIZE 10
29555
29556 @@ -300,10 +305,17 @@ static struct poolinfo {
29557 int poolwords;
29558 int tap1, tap2, tap3, tap4, tap5;
29559 } poolinfo_table[] = {
29560 +#ifdef CONFIG_GRKERNSEC_RANDNET
29561 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29562 + { 512, 411, 308, 208, 104, 1 },
29563 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29564 + { 128, 103, 76, 51, 25, 1 },
29565 +#else
29566 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29567 { 128, 103, 76, 51, 25, 1 },
29568 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29569 { 32, 26, 20, 14, 7, 1 },
29570 +#endif
29571 #if 0
29572 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
29573 { 2048, 1638, 1231, 819, 411, 1 },
29574 @@ -913,7 +925,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
29575
29576 extract_buf(r, tmp);
29577 i = min_t(int, nbytes, EXTRACT_SIZE);
29578 - if (copy_to_user(buf, tmp, i)) {
29579 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
29580 ret = -EFAULT;
29581 break;
29582 }
29583 @@ -1238,7 +1250,7 @@ EXPORT_SYMBOL(generate_random_uuid);
29584 #include <linux/sysctl.h>
29585
29586 static int min_read_thresh = 8, min_write_thresh;
29587 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
29588 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
29589 static int max_write_thresh = INPUT_POOL_WORDS * 32;
29590 static char sysctl_bootid[16];
29591
29592 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
29593 index 45713f0..8286d21 100644
29594 --- a/drivers/char/sonypi.c
29595 +++ b/drivers/char/sonypi.c
29596 @@ -54,6 +54,7 @@
29597
29598 #include <asm/uaccess.h>
29599 #include <asm/io.h>
29600 +#include <asm/local.h>
29601
29602 #include <linux/sonypi.h>
29603
29604 @@ -490,7 +491,7 @@ static struct sonypi_device {
29605 spinlock_t fifo_lock;
29606 wait_queue_head_t fifo_proc_list;
29607 struct fasync_struct *fifo_async;
29608 - int open_count;
29609 + local_t open_count;
29610 int model;
29611 struct input_dev *input_jog_dev;
29612 struct input_dev *input_key_dev;
29613 @@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
29614 static int sonypi_misc_release(struct inode *inode, struct file *file)
29615 {
29616 mutex_lock(&sonypi_device.lock);
29617 - sonypi_device.open_count--;
29618 + local_dec(&sonypi_device.open_count);
29619 mutex_unlock(&sonypi_device.lock);
29620 return 0;
29621 }
29622 @@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
29623 {
29624 mutex_lock(&sonypi_device.lock);
29625 /* Flush input queue on first open */
29626 - if (!sonypi_device.open_count)
29627 + if (!local_read(&sonypi_device.open_count))
29628 kfifo_reset(&sonypi_device.fifo);
29629 - sonypi_device.open_count++;
29630 + local_inc(&sonypi_device.open_count);
29631 mutex_unlock(&sonypi_device.lock);
29632
29633 return 0;
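
sonypi above (and drm_fops/drm_stub later in this section) converts a plain int open_count into a local_t driven by local_inc()/local_dec()/local_read(), so bumping the count and testing for the first opener or last closer becomes a single atomic step instead of separate read-modify-write operations on a bare int. A small usage sketch of that API; the function names and the setup/teardown bodies are placeholders, not code from the patch:

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);

static int example_open(void)
{
        if (local_inc_return(&open_count) == 1) {
                /* first opener: one-time setup, e.g. reset an input fifo */
        }
        return 0;
}

static int example_release(void)
{
        if (local_dec_and_test(&open_count)) {
                /* last closer: one-time teardown */
        }
        return 0;
}
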
29634 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
29635 index ad7c732..5aa8054 100644
29636 --- a/drivers/char/tpm/tpm.c
29637 +++ b/drivers/char/tpm/tpm.c
29638 @@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
29639 chip->vendor.req_complete_val)
29640 goto out_recv;
29641
29642 - if ((status == chip->vendor.req_canceled)) {
29643 + if (status == chip->vendor.req_canceled) {
29644 dev_err(chip->dev, "Operation Canceled\n");
29645 rc = -ECANCELED;
29646 goto out;
29647 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
29648 index 0636520..169c1d0 100644
29649 --- a/drivers/char/tpm/tpm_bios.c
29650 +++ b/drivers/char/tpm/tpm_bios.c
29651 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
29652 event = addr;
29653
29654 if ((event->event_type == 0 && event->event_size == 0) ||
29655 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
29656 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
29657 return NULL;
29658
29659 return addr;
29660 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
29661 return NULL;
29662
29663 if ((event->event_type == 0 && event->event_size == 0) ||
29664 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
29665 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
29666 return NULL;
29667
29668 (*pos)++;
29669 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
29670 int i;
29671
29672 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
29673 - seq_putc(m, data[i]);
29674 + if (!seq_putc(m, data[i]))
29675 + return -EFAULT;
29676
29677 return 0;
29678 }
29679 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
29680 log->bios_event_log_end = log->bios_event_log + len;
29681
29682 virt = acpi_os_map_memory(start, len);
29683 + if (!virt) {
29684 + kfree(log->bios_event_log);
29685 + log->bios_event_log = NULL;
29686 + return -EFAULT;
29687 + }
29688
29689 - memcpy(log->bios_event_log, virt, len);
29690 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
29691
29692 acpi_os_unmap_memory(virt, len);
29693 return 0;
29694 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
29695 index cdf2f54..e55c197 100644
29696 --- a/drivers/char/virtio_console.c
29697 +++ b/drivers/char/virtio_console.c
29698 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
29699 if (to_user) {
29700 ssize_t ret;
29701
29702 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
29703 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
29704 if (ret)
29705 return -EFAULT;
29706 } else {
29707 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
29708 if (!port_has_data(port) && !port->host_connected)
29709 return 0;
29710
29711 - return fill_readbuf(port, ubuf, count, true);
29712 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
29713 }
29714
29715 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
29716 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
29717 index 97f5064..202b6e6 100644
29718 --- a/drivers/edac/edac_pci_sysfs.c
29719 +++ b/drivers/edac/edac_pci_sysfs.c
29720 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
29721 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
29722 static int edac_pci_poll_msec = 1000; /* one second workq period */
29723
29724 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
29725 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
29726 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
29727 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
29728
29729 static struct kobject *edac_pci_top_main_kobj;
29730 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
29731 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29732 edac_printk(KERN_CRIT, EDAC_PCI,
29733 "Signaled System Error on %s\n",
29734 pci_name(dev));
29735 - atomic_inc(&pci_nonparity_count);
29736 + atomic_inc_unchecked(&pci_nonparity_count);
29737 }
29738
29739 if (status & (PCI_STATUS_PARITY)) {
29740 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29741 "Master Data Parity Error on %s\n",
29742 pci_name(dev));
29743
29744 - atomic_inc(&pci_parity_count);
29745 + atomic_inc_unchecked(&pci_parity_count);
29746 }
29747
29748 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29749 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29750 "Detected Parity Error on %s\n",
29751 pci_name(dev));
29752
29753 - atomic_inc(&pci_parity_count);
29754 + atomic_inc_unchecked(&pci_parity_count);
29755 }
29756 }
29757
29758 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29759 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
29760 "Signaled System Error on %s\n",
29761 pci_name(dev));
29762 - atomic_inc(&pci_nonparity_count);
29763 + atomic_inc_unchecked(&pci_nonparity_count);
29764 }
29765
29766 if (status & (PCI_STATUS_PARITY)) {
29767 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29768 "Master Data Parity Error on "
29769 "%s\n", pci_name(dev));
29770
29771 - atomic_inc(&pci_parity_count);
29772 + atomic_inc_unchecked(&pci_parity_count);
29773 }
29774
29775 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29776 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29777 "Detected Parity Error on %s\n",
29778 pci_name(dev));
29779
29780 - atomic_inc(&pci_parity_count);
29781 + atomic_inc_unchecked(&pci_parity_count);
29782 }
29783 }
29784 }
29785 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
29786 if (!check_pci_errors)
29787 return;
29788
29789 - before_count = atomic_read(&pci_parity_count);
29790 + before_count = atomic_read_unchecked(&pci_parity_count);
29791
29792 /* scan all PCI devices looking for a Parity Error on devices and
29793 * bridges.
29794 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
29795 /* Only if operator has selected panic on PCI Error */
29796 if (edac_pci_get_panic_on_pe()) {
29797 /* If the count is different 'after' from 'before' */
29798 - if (before_count != atomic_read(&pci_parity_count))
29799 + if (before_count != atomic_read_unchecked(&pci_parity_count))
29800 panic("EDAC: PCI Parity Error");
29801 }
29802 }
29803 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
29804 index c6074c5..88a9e2e 100644
29805 --- a/drivers/edac/mce_amd.h
29806 +++ b/drivers/edac/mce_amd.h
29807 @@ -82,7 +82,7 @@ extern const char * const ii_msgs[];
29808 struct amd_decoder_ops {
29809 bool (*dc_mce)(u16, u8);
29810 bool (*ic_mce)(u16, u8);
29811 -};
29812 +} __no_const;
29813
29814 void amd_report_gart_errors(bool);
29815 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
29816 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
29817 index cc595eb..4ec702a 100644
29818 --- a/drivers/firewire/core-card.c
29819 +++ b/drivers/firewire/core-card.c
29820 @@ -679,7 +679,7 @@ void fw_card_release(struct kref *kref)
29821
29822 void fw_core_remove_card(struct fw_card *card)
29823 {
29824 - struct fw_card_driver dummy_driver = dummy_driver_template;
29825 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
29826
29827 card->driver->update_phy_reg(card, 4,
29828 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29829 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
29830 index 2e6b245..c3857d9 100644
29831 --- a/drivers/firewire/core-cdev.c
29832 +++ b/drivers/firewire/core-cdev.c
29833 @@ -1341,8 +1341,7 @@ static int init_iso_resource(struct client *client,
29834 int ret;
29835
29836 if ((request->channels == 0 && request->bandwidth == 0) ||
29837 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29838 - request->bandwidth < 0)
29839 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29840 return -EINVAL;
29841
29842 r = kmalloc(sizeof(*r), GFP_KERNEL);
29843 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
29844 index dea2dcc..a4fb978 100644
29845 --- a/drivers/firewire/core-transaction.c
29846 +++ b/drivers/firewire/core-transaction.c
29847 @@ -37,6 +37,7 @@
29848 #include <linux/timer.h>
29849 #include <linux/types.h>
29850 #include <linux/workqueue.h>
29851 +#include <linux/sched.h>
29852
29853 #include <asm/byteorder.h>
29854
29855 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
29856 index 9047f55..e47c7ff 100644
29857 --- a/drivers/firewire/core.h
29858 +++ b/drivers/firewire/core.h
29859 @@ -110,6 +110,7 @@ struct fw_card_driver {
29860
29861 int (*stop_iso)(struct fw_iso_context *ctx);
29862 };
29863 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29864
29865 void fw_card_initialize(struct fw_card *card,
29866 const struct fw_card_driver *driver, struct device *device);
29867 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
29868 index 153980b..4b4d046 100644
29869 --- a/drivers/firmware/dmi_scan.c
29870 +++ b/drivers/firmware/dmi_scan.c
29871 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
29872 }
29873 }
29874 else {
29875 - /*
29876 - * no iounmap() for that ioremap(); it would be a no-op, but
29877 - * it's so early in setup that sucker gets confused into doing
29878 - * what it shouldn't if we actually call it.
29879 - */
29880 p = dmi_ioremap(0xF0000, 0x10000);
29881 if (p == NULL)
29882 goto error;
29883 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
29884 if (buf == NULL)
29885 return -1;
29886
29887 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
29888 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
29889
29890 iounmap(buf);
29891 return 0;
29892 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
29893 index 82d5c20..44a7177 100644
29894 --- a/drivers/gpio/gpio-vr41xx.c
29895 +++ b/drivers/gpio/gpio-vr41xx.c
29896 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29897 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29898 maskl, pendl, maskh, pendh);
29899
29900 - atomic_inc(&irq_err_count);
29901 + atomic_inc_unchecked(&irq_err_count);
29902
29903 return -EINVAL;
29904 }
29905 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
29906 index 8111889..367b253 100644
29907 --- a/drivers/gpu/drm/drm_crtc_helper.c
29908 +++ b/drivers/gpu/drm/drm_crtc_helper.c
29909 @@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
29910 struct drm_crtc *tmp;
29911 int crtc_mask = 1;
29912
29913 - WARN(!crtc, "checking null crtc?\n");
29914 + BUG_ON(!crtc);
29915
29916 dev = crtc->dev;
29917
29918 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
29919 index 6116e3b..c29dd16 100644
29920 --- a/drivers/gpu/drm/drm_drv.c
29921 +++ b/drivers/gpu/drm/drm_drv.c
29922 @@ -316,7 +316,7 @@ module_exit(drm_core_exit);
29923 /**
29924 * Copy and IOCTL return string to user space
29925 */
29926 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
29927 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
29928 {
29929 int len;
29930
29931 @@ -399,7 +399,7 @@ long drm_ioctl(struct file *filp,
29932 return -ENODEV;
29933
29934 atomic_inc(&dev->ioctl_count);
29935 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29936 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29937 ++file_priv->ioctl_count;
29938
29939 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29940 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
29941 index 123de28..43a0897 100644
29942 --- a/drivers/gpu/drm/drm_fops.c
29943 +++ b/drivers/gpu/drm/drm_fops.c
29944 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
29945 }
29946
29947 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29948 - atomic_set(&dev->counts[i], 0);
29949 + atomic_set_unchecked(&dev->counts[i], 0);
29950
29951 dev->sigdata.lock = NULL;
29952
29953 @@ -138,8 +138,8 @@ int drm_open(struct inode *inode, struct file *filp)
29954
29955 retcode = drm_open_helper(inode, filp, dev);
29956 if (!retcode) {
29957 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29958 - if (!dev->open_count++)
29959 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29960 + if (local_inc_return(&dev->open_count) == 1)
29961 retcode = drm_setup(dev);
29962 }
29963 if (!retcode) {
29964 @@ -482,7 +482,7 @@ int drm_release(struct inode *inode, struct file *filp)
29965
29966 mutex_lock(&drm_global_mutex);
29967
29968 - DRM_DEBUG("open_count = %d\n", dev->open_count);
29969 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
29970
29971 if (dev->driver->preclose)
29972 dev->driver->preclose(dev, file_priv);
29973 @@ -491,10 +491,10 @@ int drm_release(struct inode *inode, struct file *filp)
29974 * Begin inline drm_release
29975 */
29976
29977 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
29978 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
29979 task_pid_nr(current),
29980 (long)old_encode_dev(file_priv->minor->device),
29981 - dev->open_count);
29982 + local_read(&dev->open_count));
29983
29984 /* Release any auth tokens that might point to this file_priv,
29985 (do that under the drm_global_mutex) */
29986 @@ -584,8 +584,8 @@ int drm_release(struct inode *inode, struct file *filp)
29987 * End inline drm_release
29988 */
29989
29990 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
29991 - if (!--dev->open_count) {
29992 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
29993 + if (local_dec_and_test(&dev->open_count)) {
29994 if (atomic_read(&dev->ioctl_count)) {
29995 DRM_ERROR("Device busy: %d\n",
29996 atomic_read(&dev->ioctl_count));
29997 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
29998 index c87dc96..326055d 100644
29999 --- a/drivers/gpu/drm/drm_global.c
30000 +++ b/drivers/gpu/drm/drm_global.c
30001 @@ -36,7 +36,7 @@
30002 struct drm_global_item {
30003 struct mutex mutex;
30004 void *object;
30005 - int refcount;
30006 + atomic_t refcount;
30007 };
30008
30009 static struct drm_global_item glob[DRM_GLOBAL_NUM];
30010 @@ -49,7 +49,7 @@ void drm_global_init(void)
30011 struct drm_global_item *item = &glob[i];
30012 mutex_init(&item->mutex);
30013 item->object = NULL;
30014 - item->refcount = 0;
30015 + atomic_set(&item->refcount, 0);
30016 }
30017 }
30018
30019 @@ -59,7 +59,7 @@ void drm_global_release(void)
30020 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30021 struct drm_global_item *item = &glob[i];
30022 BUG_ON(item->object != NULL);
30023 - BUG_ON(item->refcount != 0);
30024 + BUG_ON(atomic_read(&item->refcount) != 0);
30025 }
30026 }
30027
30028 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30029 void *object;
30030
30031 mutex_lock(&item->mutex);
30032 - if (item->refcount == 0) {
30033 + if (atomic_read(&item->refcount) == 0) {
30034 item->object = kzalloc(ref->size, GFP_KERNEL);
30035 if (unlikely(item->object == NULL)) {
30036 ret = -ENOMEM;
30037 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30038 goto out_err;
30039
30040 }
30041 - ++item->refcount;
30042 + atomic_inc(&item->refcount);
30043 ref->object = item->object;
30044 object = item->object;
30045 mutex_unlock(&item->mutex);
30046 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
30047 struct drm_global_item *item = &glob[ref->global_type];
30048
30049 mutex_lock(&item->mutex);
30050 - BUG_ON(item->refcount == 0);
30051 + BUG_ON(atomic_read(&item->refcount) == 0);
30052 BUG_ON(ref->object != item->object);
30053 - if (--item->refcount == 0) {
30054 + if (atomic_dec_and_test(&item->refcount)) {
30055 ref->release(ref);
30056 item->object = NULL;
30057 }
30058 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30059 index ab1162d..42587b2 100644
30060 --- a/drivers/gpu/drm/drm_info.c
30061 +++ b/drivers/gpu/drm/drm_info.c
30062 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
30063 struct drm_local_map *map;
30064 struct drm_map_list *r_list;
30065
30066 - /* Hardcoded from _DRM_FRAME_BUFFER,
30067 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30068 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30069 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30070 + static const char * const types[] = {
30071 + [_DRM_FRAME_BUFFER] = "FB",
30072 + [_DRM_REGISTERS] = "REG",
30073 + [_DRM_SHM] = "SHM",
30074 + [_DRM_AGP] = "AGP",
30075 + [_DRM_SCATTER_GATHER] = "SG",
30076 + [_DRM_CONSISTENT] = "PCI",
30077 + [_DRM_GEM] = "GEM" };
30078 const char *type;
30079 int i;
30080
30081 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
30082 map = r_list->map;
30083 if (!map)
30084 continue;
30085 - if (map->type < 0 || map->type > 5)
30086 + if (map->type >= ARRAY_SIZE(types))
30087 type = "??";
30088 else
30089 type = types[map->type];
30090 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
30091 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30092 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30093 vma->vm_flags & VM_IO ? 'i' : '-',
30094 +#ifdef CONFIG_GRKERNSEC_HIDESYM
30095 + 0);
30096 +#else
30097 vma->vm_pgoff);
30098 +#endif
30099
30100 #if defined(__i386__)
30101 pgprot = pgprot_val(vma->vm_page_prot);
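
drm_vm_info() above swaps a positional string array plus a hardcoded type > 5 test for designated initializers keyed by the _DRM_* map-type constants and an ARRAY_SIZE() bound, which both documents the index-to-name mapping and keeps a newly added type (here _DRM_GEM) from walking past the table. The idiom in isolation; the enum values and names below are placeholders, not the DRM ones:

enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_GEM, MAP_NR };

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const type_names[] = {
        [MAP_FB]  = "FB",
        [MAP_REG] = "REG",
        [MAP_SHM] = "SHM",
        [MAP_GEM] = "GEM",      /* any index not listed would default to NULL */
};

static const char *type_name(unsigned int type)
{
        if (type >= ARRAY_SIZE(type_names) || !type_names[type])
                return "??";
        return type_names[type];
}
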
30102 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
30103 index 637fcc3..e890b33 100644
30104 --- a/drivers/gpu/drm/drm_ioc32.c
30105 +++ b/drivers/gpu/drm/drm_ioc32.c
30106 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
30107 request = compat_alloc_user_space(nbytes);
30108 if (!access_ok(VERIFY_WRITE, request, nbytes))
30109 return -EFAULT;
30110 - list = (struct drm_buf_desc *) (request + 1);
30111 + list = (struct drm_buf_desc __user *) (request + 1);
30112
30113 if (__put_user(count, &request->count)
30114 || __put_user(list, &request->list))
30115 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
30116 request = compat_alloc_user_space(nbytes);
30117 if (!access_ok(VERIFY_WRITE, request, nbytes))
30118 return -EFAULT;
30119 - list = (struct drm_buf_pub *) (request + 1);
30120 + list = (struct drm_buf_pub __user *) (request + 1);
30121
30122 if (__put_user(count, &request->count)
30123 || __put_user(list, &request->list))
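
The drm_ioc32.c changes are annotation only: the pointer produced by compat_alloc_user_space() already targets user memory, and casting it with the __user qualifier lets sparse flag any direct dereference. __user has no effect on generated code; a rough sketch of how such an annotation is commonly modelled (the attribute spelling here is an assumption for illustration, not copied from this tree):

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

struct desc { int handle; };

struct request {
    int count;
    struct desc __user *list;
};

/* A checker such as sparse would warn if this code dereferenced the
 * returned pointer directly; user pointers must go through the
 * copy_*_user helpers instead. */
static struct desc __user *payload_after(struct request __user *req)
{
    return (struct desc __user *)(req + 1);   /* same cast shape as the hunk */
}

int main(void)
{
    struct request req = { .count = 4 };
    return payload_after(&req) == 0;          /* one-past pointer, non-NULL */
}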
30124 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
30125 index cf85155..f2665cb 100644
30126 --- a/drivers/gpu/drm/drm_ioctl.c
30127 +++ b/drivers/gpu/drm/drm_ioctl.c
30128 @@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
30129 stats->data[i].value =
30130 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30131 else
30132 - stats->data[i].value = atomic_read(&dev->counts[i]);
30133 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30134 stats->data[i].type = dev->types[i];
30135 }
30136
30137 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
30138 index c79c713..2048588 100644
30139 --- a/drivers/gpu/drm/drm_lock.c
30140 +++ b/drivers/gpu/drm/drm_lock.c
30141 @@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30142 if (drm_lock_take(&master->lock, lock->context)) {
30143 master->lock.file_priv = file_priv;
30144 master->lock.lock_time = jiffies;
30145 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30146 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30147 break; /* Got lock */
30148 }
30149
30150 @@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30151 return -EINVAL;
30152 }
30153
30154 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30155 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30156
30157 if (drm_lock_free(&master->lock, lock->context)) {
30158 /* FIXME: Should really bail out here. */
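
The drm_ioctl.c and drm_lock.c hunks (and many of the driver hunks that follow) switch dev->counts[] from atomic_t to atomic_unchecked_t. Under the reference-count hardening this patch carries, ordinary atomic_t operations are instrumented to detect overflow; counters that are purely statistical and are allowed to wrap are moved to the *_unchecked variants to opt out of that check. A rough userspace illustration of the two policies, using a GCC overflow builtin in place of the patch's instrumentation (atomicity is omitted; only the overflow behaviour is shown):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* Checked increment: refuses to wrap, which is what a reference count
 * needs, since a wrapped refcount leads to premature frees. */
static void inc_checked(int *v)
{
    int next;
    if (__builtin_add_overflow(*v, 1, &next)) {
        fprintf(stderr, "counter overflow detected\n");
        abort();
    }
    *v = next;
}

/* Unchecked increment: plain modular wrap-around, fine for statistics
 * such as the lock/unlock counters in the hunks above. */
static void inc_unchecked(unsigned int *v)
{
    (*v)++;
}

int main(void)
{
    unsigned int stat = UINT_MAX;
    inc_unchecked(&stat);                 /* wraps to 0, harmless */
    printf("stat counter wrapped to %u\n", stat);

    int ref = INT_MAX;
    inc_checked(&ref);                    /* aborts rather than wrapping */
    return 0;
}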
30159 diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
30160 index aa454f8..6d38580 100644
30161 --- a/drivers/gpu/drm/drm_stub.c
30162 +++ b/drivers/gpu/drm/drm_stub.c
30163 @@ -512,7 +512,7 @@ void drm_unplug_dev(struct drm_device *dev)
30164
30165 drm_device_set_unplugged(dev);
30166
30167 - if (dev->open_count == 0) {
30168 + if (local_read(&dev->open_count) == 0) {
30169 drm_put_dev(dev);
30170 }
30171 mutex_unlock(&drm_global_mutex);
30172 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
30173 index f920fb5..001c52d 100644
30174 --- a/drivers/gpu/drm/i810/i810_dma.c
30175 +++ b/drivers/gpu/drm/i810/i810_dma.c
30176 @@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
30177 dma->buflist[vertex->idx],
30178 vertex->discard, vertex->used);
30179
30180 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30181 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30182 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30183 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30184 sarea_priv->last_enqueue = dev_priv->counter - 1;
30185 sarea_priv->last_dispatch = (int)hw_status[5];
30186
30187 @@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
30188 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30189 mc->last_render);
30190
30191 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30192 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30193 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30194 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30195 sarea_priv->last_enqueue = dev_priv->counter - 1;
30196 sarea_priv->last_dispatch = (int)hw_status[5];
30197
30198 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30199 index c9339f4..f5e1b9d 100644
30200 --- a/drivers/gpu/drm/i810/i810_drv.h
30201 +++ b/drivers/gpu/drm/i810/i810_drv.h
30202 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30203 int page_flipping;
30204
30205 wait_queue_head_t irq_queue;
30206 - atomic_t irq_received;
30207 - atomic_t irq_emitted;
30208 + atomic_unchecked_t irq_received;
30209 + atomic_unchecked_t irq_emitted;
30210
30211 int front_offset;
30212 } drm_i810_private_t;
30213 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
30214 index e6162a1..b2ff486 100644
30215 --- a/drivers/gpu/drm/i915/i915_debugfs.c
30216 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
30217 @@ -500,7 +500,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
30218 I915_READ(GTIMR));
30219 }
30220 seq_printf(m, "Interrupts received: %d\n",
30221 - atomic_read(&dev_priv->irq_received));
30222 + atomic_read_unchecked(&dev_priv->irq_received));
30223 for (i = 0; i < I915_NUM_RINGS; i++) {
30224 if (IS_GEN6(dev) || IS_GEN7(dev)) {
30225 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
30226 @@ -1313,7 +1313,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
30227 return ret;
30228
30229 if (opregion->header)
30230 - seq_write(m, opregion->header, OPREGION_SIZE);
30231 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
30232
30233 mutex_unlock(&dev->struct_mutex);
30234
30235 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
30236 index ba60f3c..e2dff7f 100644
30237 --- a/drivers/gpu/drm/i915/i915_dma.c
30238 +++ b/drivers/gpu/drm/i915/i915_dma.c
30239 @@ -1178,7 +1178,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
30240 bool can_switch;
30241
30242 spin_lock(&dev->count_lock);
30243 - can_switch = (dev->open_count == 0);
30244 + can_switch = (local_read(&dev->open_count) == 0);
30245 spin_unlock(&dev->count_lock);
30246 return can_switch;
30247 }
30248 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
30249 index 5fabc6c..0b08aa1 100644
30250 --- a/drivers/gpu/drm/i915/i915_drv.h
30251 +++ b/drivers/gpu/drm/i915/i915_drv.h
30252 @@ -240,7 +240,7 @@ struct drm_i915_display_funcs {
30253 /* render clock increase/decrease */
30254 /* display clock increase/decrease */
30255 /* pll clock increase/decrease */
30256 -};
30257 +} __no_const;
30258
30259 struct intel_device_info {
30260 u8 gen;
30261 @@ -350,7 +350,7 @@ typedef struct drm_i915_private {
30262 int current_page;
30263 int page_flipping;
30264
30265 - atomic_t irq_received;
30266 + atomic_unchecked_t irq_received;
30267
30268 /* protects the irq masks */
30269 spinlock_t irq_lock;
30270 @@ -937,7 +937,7 @@ struct drm_i915_gem_object {
30271 * will be page flipped away on the next vblank. When it
30272 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30273 */
30274 - atomic_t pending_flip;
30275 + atomic_unchecked_t pending_flip;
30276 };
30277
30278 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
30279 @@ -1359,7 +1359,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
30280 extern void intel_teardown_gmbus(struct drm_device *dev);
30281 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30282 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30283 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30284 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30285 {
30286 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30287 }
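
Two separate fixes appear in i915_drv.h: structures consisting only of function pointers are normally made read-only by the patch's constification, so ones the driver still writes at runtime are tagged __no_const to opt out; and intel_gmbus_is_forced_bit() changes from "extern inline", whose semantics differ between GNU89 and C99 modes, to the conventional "static inline" for a header-defined helper. A small sketch of the read-only ops-table pattern that constification enforces, with the annotation stubbed out:

#include <stdio.h>

/* Stand-in for the patch's annotation: structs of function pointers are
 * treated as const unless explicitly marked otherwise. */
#define __no_const /* opt out of constification */

struct display_funcs {
    int  (*get_clock)(void);
    void (*set_clock)(int khz);
};

static int  demo_get(void)     { return 100000; }
static void demo_set(int khz)  { printf("clock set to %d kHz\n", khz); }

/* Read-only ops table: lives in .rodata, so its hooks cannot be
 * retargeted at runtime by a stray or malicious write. */
static const struct display_funcs funcs = {
    .get_clock = demo_get,
    .set_clock = demo_set,
};

int main(void)
{
    funcs.set_clock(funcs.get_clock());
    return 0;
}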
30288 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30289 index de43194..a14c4cc 100644
30290 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30291 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30292 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
30293 i915_gem_clflush_object(obj);
30294
30295 if (obj->base.pending_write_domain)
30296 - cd->flips |= atomic_read(&obj->pending_flip);
30297 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30298
30299 /* The actual obj->write_domain will be updated with
30300 * pending_write_domain after we emit the accumulated flush for all
30301 @@ -933,9 +933,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
30302
30303 static int
30304 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30305 - int count)
30306 + unsigned int count)
30307 {
30308 - int i;
30309 + unsigned int i;
30310
30311 for (i = 0; i < count; i++) {
30312 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30313 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
30314 index f57e5cf..c82f79d 100644
30315 --- a/drivers/gpu/drm/i915/i915_irq.c
30316 +++ b/drivers/gpu/drm/i915/i915_irq.c
30317 @@ -472,7 +472,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
30318 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30319 struct drm_i915_master_private *master_priv;
30320
30321 - atomic_inc(&dev_priv->irq_received);
30322 + atomic_inc_unchecked(&dev_priv->irq_received);
30323
30324 /* disable master interrupt before clearing iir */
30325 de_ier = I915_READ(DEIER);
30326 @@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
30327 struct drm_i915_master_private *master_priv;
30328 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
30329
30330 - atomic_inc(&dev_priv->irq_received);
30331 + atomic_inc_unchecked(&dev_priv->irq_received);
30332
30333 if (IS_GEN6(dev))
30334 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
30335 @@ -1292,7 +1292,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
30336 int ret = IRQ_NONE, pipe;
30337 bool blc_event = false;
30338
30339 - atomic_inc(&dev_priv->irq_received);
30340 + atomic_inc_unchecked(&dev_priv->irq_received);
30341
30342 iir = I915_READ(IIR);
30343
30344 @@ -1803,7 +1803,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
30345 {
30346 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30347
30348 - atomic_set(&dev_priv->irq_received, 0);
30349 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30350
30351 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30352 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30353 @@ -1980,7 +1980,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
30354 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30355 int pipe;
30356
30357 - atomic_set(&dev_priv->irq_received, 0);
30358 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30359
30360 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30361 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30362 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
30363 index d4d162f..e80037c 100644
30364 --- a/drivers/gpu/drm/i915/intel_display.c
30365 +++ b/drivers/gpu/drm/i915/intel_display.c
30366 @@ -2254,7 +2254,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
30367
30368 wait_event(dev_priv->pending_flip_queue,
30369 atomic_read(&dev_priv->mm.wedged) ||
30370 - atomic_read(&obj->pending_flip) == 0);
30371 + atomic_read_unchecked(&obj->pending_flip) == 0);
30372
30373 /* Big Hammer, we also need to ensure that any pending
30374 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
30375 @@ -2919,7 +2919,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
30376 obj = to_intel_framebuffer(crtc->fb)->obj;
30377 dev_priv = crtc->dev->dev_private;
30378 wait_event(dev_priv->pending_flip_queue,
30379 - atomic_read(&obj->pending_flip) == 0);
30380 + atomic_read_unchecked(&obj->pending_flip) == 0);
30381 }
30382
30383 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
30384 @@ -7286,7 +7286,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
30385
30386 atomic_clear_mask(1 << intel_crtc->plane,
30387 &obj->pending_flip.counter);
30388 - if (atomic_read(&obj->pending_flip) == 0)
30389 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
30390 wake_up(&dev_priv->pending_flip_queue);
30391
30392 schedule_work(&work->work);
30393 @@ -7582,7 +7582,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30394 /* Block clients from rendering to the new back buffer until
30395 * the flip occurs and the object is no longer visible.
30396 */
30397 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30398 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30399
30400 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
30401 if (ret)
30402 @@ -7596,7 +7596,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30403 return 0;
30404
30405 cleanup_pending:
30406 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30407 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30408 drm_gem_object_unreference(&work->old_fb_obj->base);
30409 drm_gem_object_unreference(&obj->base);
30410 mutex_unlock(&dev->struct_mutex);
30411 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
30412 index 54558a0..2d97005 100644
30413 --- a/drivers/gpu/drm/mga/mga_drv.h
30414 +++ b/drivers/gpu/drm/mga/mga_drv.h
30415 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
30416 u32 clear_cmd;
30417 u32 maccess;
30418
30419 - atomic_t vbl_received; /**< Number of vblanks received. */
30420 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
30421 wait_queue_head_t fence_queue;
30422 - atomic_t last_fence_retired;
30423 + atomic_unchecked_t last_fence_retired;
30424 u32 next_fence_to_post;
30425
30426 unsigned int fb_cpp;
30427 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
30428 index 2581202..f230a8d9 100644
30429 --- a/drivers/gpu/drm/mga/mga_irq.c
30430 +++ b/drivers/gpu/drm/mga/mga_irq.c
30431 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
30432 if (crtc != 0)
30433 return 0;
30434
30435 - return atomic_read(&dev_priv->vbl_received);
30436 + return atomic_read_unchecked(&dev_priv->vbl_received);
30437 }
30438
30439
30440 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30441 /* VBLANK interrupt */
30442 if (status & MGA_VLINEPEN) {
30443 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
30444 - atomic_inc(&dev_priv->vbl_received);
30445 + atomic_inc_unchecked(&dev_priv->vbl_received);
30446 drm_handle_vblank(dev, 0);
30447 handled = 1;
30448 }
30449 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30450 if ((prim_start & ~0x03) != (prim_end & ~0x03))
30451 MGA_WRITE(MGA_PRIMEND, prim_end);
30452
30453 - atomic_inc(&dev_priv->last_fence_retired);
30454 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
30455 DRM_WAKEUP(&dev_priv->fence_queue);
30456 handled = 1;
30457 }
30458 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
30459 * using fences.
30460 */
30461 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
30462 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
30463 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
30464 - *sequence) <= (1 << 23)));
30465
30466 *sequence = cur_fence;
30467 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
30468 index 0be4a81..7464804 100644
30469 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
30470 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
30471 @@ -5329,7 +5329,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
30472 struct bit_table {
30473 const char id;
30474 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
30475 -};
30476 +} __no_const;
30477
30478 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
30479
30480 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
30481 index 3aef353..0ad1322 100644
30482 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
30483 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
30484 @@ -240,7 +240,7 @@ struct nouveau_channel {
30485 struct list_head pending;
30486 uint32_t sequence;
30487 uint32_t sequence_ack;
30488 - atomic_t last_sequence_irq;
30489 + atomic_unchecked_t last_sequence_irq;
30490 struct nouveau_vma vma;
30491 } fence;
30492
30493 @@ -321,7 +321,7 @@ struct nouveau_exec_engine {
30494 u32 handle, u16 class);
30495 void (*set_tile_region)(struct drm_device *dev, int i);
30496 void (*tlb_flush)(struct drm_device *, int engine);
30497 -};
30498 +} __no_const;
30499
30500 struct nouveau_instmem_engine {
30501 void *priv;
30502 @@ -343,13 +343,13 @@ struct nouveau_instmem_engine {
30503 struct nouveau_mc_engine {
30504 int (*init)(struct drm_device *dev);
30505 void (*takedown)(struct drm_device *dev);
30506 -};
30507 +} __no_const;
30508
30509 struct nouveau_timer_engine {
30510 int (*init)(struct drm_device *dev);
30511 void (*takedown)(struct drm_device *dev);
30512 uint64_t (*read)(struct drm_device *dev);
30513 -};
30514 +} __no_const;
30515
30516 struct nouveau_fb_engine {
30517 int num_tiles;
30518 @@ -590,7 +590,7 @@ struct nouveau_vram_engine {
30519 void (*put)(struct drm_device *, struct nouveau_mem **);
30520
30521 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
30522 -};
30523 +} __no_const;
30524
30525 struct nouveau_engine {
30526 struct nouveau_instmem_engine instmem;
30527 @@ -739,7 +739,7 @@ struct drm_nouveau_private {
30528 struct drm_global_reference mem_global_ref;
30529 struct ttm_bo_global_ref bo_global_ref;
30530 struct ttm_bo_device bdev;
30531 - atomic_t validate_sequence;
30532 + atomic_unchecked_t validate_sequence;
30533 } ttm;
30534
30535 struct {
30536 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
30537 index c1dc20f..4df673c 100644
30538 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
30539 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
30540 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
30541 if (USE_REFCNT(dev))
30542 sequence = nvchan_rd32(chan, 0x48);
30543 else
30544 - sequence = atomic_read(&chan->fence.last_sequence_irq);
30545 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
30546
30547 if (chan->fence.sequence_ack == sequence)
30548 goto out;
30549 @@ -538,7 +538,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
30550 return ret;
30551 }
30552
30553 - atomic_set(&chan->fence.last_sequence_irq, 0);
30554 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
30555 return 0;
30556 }
30557
30558 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
30559 index ed52a6f..484acdc 100644
30560 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30561 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30562 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
30563 int trycnt = 0;
30564 int ret, i;
30565
30566 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30567 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30568 retry:
30569 if (++trycnt > 100000) {
30570 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
30571 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
30572 index c2a8511..4b996f9 100644
30573 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
30574 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
30575 @@ -588,7 +588,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
30576 bool can_switch;
30577
30578 spin_lock(&dev->count_lock);
30579 - can_switch = (dev->open_count == 0);
30580 + can_switch = (local_read(&dev->open_count) == 0);
30581 spin_unlock(&dev->count_lock);
30582 return can_switch;
30583 }
30584 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
30585 index dbdea8e..cd6eeeb 100644
30586 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
30587 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
30588 @@ -554,7 +554,7 @@ static int
30589 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
30590 u32 class, u32 mthd, u32 data)
30591 {
30592 - atomic_set(&chan->fence.last_sequence_irq, data);
30593 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
30594 return 0;
30595 }
30596
30597 diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
30598 index 2746402..c8dc4a4 100644
30599 --- a/drivers/gpu/drm/nouveau/nv50_sor.c
30600 +++ b/drivers/gpu/drm/nouveau/nv50_sor.c
30601 @@ -304,7 +304,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
30602 }
30603
30604 if (nv_encoder->dcb->type == OUTPUT_DP) {
30605 - struct dp_train_func func = {
30606 + static struct dp_train_func func = {
30607 .link_set = nv50_sor_dp_link_set,
30608 .train_set = nv50_sor_dp_train_set,
30609 .train_adj = nv50_sor_dp_train_adj
30610 diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
30611 index 0247250..d2f6aaf 100644
30612 --- a/drivers/gpu/drm/nouveau/nvd0_display.c
30613 +++ b/drivers/gpu/drm/nouveau/nvd0_display.c
30614 @@ -1366,7 +1366,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
30615 nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
30616
30617 if (nv_encoder->dcb->type == OUTPUT_DP) {
30618 - struct dp_train_func func = {
30619 + static struct dp_train_func func = {
30620 .link_set = nvd0_sor_dp_link_set,
30621 .train_set = nvd0_sor_dp_train_set,
30622 .train_adj = nvd0_sor_dp_train_adj
30623 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30624 index bcac90b..53bfc76 100644
30625 --- a/drivers/gpu/drm/r128/r128_cce.c
30626 +++ b/drivers/gpu/drm/r128/r128_cce.c
30627 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30628
30629 /* GH: Simple idle check.
30630 */
30631 - atomic_set(&dev_priv->idle_count, 0);
30632 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30633
30634 /* We don't support anything other than bus-mastering ring mode,
30635 * but the ring can be in either AGP or PCI space for the ring
30636 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30637 index 930c71b..499aded 100644
30638 --- a/drivers/gpu/drm/r128/r128_drv.h
30639 +++ b/drivers/gpu/drm/r128/r128_drv.h
30640 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30641 int is_pci;
30642 unsigned long cce_buffers_offset;
30643
30644 - atomic_t idle_count;
30645 + atomic_unchecked_t idle_count;
30646
30647 int page_flipping;
30648 int current_page;
30649 u32 crtc_offset;
30650 u32 crtc_offset_cntl;
30651
30652 - atomic_t vbl_received;
30653 + atomic_unchecked_t vbl_received;
30654
30655 u32 color_fmt;
30656 unsigned int front_offset;
30657 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30658 index 429d5a0..7e899ed 100644
30659 --- a/drivers/gpu/drm/r128/r128_irq.c
30660 +++ b/drivers/gpu/drm/r128/r128_irq.c
30661 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30662 if (crtc != 0)
30663 return 0;
30664
30665 - return atomic_read(&dev_priv->vbl_received);
30666 + return atomic_read_unchecked(&dev_priv->vbl_received);
30667 }
30668
30669 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30670 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30671 /* VBLANK interrupt */
30672 if (status & R128_CRTC_VBLANK_INT) {
30673 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30674 - atomic_inc(&dev_priv->vbl_received);
30675 + atomic_inc_unchecked(&dev_priv->vbl_received);
30676 drm_handle_vblank(dev, 0);
30677 return IRQ_HANDLED;
30678 }
30679 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30680 index a9e33ce..09edd4b 100644
30681 --- a/drivers/gpu/drm/r128/r128_state.c
30682 +++ b/drivers/gpu/drm/r128/r128_state.c
30683 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
30684
30685 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30686 {
30687 - if (atomic_read(&dev_priv->idle_count) == 0)
30688 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30689 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30690 else
30691 - atomic_set(&dev_priv->idle_count, 0);
30692 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30693 }
30694
30695 #endif
30696 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
30697 index 5a82b6b..9e69c73 100644
30698 --- a/drivers/gpu/drm/radeon/mkregtable.c
30699 +++ b/drivers/gpu/drm/radeon/mkregtable.c
30700 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
30701 regex_t mask_rex;
30702 regmatch_t match[4];
30703 char buf[1024];
30704 - size_t end;
30705 + long end;
30706 int len;
30707 int done = 0;
30708 int r;
30709 unsigned o;
30710 struct offset *offset;
30711 char last_reg_s[10];
30712 - int last_reg;
30713 + unsigned long last_reg;
30714
30715 if (regcomp
30716 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30717 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
30718 index 138b952..d74f9cb 100644
30719 --- a/drivers/gpu/drm/radeon/radeon.h
30720 +++ b/drivers/gpu/drm/radeon/radeon.h
30721 @@ -253,7 +253,7 @@ struct radeon_fence_driver {
30722 uint32_t scratch_reg;
30723 uint64_t gpu_addr;
30724 volatile uint32_t *cpu_addr;
30725 - atomic_t seq;
30726 + atomic_unchecked_t seq;
30727 uint32_t last_seq;
30728 unsigned long last_jiffies;
30729 unsigned long last_timeout;
30730 @@ -753,7 +753,7 @@ struct r600_blit_cp_primitives {
30731 int x2, int y2);
30732 void (*draw_auto)(struct radeon_device *rdev);
30733 void (*set_default_state)(struct radeon_device *rdev);
30734 -};
30735 +} __no_const;
30736
30737 struct r600_blit {
30738 struct mutex mutex;
30739 @@ -1246,7 +1246,7 @@ struct radeon_asic {
30740 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
30741 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
30742 } pflip;
30743 -};
30744 +} __no_const;
30745
30746 /*
30747 * Asic structures
30748 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
30749 index 5992502..c19c633 100644
30750 --- a/drivers/gpu/drm/radeon/radeon_device.c
30751 +++ b/drivers/gpu/drm/radeon/radeon_device.c
30752 @@ -691,7 +691,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
30753 bool can_switch;
30754
30755 spin_lock(&dev->count_lock);
30756 - can_switch = (dev->open_count == 0);
30757 + can_switch = (local_read(&dev->open_count) == 0);
30758 spin_unlock(&dev->count_lock);
30759 return can_switch;
30760 }
30761 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
30762 index a1b59ca..86f2d44 100644
30763 --- a/drivers/gpu/drm/radeon/radeon_drv.h
30764 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
30765 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
30766
30767 /* SW interrupt */
30768 wait_queue_head_t swi_queue;
30769 - atomic_t swi_emitted;
30770 + atomic_unchecked_t swi_emitted;
30771 int vblank_crtc;
30772 uint32_t irq_enable_reg;
30773 uint32_t r500_disp_irq_reg;
30774 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
30775 index 4bd36a3..e66fe9c 100644
30776 --- a/drivers/gpu/drm/radeon/radeon_fence.c
30777 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
30778 @@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
30779 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
30780 return 0;
30781 }
30782 - fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
30783 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
30784 if (!rdev->ring[fence->ring].ready)
30785 /* FIXME: cp is not running assume everythings is done right
30786 * away
30787 @@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
30788 }
30789 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
30790 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
30791 - radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
30792 + radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
30793 rdev->fence_drv[ring].initialized = true;
30794 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
30795 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
30796 @@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
30797 rdev->fence_drv[ring].scratch_reg = -1;
30798 rdev->fence_drv[ring].cpu_addr = NULL;
30799 rdev->fence_drv[ring].gpu_addr = 0;
30800 - atomic_set(&rdev->fence_drv[ring].seq, 0);
30801 + atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
30802 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
30803 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
30804 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
30805 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
30806 index 48b7cea..342236f 100644
30807 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
30808 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
30809 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
30810 request = compat_alloc_user_space(sizeof(*request));
30811 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30812 || __put_user(req32.param, &request->param)
30813 - || __put_user((void __user *)(unsigned long)req32.value,
30814 + || __put_user((unsigned long)req32.value,
30815 &request->value))
30816 return -EFAULT;
30817
30818 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
30819 index 00da384..32f972d 100644
30820 --- a/drivers/gpu/drm/radeon/radeon_irq.c
30821 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
30822 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
30823 unsigned int ret;
30824 RING_LOCALS;
30825
30826 - atomic_inc(&dev_priv->swi_emitted);
30827 - ret = atomic_read(&dev_priv->swi_emitted);
30828 + atomic_inc_unchecked(&dev_priv->swi_emitted);
30829 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30830
30831 BEGIN_RING(4);
30832 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30833 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
30834 drm_radeon_private_t *dev_priv =
30835 (drm_radeon_private_t *) dev->dev_private;
30836
30837 - atomic_set(&dev_priv->swi_emitted, 0);
30838 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30839 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30840
30841 dev->max_vblank_count = 0x001fffff;
30842 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
30843 index e8422ae..d22d4a8 100644
30844 --- a/drivers/gpu/drm/radeon/radeon_state.c
30845 +++ b/drivers/gpu/drm/radeon/radeon_state.c
30846 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
30847 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
30848 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
30849
30850 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30851 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30852 sarea_priv->nbox * sizeof(depth_boxes[0])))
30853 return -EFAULT;
30854
30855 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
30856 {
30857 drm_radeon_private_t *dev_priv = dev->dev_private;
30858 drm_radeon_getparam_t *param = data;
30859 - int value;
30860 + int value = 0;
30861
30862 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30863
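
radeon_state.c gains two defensive tweaks: the depth-clear path re-checks sarea_priv->nbox directly in the copy condition so the amount copied into the on-stack depth_boxes array can never exceed its capacity (the SAREA is shared with user space, so an earlier clamp alone is not enough), and getparam's local "value" is zero-initialized so an unhandled parameter cannot copy uninitialized stack back out. A sketch of the bounded copy-in, reading the count once before using it, with a stub standing in for DRM_COPY_FROM_USER:

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define NR_CLIPRECTS 12

struct rect { int x1, y1, x2, y2; };

/* Stand-in for DRM_COPY_FROM_USER(); returns nonzero on fault. */
static int copy_from_user_stub(void *dst, const void *src, size_t n)
{
    memcpy(dst, src, n);
    return 0;
}

static int clear_boxes(volatile unsigned int *shared_nbox,
                       const struct rect *user_boxes)
{
    struct rect boxes[NR_CLIPRECTS];
    unsigned int nbox = *shared_nbox;   /* snapshot the shared count once */

    /* Validate the same value that sizes the copy, so a concurrent
     * rewrite of shared memory cannot grow it past the destination. */
    if (nbox > NR_CLIPRECTS ||
        copy_from_user_stub(boxes, user_boxes, nbox * sizeof(boxes[0])))
        return -EFAULT;

    printf("copied %u boxes\n", nbox);
    return 0;
}

int main(void)
{
    struct rect user_boxes[NR_CLIPRECTS] = { { 0, 0, 16, 16 } };
    unsigned int shared_nbox = 1;
    return clear_boxes(&shared_nbox, user_boxes);
}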
30864 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
30865 index f493c64..524ab6b 100644
30866 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
30867 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
30868 @@ -843,8 +843,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30869 }
30870 if (unlikely(ttm_vm_ops == NULL)) {
30871 ttm_vm_ops = vma->vm_ops;
30872 - radeon_ttm_vm_ops = *ttm_vm_ops;
30873 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30874 + pax_open_kernel();
30875 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
30876 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30877 + pax_close_kernel();
30878 }
30879 vma->vm_ops = &radeon_ttm_vm_ops;
30880 return 0;
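
radeon_ttm.c patches a vm_operations_struct that the hardening otherwise keeps read-only (its .fault hook is swapped for a wrapper), so the write is bracketed with pax_open_kernel()/pax_close_kernel(), which briefly lift the write protection and restore it afterwards. A loose userspace analogue of "make read-only data writable, patch it, re-protect it" using mprotect() on a dedicated page; this only illustrates the idea, it is not how the kernel primitives are implemented:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

struct vm_ops { int (*fault)(void); };

static int default_fault(void) { return -1; }
static int wrapped_fault(void) { return 0; }

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);

    /* Give the "ops table" its own page so it can be write-protected. */
    struct vm_ops *ops = mmap(NULL, page, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ops == MAP_FAILED)
        return 1;
    ops->fault = default_fault;
    mprotect(ops, page, PROT_READ);              /* normally read-only */

    mprotect(ops, page, PROT_READ | PROT_WRITE); /* ~ pax_open_kernel() */
    ops->fault = wrapped_fault;                  /* retarget the hook */
    mprotect(ops, page, PROT_READ);              /* ~ pax_close_kernel() */

    printf("fault() now returns %d\n", ops->fault());
    munmap(ops, page);
    return 0;
}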
30881 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
30882 index f2c3b9d..d5a376b 100644
30883 --- a/drivers/gpu/drm/radeon/rs690.c
30884 +++ b/drivers/gpu/drm/radeon/rs690.c
30885 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
30886 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30887 rdev->pm.sideport_bandwidth.full)
30888 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30889 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
30890 + read_delay_latency.full = dfixed_const(800 * 1000);
30891 read_delay_latency.full = dfixed_div(read_delay_latency,
30892 rdev->pm.igp_sideport_mclk);
30893 + a.full = dfixed_const(370);
30894 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
30895 } else {
30896 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30897 rdev->pm.k8_bandwidth.full)
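
The rs690.c change is an overflow fix rather than a hardening one: assuming dfixed_const() is the usual 20.12 fixed-point constructor (value << 12), dfixed_const(370 * 800 * 1000) shifts 296,000,000 left by 12 bits, roughly 1.2e12, which no longer fits in the 32-bit fixed-point word. The patch therefore builds dfixed_const(800 * 1000), which still fits, divides by the sideport memory clock first, and only then multiplies by 370 in fixed point. The magnitude check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* dfixed_const(A) assumed to be the 20.12 constructor, i.e. A << 12. */
    uint64_t old_arg = (uint64_t)370 * 800 * 1000;       /* 296,000,000 */
    uint64_t new_arg = (uint64_t)800 * 1000;             /*     800,000 */

    printf("370*800*1000 << 12 = %llu (fits in u32: %s)\n",
           (unsigned long long)(old_arg << 12),
           (old_arg << 12) <= UINT32_MAX ? "yes" : "no");
    printf("    800*1000 << 12 = %llu (fits in u32: %s)\n",
           (unsigned long long)(new_arg << 12),
           (new_arg << 12) <= UINT32_MAX ? "yes" : "no");
    return 0;
}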
30898 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30899 index ebc6fac..a8313ed 100644
30900 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
30901 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30902 @@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
30903 static int ttm_pool_mm_shrink(struct shrinker *shrink,
30904 struct shrink_control *sc)
30905 {
30906 - static atomic_t start_pool = ATOMIC_INIT(0);
30907 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
30908 unsigned i;
30909 - unsigned pool_offset = atomic_add_return(1, &start_pool);
30910 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
30911 struct ttm_page_pool *pool;
30912 int shrink_pages = sc->nr_to_scan;
30913
30914 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
30915 index 88edacc..1e5412b 100644
30916 --- a/drivers/gpu/drm/via/via_drv.h
30917 +++ b/drivers/gpu/drm/via/via_drv.h
30918 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30919 typedef uint32_t maskarray_t[5];
30920
30921 typedef struct drm_via_irq {
30922 - atomic_t irq_received;
30923 + atomic_unchecked_t irq_received;
30924 uint32_t pending_mask;
30925 uint32_t enable_mask;
30926 wait_queue_head_t irq_queue;
30927 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
30928 struct timeval last_vblank;
30929 int last_vblank_valid;
30930 unsigned usec_per_vblank;
30931 - atomic_t vbl_received;
30932 + atomic_unchecked_t vbl_received;
30933 drm_via_state_t hc_state;
30934 char pci_buf[VIA_PCI_BUF_SIZE];
30935 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30936 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
30937 index d391f48..10c8ca3 100644
30938 --- a/drivers/gpu/drm/via/via_irq.c
30939 +++ b/drivers/gpu/drm/via/via_irq.c
30940 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
30941 if (crtc != 0)
30942 return 0;
30943
30944 - return atomic_read(&dev_priv->vbl_received);
30945 + return atomic_read_unchecked(&dev_priv->vbl_received);
30946 }
30947
30948 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30949 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30950
30951 status = VIA_READ(VIA_REG_INTERRUPT);
30952 if (status & VIA_IRQ_VBLANK_PENDING) {
30953 - atomic_inc(&dev_priv->vbl_received);
30954 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30955 + atomic_inc_unchecked(&dev_priv->vbl_received);
30956 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30957 do_gettimeofday(&cur_vblank);
30958 if (dev_priv->last_vblank_valid) {
30959 dev_priv->usec_per_vblank =
30960 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30961 dev_priv->last_vblank = cur_vblank;
30962 dev_priv->last_vblank_valid = 1;
30963 }
30964 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30965 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30966 DRM_DEBUG("US per vblank is: %u\n",
30967 dev_priv->usec_per_vblank);
30968 }
30969 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30970
30971 for (i = 0; i < dev_priv->num_irqs; ++i) {
30972 if (status & cur_irq->pending_mask) {
30973 - atomic_inc(&cur_irq->irq_received);
30974 + atomic_inc_unchecked(&cur_irq->irq_received);
30975 DRM_WAKEUP(&cur_irq->irq_queue);
30976 handled = 1;
30977 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
30978 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
30979 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30980 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30981 masks[irq][4]));
30982 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30983 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30984 } else {
30985 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30986 (((cur_irq_sequence =
30987 - atomic_read(&cur_irq->irq_received)) -
30988 + atomic_read_unchecked(&cur_irq->irq_received)) -
30989 *sequence) <= (1 << 23)));
30990 }
30991 *sequence = cur_irq_sequence;
30992 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
30993 }
30994
30995 for (i = 0; i < dev_priv->num_irqs; ++i) {
30996 - atomic_set(&cur_irq->irq_received, 0);
30997 + atomic_set_unchecked(&cur_irq->irq_received, 0);
30998 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30999 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
31000 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
31001 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
31002 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
31003 case VIA_IRQ_RELATIVE:
31004 irqwait->request.sequence +=
31005 - atomic_read(&cur_irq->irq_received);
31006 + atomic_read_unchecked(&cur_irq->irq_received);
31007 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
31008 case VIA_IRQ_ABSOLUTE:
31009 break;
31010 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31011 index d0f2c07..9ebd9c3 100644
31012 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31013 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31014 @@ -263,7 +263,7 @@ struct vmw_private {
31015 * Fencing and IRQs.
31016 */
31017
31018 - atomic_t marker_seq;
31019 + atomic_unchecked_t marker_seq;
31020 wait_queue_head_t fence_queue;
31021 wait_queue_head_t fifo_queue;
31022 int fence_queue_waiters; /* Protected by hw_mutex */
31023 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31024 index a0c2f12..68ae6cb 100644
31025 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31026 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31027 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
31028 (unsigned int) min,
31029 (unsigned int) fifo->capabilities);
31030
31031 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31032 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31033 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
31034 vmw_marker_queue_init(&fifo->marker_queue);
31035 return vmw_fifo_send_fence(dev_priv, &dummy);
31036 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
31037 if (reserveable)
31038 iowrite32(bytes, fifo_mem +
31039 SVGA_FIFO_RESERVED);
31040 - return fifo_mem + (next_cmd >> 2);
31041 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
31042 } else {
31043 need_bounce = true;
31044 }
31045 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31046
31047 fm = vmw_fifo_reserve(dev_priv, bytes);
31048 if (unlikely(fm == NULL)) {
31049 - *seqno = atomic_read(&dev_priv->marker_seq);
31050 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31051 ret = -ENOMEM;
31052 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
31053 false, 3*HZ);
31054 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31055 }
31056
31057 do {
31058 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31059 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31060 } while (*seqno == 0);
31061
31062 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
31063 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31064 index cabc95f..14b3d77 100644
31065 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31066 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31067 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
31068 * emitted. Then the fence is stale and signaled.
31069 */
31070
31071 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31072 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
31073 > VMW_FENCE_WRAP);
31074
31075 return ret;
31076 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
31077
31078 if (fifo_idle)
31079 down_read(&fifo_state->rwsem);
31080 - signal_seq = atomic_read(&dev_priv->marker_seq);
31081 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
31082 ret = 0;
31083
31084 for (;;) {
31085 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31086 index 8a8725c..afed796 100644
31087 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31088 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31089 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31090 while (!vmw_lag_lt(queue, us)) {
31091 spin_lock(&queue->lock);
31092 if (list_empty(&queue->head))
31093 - seqno = atomic_read(&dev_priv->marker_seq);
31094 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31095 else {
31096 marker = list_first_entry(&queue->head,
31097 struct vmw_marker, head);
31098 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
31099 index 4da66b4..e948655 100644
31100 --- a/drivers/hid/hid-core.c
31101 +++ b/drivers/hid/hid-core.c
31102 @@ -2063,7 +2063,7 @@ static bool hid_ignore(struct hid_device *hdev)
31103
31104 int hid_add_device(struct hid_device *hdev)
31105 {
31106 - static atomic_t id = ATOMIC_INIT(0);
31107 + static atomic_unchecked_t id = ATOMIC_INIT(0);
31108 int ret;
31109
31110 if (WARN_ON(hdev->status & HID_STAT_ADDED))
31111 @@ -2078,7 +2078,7 @@ int hid_add_device(struct hid_device *hdev)
31112 /* XXX hack, any other cleaner solution after the driver core
31113 * is converted to allow more than 20 bytes as the device name? */
31114 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31115 - hdev->vendor, hdev->product, atomic_inc_return(&id));
31116 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31117
31118 hid_debug_register(hdev, dev_name(&hdev->dev));
31119 ret = device_add(&hdev->dev);
31120 diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
31121 index eec3291..8ed706b 100644
31122 --- a/drivers/hid/hid-wiimote-debug.c
31123 +++ b/drivers/hid/hid-wiimote-debug.c
31124 @@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
31125 else if (size == 0)
31126 return -EIO;
31127
31128 - if (copy_to_user(u, buf, size))
31129 + if (size > sizeof(buf) || copy_to_user(u, buf, size))
31130 return -EFAULT;
31131
31132 *off += size;
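
The hid-wiimote-debug read handler copies out of a fixed on-stack buffer, so the added size > sizeof(buf) guard rejects oversized requests before copy_to_user() can read past the end of that buffer. The same defensive shape in isolation, with a stub standing in for copy_to_user():

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>

/* Stand-in for copy_to_user(): returns the number of bytes NOT copied. */
static size_t copy_to_user_stub(void *dst, const void *src, size_t n)
{
    memcpy(dst, src, n);
    return 0;
}

static ssize_t debug_read(char *user_buf)
{
    char buf[32];                        /* fixed scratch buffer */
    size_t size = (size_t)snprintf(buf, sizeof(buf), "eeprom bytes\n");

    /* Refuse to expose more than the scratch buffer actually holds. */
    if (size > sizeof(buf) || copy_to_user_stub(user_buf, buf, size))
        return -EFAULT;

    return (ssize_t)size;
}

int main(void)
{
    char out[64];
    ssize_t n = debug_read(out);

    if (n > 0)
        printf("read %zd bytes: %.*s", n, (int)n, out);
    return 0;
}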
31133 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
31134 index b1ec0e2..c295a61 100644
31135 --- a/drivers/hid/usbhid/hiddev.c
31136 +++ b/drivers/hid/usbhid/hiddev.c
31137 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31138 break;
31139
31140 case HIDIOCAPPLICATION:
31141 - if (arg < 0 || arg >= hid->maxapplication)
31142 + if (arg >= hid->maxapplication)
31143 break;
31144
31145 for (i = 0; i < hid->maxcollection; i++)
31146 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31147 index 4065374..10ed7dc 100644
31148 --- a/drivers/hv/channel.c
31149 +++ b/drivers/hv/channel.c
31150 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31151 int ret = 0;
31152 int t;
31153
31154 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31155 - atomic_inc(&vmbus_connection.next_gpadl_handle);
31156 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31157 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31158
31159 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31160 if (ret)
31161 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
31162 index 15956bd..ea34398 100644
31163 --- a/drivers/hv/hv.c
31164 +++ b/drivers/hv/hv.c
31165 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31166 u64 output_address = (output) ? virt_to_phys(output) : 0;
31167 u32 output_address_hi = output_address >> 32;
31168 u32 output_address_lo = output_address & 0xFFFFFFFF;
31169 - void *hypercall_page = hv_context.hypercall_page;
31170 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31171
31172 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31173 "=a"(hv_status_lo) : "d" (control_hi),
31174 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
31175 index 699f0d8..f4f19250 100644
31176 --- a/drivers/hv/hyperv_vmbus.h
31177 +++ b/drivers/hv/hyperv_vmbus.h
31178 @@ -555,7 +555,7 @@ enum vmbus_connect_state {
31179 struct vmbus_connection {
31180 enum vmbus_connect_state conn_state;
31181
31182 - atomic_t next_gpadl_handle;
31183 + atomic_unchecked_t next_gpadl_handle;
31184
31185 /*
31186 * Represents channel interrupts. Each bit position represents a
31187 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
31188 index a220e57..428f54d 100644
31189 --- a/drivers/hv/vmbus_drv.c
31190 +++ b/drivers/hv/vmbus_drv.c
31191 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31192 {
31193 int ret = 0;
31194
31195 - static atomic_t device_num = ATOMIC_INIT(0);
31196 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31197
31198 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31199 - atomic_inc_return(&device_num));
31200 + atomic_inc_return_unchecked(&device_num));
31201
31202 child_device_obj->device.bus = &hv_bus;
31203 child_device_obj->device.parent = &hv_acpi_dev->dev;
31204 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
31205 index 9140236..ceaef4e 100644
31206 --- a/drivers/hwmon/acpi_power_meter.c
31207 +++ b/drivers/hwmon/acpi_power_meter.c
31208 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
31209 return res;
31210
31211 temp /= 1000;
31212 - if (temp < 0)
31213 - return -EINVAL;
31214
31215 mutex_lock(&resource->lock);
31216 resource->trip[attr->index - 7] = temp;
31217 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
31218 index 8b011d0..3de24a1 100644
31219 --- a/drivers/hwmon/sht15.c
31220 +++ b/drivers/hwmon/sht15.c
31221 @@ -166,7 +166,7 @@ struct sht15_data {
31222 int supply_uV;
31223 bool supply_uV_valid;
31224 struct work_struct update_supply_work;
31225 - atomic_t interrupt_handled;
31226 + atomic_unchecked_t interrupt_handled;
31227 };
31228
31229 /**
31230 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
31231 return ret;
31232
31233 gpio_direction_input(data->pdata->gpio_data);
31234 - atomic_set(&data->interrupt_handled, 0);
31235 + atomic_set_unchecked(&data->interrupt_handled, 0);
31236
31237 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31238 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31239 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
31240 /* Only relevant if the interrupt hasn't occurred. */
31241 - if (!atomic_read(&data->interrupt_handled))
31242 + if (!atomic_read_unchecked(&data->interrupt_handled))
31243 schedule_work(&data->read_work);
31244 }
31245 ret = wait_event_timeout(data->wait_queue,
31246 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
31247
31248 /* First disable the interrupt */
31249 disable_irq_nosync(irq);
31250 - atomic_inc(&data->interrupt_handled);
31251 + atomic_inc_unchecked(&data->interrupt_handled);
31252 /* Then schedule a reading work struct */
31253 if (data->state != SHT15_READING_NOTHING)
31254 schedule_work(&data->read_work);
31255 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
31256 * If not, then start the interrupt again - care here as could
31257 * have gone low in meantime so verify it hasn't!
31258 */
31259 - atomic_set(&data->interrupt_handled, 0);
31260 + atomic_set_unchecked(&data->interrupt_handled, 0);
31261 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31262 /* If still not occurred or another handler was scheduled */
31263 if (gpio_get_value(data->pdata->gpio_data)
31264 - || atomic_read(&data->interrupt_handled))
31265 + || atomic_read_unchecked(&data->interrupt_handled))
31266 return;
31267 }
31268
31269 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31270 index 378fcb5..5e91fa8 100644
31271 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
31272 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
31273 @@ -43,7 +43,7 @@
31274 extern struct i2c_adapter amd756_smbus;
31275
31276 static struct i2c_adapter *s4882_adapter;
31277 -static struct i2c_algorithm *s4882_algo;
31278 +static i2c_algorithm_no_const *s4882_algo;
31279
31280 /* Wrapper access functions for multiplexed SMBus */
31281 static DEFINE_MUTEX(amd756_lock);
31282 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31283 index 29015eb..af2d8e9 100644
31284 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31285 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
31286 @@ -41,7 +41,7 @@
31287 extern struct i2c_adapter *nforce2_smbus;
31288
31289 static struct i2c_adapter *s4985_adapter;
31290 -static struct i2c_algorithm *s4985_algo;
31291 +static i2c_algorithm_no_const *s4985_algo;
31292
31293 /* Wrapper access functions for multiplexed SMBus */
31294 static DEFINE_MUTEX(nforce2_lock);
31295 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31296 index d7a4833..7fae376 100644
31297 --- a/drivers/i2c/i2c-mux.c
31298 +++ b/drivers/i2c/i2c-mux.c
31299 @@ -28,7 +28,7 @@
31300 /* multiplexer per channel data */
31301 struct i2c_mux_priv {
31302 struct i2c_adapter adap;
31303 - struct i2c_algorithm algo;
31304 + i2c_algorithm_no_const algo;
31305
31306 struct i2c_adapter *parent;
31307 void *mux_dev; /* the mux chip/device */
31308 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31309 index 57d00ca..0145194 100644
31310 --- a/drivers/ide/aec62xx.c
31311 +++ b/drivers/ide/aec62xx.c
31312 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
31313 .cable_detect = atp86x_cable_detect,
31314 };
31315
31316 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31317 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31318 { /* 0: AEC6210 */
31319 .name = DRV_NAME,
31320 .init_chipset = init_chipset_aec62xx,
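
From here through the rest of the IDE drivers the only change is __devinitdata to __devinitconst on chipset description tables: the objects are already declared const, and the matching annotation places them in the read-only init-data section rather than the writable one. A plain-C approximation of the distinction, with the section names assumed for illustration rather than taken from this tree:

/* Roughly what the two annotations amount to: both are init-time sections
 * in the kernel; the *const variant is additionally read-only. */
#define __devinitdata   __attribute__((__section__(".devinit.data")))
#define __devinitconst  __attribute__((__section__(".devinit.rodata")))

struct port_info { const char *name; int udma_mask; };

/* const object with a writable-section annotation: the mismatch being fixed */
static const struct port_info chipsets_rw[] __devinitdata = {
    { "example-a", 0x3f },
};

/* const object in a read-only section: consistent, and write-protected */
static const struct port_info chipsets_ro[] __devinitconst = {
    { "example-b", 0x7f },
};

int main(void)
{
    return chipsets_rw[0].udma_mask + chipsets_ro[0].udma_mask;
}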
31321 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31322 index 2c8016a..911a27c 100644
31323 --- a/drivers/ide/alim15x3.c
31324 +++ b/drivers/ide/alim15x3.c
31325 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
31326 .dma_sff_read_status = ide_dma_sff_read_status,
31327 };
31328
31329 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
31330 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
31331 .name = DRV_NAME,
31332 .init_chipset = init_chipset_ali15x3,
31333 .init_hwif = init_hwif_ali15x3,
31334 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31335 index 3747b25..56fc995 100644
31336 --- a/drivers/ide/amd74xx.c
31337 +++ b/drivers/ide/amd74xx.c
31338 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
31339 .udma_mask = udma, \
31340 }
31341
31342 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
31343 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
31344 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
31345 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
31346 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
31347 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
31348 index 15f0ead..cb43480 100644
31349 --- a/drivers/ide/atiixp.c
31350 +++ b/drivers/ide/atiixp.c
31351 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
31352 .cable_detect = atiixp_cable_detect,
31353 };
31354
31355 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
31356 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
31357 { /* 0: IXP200/300/400/700 */
31358 .name = DRV_NAME,
31359 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
31360 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
31361 index 5f80312..d1fc438 100644
31362 --- a/drivers/ide/cmd64x.c
31363 +++ b/drivers/ide/cmd64x.c
31364 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
31365 .dma_sff_read_status = ide_dma_sff_read_status,
31366 };
31367
31368 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
31369 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
31370 { /* 0: CMD643 */
31371 .name = DRV_NAME,
31372 .init_chipset = init_chipset_cmd64x,
31373 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
31374 index 2c1e5f7..1444762 100644
31375 --- a/drivers/ide/cs5520.c
31376 +++ b/drivers/ide/cs5520.c
31377 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
31378 .set_dma_mode = cs5520_set_dma_mode,
31379 };
31380
31381 -static const struct ide_port_info cyrix_chipset __devinitdata = {
31382 +static const struct ide_port_info cyrix_chipset __devinitconst = {
31383 .name = DRV_NAME,
31384 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
31385 .port_ops = &cs5520_port_ops,
31386 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
31387 index 4dc4eb9..49b40ad 100644
31388 --- a/drivers/ide/cs5530.c
31389 +++ b/drivers/ide/cs5530.c
31390 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
31391 .udma_filter = cs5530_udma_filter,
31392 };
31393
31394 -static const struct ide_port_info cs5530_chipset __devinitdata = {
31395 +static const struct ide_port_info cs5530_chipset __devinitconst = {
31396 .name = DRV_NAME,
31397 .init_chipset = init_chipset_cs5530,
31398 .init_hwif = init_hwif_cs5530,
31399 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
31400 index 5059faf..18d4c85 100644
31401 --- a/drivers/ide/cs5535.c
31402 +++ b/drivers/ide/cs5535.c
31403 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
31404 .cable_detect = cs5535_cable_detect,
31405 };
31406
31407 -static const struct ide_port_info cs5535_chipset __devinitdata = {
31408 +static const struct ide_port_info cs5535_chipset __devinitconst = {
31409 .name = DRV_NAME,
31410 .port_ops = &cs5535_port_ops,
31411 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
31412 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
31413 index 847553f..3ffb49d 100644
31414 --- a/drivers/ide/cy82c693.c
31415 +++ b/drivers/ide/cy82c693.c
31416 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
31417 .set_dma_mode = cy82c693_set_dma_mode,
31418 };
31419
31420 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
31421 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
31422 .name = DRV_NAME,
31423 .init_iops = init_iops_cy82c693,
31424 .port_ops = &cy82c693_port_ops,
31425 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
31426 index 58c51cd..4aec3b8 100644
31427 --- a/drivers/ide/hpt366.c
31428 +++ b/drivers/ide/hpt366.c
31429 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
31430 }
31431 };
31432
31433 -static const struct hpt_info hpt36x __devinitdata = {
31434 +static const struct hpt_info hpt36x __devinitconst = {
31435 .chip_name = "HPT36x",
31436 .chip_type = HPT36x,
31437 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
31438 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
31439 .timings = &hpt36x_timings
31440 };
31441
31442 -static const struct hpt_info hpt370 __devinitdata = {
31443 +static const struct hpt_info hpt370 __devinitconst = {
31444 .chip_name = "HPT370",
31445 .chip_type = HPT370,
31446 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31447 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
31448 .timings = &hpt37x_timings
31449 };
31450
31451 -static const struct hpt_info hpt370a __devinitdata = {
31452 +static const struct hpt_info hpt370a __devinitconst = {
31453 .chip_name = "HPT370A",
31454 .chip_type = HPT370A,
31455 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31456 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
31457 .timings = &hpt37x_timings
31458 };
31459
31460 -static const struct hpt_info hpt374 __devinitdata = {
31461 +static const struct hpt_info hpt374 __devinitconst = {
31462 .chip_name = "HPT374",
31463 .chip_type = HPT374,
31464 .udma_mask = ATA_UDMA5,
31465 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
31466 .timings = &hpt37x_timings
31467 };
31468
31469 -static const struct hpt_info hpt372 __devinitdata = {
31470 +static const struct hpt_info hpt372 __devinitconst = {
31471 .chip_name = "HPT372",
31472 .chip_type = HPT372,
31473 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31474 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
31475 .timings = &hpt37x_timings
31476 };
31477
31478 -static const struct hpt_info hpt372a __devinitdata = {
31479 +static const struct hpt_info hpt372a __devinitconst = {
31480 .chip_name = "HPT372A",
31481 .chip_type = HPT372A,
31482 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31483 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
31484 .timings = &hpt37x_timings
31485 };
31486
31487 -static const struct hpt_info hpt302 __devinitdata = {
31488 +static const struct hpt_info hpt302 __devinitconst = {
31489 .chip_name = "HPT302",
31490 .chip_type = HPT302,
31491 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31492 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
31493 .timings = &hpt37x_timings
31494 };
31495
31496 -static const struct hpt_info hpt371 __devinitdata = {
31497 +static const struct hpt_info hpt371 __devinitconst = {
31498 .chip_name = "HPT371",
31499 .chip_type = HPT371,
31500 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31501 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
31502 .timings = &hpt37x_timings
31503 };
31504
31505 -static const struct hpt_info hpt372n __devinitdata = {
31506 +static const struct hpt_info hpt372n __devinitconst = {
31507 .chip_name = "HPT372N",
31508 .chip_type = HPT372N,
31509 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31510 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
31511 .timings = &hpt37x_timings
31512 };
31513
31514 -static const struct hpt_info hpt302n __devinitdata = {
31515 +static const struct hpt_info hpt302n __devinitconst = {
31516 .chip_name = "HPT302N",
31517 .chip_type = HPT302N,
31518 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31519 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
31520 .timings = &hpt37x_timings
31521 };
31522
31523 -static const struct hpt_info hpt371n __devinitdata = {
31524 +static const struct hpt_info hpt371n __devinitconst = {
31525 .chip_name = "HPT371N",
31526 .chip_type = HPT371N,
31527 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31528 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
31529 .dma_sff_read_status = ide_dma_sff_read_status,
31530 };
31531
31532 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
31533 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
31534 { /* 0: HPT36x */
31535 .name = DRV_NAME,
31536 .init_chipset = init_chipset_hpt366,
31537 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
31538 index 8126824..55a2798 100644
31539 --- a/drivers/ide/ide-cd.c
31540 +++ b/drivers/ide/ide-cd.c
31541 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
31542 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
31543 if ((unsigned long)buf & alignment
31544 || blk_rq_bytes(rq) & q->dma_pad_mask
31545 - || object_is_on_stack(buf))
31546 + || object_starts_on_stack(buf))
31547 drive->dma = 0;
31548 }
31549 }
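Note: the ide-cd change swaps object_is_on_stack() for object_starts_on_stack() when deciding whether a request buffer is safe for DMA; the relevant question is whether the buffer begins on the current kernel stack, in which case DMA is disabled for the request. A minimal sketch of such a helper (assumed form, given only for illustration — the patch's real definition is added elsewhere in this patch set):

        static inline int object_starts_on_stack(const void *obj)
        {
                const void *stack = task_stack_page(current);

                /* true when the first byte of the object lies inside the
                 * current task's stack */
                return obj >= stack && obj < stack + THREAD_SIZE;
        }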
31550 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
31551 index 7f56b73..dab5b67 100644
31552 --- a/drivers/ide/ide-pci-generic.c
31553 +++ b/drivers/ide/ide-pci-generic.c
31554 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
31555 .udma_mask = ATA_UDMA6, \
31556 }
31557
31558 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
31559 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
31560 /* 0: Unknown */
31561 DECLARE_GENERIC_PCI_DEV(0),
31562
31563 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
31564 index 560e66d..d5dd180 100644
31565 --- a/drivers/ide/it8172.c
31566 +++ b/drivers/ide/it8172.c
31567 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
31568 .set_dma_mode = it8172_set_dma_mode,
31569 };
31570
31571 -static const struct ide_port_info it8172_port_info __devinitdata = {
31572 +static const struct ide_port_info it8172_port_info __devinitconst = {
31573 .name = DRV_NAME,
31574 .port_ops = &it8172_port_ops,
31575 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
31576 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31577 index 46816ba..1847aeb 100644
31578 --- a/drivers/ide/it8213.c
31579 +++ b/drivers/ide/it8213.c
31580 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
31581 .cable_detect = it8213_cable_detect,
31582 };
31583
31584 -static const struct ide_port_info it8213_chipset __devinitdata = {
31585 +static const struct ide_port_info it8213_chipset __devinitconst = {
31586 .name = DRV_NAME,
31587 .enablebits = { {0x41, 0x80, 0x80} },
31588 .port_ops = &it8213_port_ops,
31589 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31590 index 2e3169f..c5611db 100644
31591 --- a/drivers/ide/it821x.c
31592 +++ b/drivers/ide/it821x.c
31593 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
31594 .cable_detect = it821x_cable_detect,
31595 };
31596
31597 -static const struct ide_port_info it821x_chipset __devinitdata = {
31598 +static const struct ide_port_info it821x_chipset __devinitconst = {
31599 .name = DRV_NAME,
31600 .init_chipset = init_chipset_it821x,
31601 .init_hwif = init_hwif_it821x,
31602 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31603 index 74c2c4a..efddd7d 100644
31604 --- a/drivers/ide/jmicron.c
31605 +++ b/drivers/ide/jmicron.c
31606 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
31607 .cable_detect = jmicron_cable_detect,
31608 };
31609
31610 -static const struct ide_port_info jmicron_chipset __devinitdata = {
31611 +static const struct ide_port_info jmicron_chipset __devinitconst = {
31612 .name = DRV_NAME,
31613 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31614 .port_ops = &jmicron_port_ops,
31615 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31616 index 95327a2..73f78d8 100644
31617 --- a/drivers/ide/ns87415.c
31618 +++ b/drivers/ide/ns87415.c
31619 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31620 .dma_sff_read_status = superio_dma_sff_read_status,
31621 };
31622
31623 -static const struct ide_port_info ns87415_chipset __devinitdata = {
31624 +static const struct ide_port_info ns87415_chipset __devinitconst = {
31625 .name = DRV_NAME,
31626 .init_hwif = init_hwif_ns87415,
31627 .tp_ops = &ns87415_tp_ops,
31628 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31629 index 1a53a4c..39edc66 100644
31630 --- a/drivers/ide/opti621.c
31631 +++ b/drivers/ide/opti621.c
31632 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31633 .set_pio_mode = opti621_set_pio_mode,
31634 };
31635
31636 -static const struct ide_port_info opti621_chipset __devinitdata = {
31637 +static const struct ide_port_info opti621_chipset __devinitconst = {
31638 .name = DRV_NAME,
31639 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31640 .port_ops = &opti621_port_ops,
31641 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31642 index 9546fe2..2e5ceb6 100644
31643 --- a/drivers/ide/pdc202xx_new.c
31644 +++ b/drivers/ide/pdc202xx_new.c
31645 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31646 .udma_mask = udma, \
31647 }
31648
31649 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31650 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31651 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31652 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31653 };
31654 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31655 index 3a35ec6..5634510 100644
31656 --- a/drivers/ide/pdc202xx_old.c
31657 +++ b/drivers/ide/pdc202xx_old.c
31658 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31659 .max_sectors = sectors, \
31660 }
31661
31662 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31663 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31664 { /* 0: PDC20246 */
31665 .name = DRV_NAME,
31666 .init_chipset = init_chipset_pdc202xx,
31667 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31668 index 1892e81..fe0fd60 100644
31669 --- a/drivers/ide/piix.c
31670 +++ b/drivers/ide/piix.c
31671 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31672 .udma_mask = udma, \
31673 }
31674
31675 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
31676 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
31677 /* 0: MPIIX */
31678 { /*
31679 * MPIIX actually has only a single IDE channel mapped to
31680 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31681 index a6414a8..c04173e 100644
31682 --- a/drivers/ide/rz1000.c
31683 +++ b/drivers/ide/rz1000.c
31684 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31685 }
31686 }
31687
31688 -static const struct ide_port_info rz1000_chipset __devinitdata = {
31689 +static const struct ide_port_info rz1000_chipset __devinitconst = {
31690 .name = DRV_NAME,
31691 .host_flags = IDE_HFLAG_NO_DMA,
31692 };
31693 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31694 index 356b9b5..d4758eb 100644
31695 --- a/drivers/ide/sc1200.c
31696 +++ b/drivers/ide/sc1200.c
31697 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31698 .dma_sff_read_status = ide_dma_sff_read_status,
31699 };
31700
31701 -static const struct ide_port_info sc1200_chipset __devinitdata = {
31702 +static const struct ide_port_info sc1200_chipset __devinitconst = {
31703 .name = DRV_NAME,
31704 .port_ops = &sc1200_port_ops,
31705 .dma_ops = &sc1200_dma_ops,
31706 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31707 index b7f5b0c..9701038 100644
31708 --- a/drivers/ide/scc_pata.c
31709 +++ b/drivers/ide/scc_pata.c
31710 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
31711 .dma_sff_read_status = scc_dma_sff_read_status,
31712 };
31713
31714 -static const struct ide_port_info scc_chipset __devinitdata = {
31715 +static const struct ide_port_info scc_chipset __devinitconst = {
31716 .name = "sccIDE",
31717 .init_iops = init_iops_scc,
31718 .init_dma = scc_init_dma,
31719 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
31720 index 35fb8da..24d72ef 100644
31721 --- a/drivers/ide/serverworks.c
31722 +++ b/drivers/ide/serverworks.c
31723 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
31724 .cable_detect = svwks_cable_detect,
31725 };
31726
31727 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
31728 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
31729 { /* 0: OSB4 */
31730 .name = DRV_NAME,
31731 .init_chipset = init_chipset_svwks,
31732 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
31733 index ddeda44..46f7e30 100644
31734 --- a/drivers/ide/siimage.c
31735 +++ b/drivers/ide/siimage.c
31736 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
31737 .udma_mask = ATA_UDMA6, \
31738 }
31739
31740 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
31741 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
31742 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
31743 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
31744 };
31745 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
31746 index 4a00225..09e61b4 100644
31747 --- a/drivers/ide/sis5513.c
31748 +++ b/drivers/ide/sis5513.c
31749 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
31750 .cable_detect = sis_cable_detect,
31751 };
31752
31753 -static const struct ide_port_info sis5513_chipset __devinitdata = {
31754 +static const struct ide_port_info sis5513_chipset __devinitconst = {
31755 .name = DRV_NAME,
31756 .init_chipset = init_chipset_sis5513,
31757 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
31758 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
31759 index f21dc2a..d051cd2 100644
31760 --- a/drivers/ide/sl82c105.c
31761 +++ b/drivers/ide/sl82c105.c
31762 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
31763 .dma_sff_read_status = ide_dma_sff_read_status,
31764 };
31765
31766 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
31767 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
31768 .name = DRV_NAME,
31769 .init_chipset = init_chipset_sl82c105,
31770 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
31771 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
31772 index 864ffe0..863a5e9 100644
31773 --- a/drivers/ide/slc90e66.c
31774 +++ b/drivers/ide/slc90e66.c
31775 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
31776 .cable_detect = slc90e66_cable_detect,
31777 };
31778
31779 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
31780 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
31781 .name = DRV_NAME,
31782 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
31783 .port_ops = &slc90e66_port_ops,
31784 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
31785 index 4799d5c..1794678 100644
31786 --- a/drivers/ide/tc86c001.c
31787 +++ b/drivers/ide/tc86c001.c
31788 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
31789 .dma_sff_read_status = ide_dma_sff_read_status,
31790 };
31791
31792 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
31793 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
31794 .name = DRV_NAME,
31795 .init_hwif = init_hwif_tc86c001,
31796 .port_ops = &tc86c001_port_ops,
31797 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
31798 index 281c914..55ce1b8 100644
31799 --- a/drivers/ide/triflex.c
31800 +++ b/drivers/ide/triflex.c
31801 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
31802 .set_dma_mode = triflex_set_mode,
31803 };
31804
31805 -static const struct ide_port_info triflex_device __devinitdata = {
31806 +static const struct ide_port_info triflex_device __devinitconst = {
31807 .name = DRV_NAME,
31808 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
31809 .port_ops = &triflex_port_ops,
31810 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
31811 index 4b42ca0..e494a98 100644
31812 --- a/drivers/ide/trm290.c
31813 +++ b/drivers/ide/trm290.c
31814 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
31815 .dma_check = trm290_dma_check,
31816 };
31817
31818 -static const struct ide_port_info trm290_chipset __devinitdata = {
31819 +static const struct ide_port_info trm290_chipset __devinitconst = {
31820 .name = DRV_NAME,
31821 .init_hwif = init_hwif_trm290,
31822 .tp_ops = &trm290_tp_ops,
31823 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
31824 index f46f49c..eb77678 100644
31825 --- a/drivers/ide/via82cxxx.c
31826 +++ b/drivers/ide/via82cxxx.c
31827 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
31828 .cable_detect = via82cxxx_cable_detect,
31829 };
31830
31831 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
31832 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
31833 .name = DRV_NAME,
31834 .init_chipset = init_chipset_via82cxxx,
31835 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
31836 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
31837 index 73d4531..c90cd2d 100644
31838 --- a/drivers/ieee802154/fakehard.c
31839 +++ b/drivers/ieee802154/fakehard.c
31840 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
31841 phy->transmit_power = 0xbf;
31842
31843 dev->netdev_ops = &fake_ops;
31844 - dev->ml_priv = &fake_mlme;
31845 + dev->ml_priv = (void *)&fake_mlme;
31846
31847 priv = netdev_priv(dev);
31848 priv->phy = phy;
31849 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
31850 index c889aae..6cf5aa7 100644
31851 --- a/drivers/infiniband/core/cm.c
31852 +++ b/drivers/infiniband/core/cm.c
31853 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
31854
31855 struct cm_counter_group {
31856 struct kobject obj;
31857 - atomic_long_t counter[CM_ATTR_COUNT];
31858 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
31859 };
31860
31861 struct cm_counter_attribute {
31862 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
31863 struct ib_mad_send_buf *msg = NULL;
31864 int ret;
31865
31866 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31867 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31868 counter[CM_REQ_COUNTER]);
31869
31870 /* Quick state check to discard duplicate REQs. */
31871 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
31872 if (!cm_id_priv)
31873 return;
31874
31875 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31876 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31877 counter[CM_REP_COUNTER]);
31878 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
31879 if (ret)
31880 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
31881 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
31882 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
31883 spin_unlock_irq(&cm_id_priv->lock);
31884 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31885 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31886 counter[CM_RTU_COUNTER]);
31887 goto out;
31888 }
31889 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
31890 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
31891 dreq_msg->local_comm_id);
31892 if (!cm_id_priv) {
31893 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31894 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31895 counter[CM_DREQ_COUNTER]);
31896 cm_issue_drep(work->port, work->mad_recv_wc);
31897 return -EINVAL;
31898 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
31899 case IB_CM_MRA_REP_RCVD:
31900 break;
31901 case IB_CM_TIMEWAIT:
31902 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31903 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31904 counter[CM_DREQ_COUNTER]);
31905 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31906 goto unlock;
31907 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
31908 cm_free_msg(msg);
31909 goto deref;
31910 case IB_CM_DREQ_RCVD:
31911 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31912 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31913 counter[CM_DREQ_COUNTER]);
31914 goto unlock;
31915 default:
31916 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
31917 ib_modify_mad(cm_id_priv->av.port->mad_agent,
31918 cm_id_priv->msg, timeout)) {
31919 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
31920 - atomic_long_inc(&work->port->
31921 + atomic_long_inc_unchecked(&work->port->
31922 counter_group[CM_RECV_DUPLICATES].
31923 counter[CM_MRA_COUNTER]);
31924 goto out;
31925 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
31926 break;
31927 case IB_CM_MRA_REQ_RCVD:
31928 case IB_CM_MRA_REP_RCVD:
31929 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31930 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31931 counter[CM_MRA_COUNTER]);
31932 /* fall through */
31933 default:
31934 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
31935 case IB_CM_LAP_IDLE:
31936 break;
31937 case IB_CM_MRA_LAP_SENT:
31938 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31939 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31940 counter[CM_LAP_COUNTER]);
31941 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31942 goto unlock;
31943 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
31944 cm_free_msg(msg);
31945 goto deref;
31946 case IB_CM_LAP_RCVD:
31947 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31948 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31949 counter[CM_LAP_COUNTER]);
31950 goto unlock;
31951 default:
31952 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
31953 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
31954 if (cur_cm_id_priv) {
31955 spin_unlock_irq(&cm.lock);
31956 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31957 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31958 counter[CM_SIDR_REQ_COUNTER]);
31959 goto out; /* Duplicate message. */
31960 }
31961 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
31962 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
31963 msg->retries = 1;
31964
31965 - atomic_long_add(1 + msg->retries,
31966 + atomic_long_add_unchecked(1 + msg->retries,
31967 &port->counter_group[CM_XMIT].counter[attr_index]);
31968 if (msg->retries)
31969 - atomic_long_add(msg->retries,
31970 + atomic_long_add_unchecked(msg->retries,
31971 &port->counter_group[CM_XMIT_RETRIES].
31972 counter[attr_index]);
31973
31974 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
31975 }
31976
31977 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
31978 - atomic_long_inc(&port->counter_group[CM_RECV].
31979 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
31980 counter[attr_id - CM_ATTR_ID_OFFSET]);
31981
31982 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
31983 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
31984 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
31985
31986 return sprintf(buf, "%ld\n",
31987 - atomic_long_read(&group->counter[cm_attr->index]));
31988 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
31989 }
31990
31991 static const struct sysfs_ops cm_counter_ops = {
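Note: every counter converted to a *_unchecked atomic type in this file (and in the fmr_pool, cxgb4, ipath, nes, input and capi hunks that follow) is a statistics or instance-numbering counter whose value never drives a reference-count decision. PaX's REFCOUNT hardening makes the ordinary atomic_t/atomic_long_t operations trap on overflow; counters that are allowed to wrap opt out by using the _unchecked variants, which behave exactly like the plain ones minus the overflow check. A rough sketch of the shape of the opt-out type (an assumption for orientation only; the real definitions are introduced by this patch under the arch and asm-generic headers):

        #ifdef CONFIG_PAX_REFCOUNT
        typedef struct {
                long counter;
        } atomic_long_unchecked_t;

        static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *v)
        {
                v->counter++;   /* illustrative only: the real helper is a proper
                                 * atomic op, just without overflow instrumentation */
        }
        #else
        typedef atomic_long_t atomic_long_unchecked_t;
        #define atomic_long_inc_unchecked(v)    atomic_long_inc(v)
        #endif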
31992 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
31993 index 176c8f9..2627b62 100644
31994 --- a/drivers/infiniband/core/fmr_pool.c
31995 +++ b/drivers/infiniband/core/fmr_pool.c
31996 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
31997
31998 struct task_struct *thread;
31999
32000 - atomic_t req_ser;
32001 - atomic_t flush_ser;
32002 + atomic_unchecked_t req_ser;
32003 + atomic_unchecked_t flush_ser;
32004
32005 wait_queue_head_t force_wait;
32006 };
32007 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32008 struct ib_fmr_pool *pool = pool_ptr;
32009
32010 do {
32011 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
32012 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
32013 ib_fmr_batch_release(pool);
32014
32015 - atomic_inc(&pool->flush_ser);
32016 + atomic_inc_unchecked(&pool->flush_ser);
32017 wake_up_interruptible(&pool->force_wait);
32018
32019 if (pool->flush_function)
32020 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32021 }
32022
32023 set_current_state(TASK_INTERRUPTIBLE);
32024 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
32025 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
32026 !kthread_should_stop())
32027 schedule();
32028 __set_current_state(TASK_RUNNING);
32029 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
32030 pool->dirty_watermark = params->dirty_watermark;
32031 pool->dirty_len = 0;
32032 spin_lock_init(&pool->pool_lock);
32033 - atomic_set(&pool->req_ser, 0);
32034 - atomic_set(&pool->flush_ser, 0);
32035 + atomic_set_unchecked(&pool->req_ser, 0);
32036 + atomic_set_unchecked(&pool->flush_ser, 0);
32037 init_waitqueue_head(&pool->force_wait);
32038
32039 pool->thread = kthread_run(ib_fmr_cleanup_thread,
32040 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
32041 }
32042 spin_unlock_irq(&pool->pool_lock);
32043
32044 - serial = atomic_inc_return(&pool->req_ser);
32045 + serial = atomic_inc_return_unchecked(&pool->req_ser);
32046 wake_up_process(pool->thread);
32047
32048 if (wait_event_interruptible(pool->force_wait,
32049 - atomic_read(&pool->flush_ser) - serial >= 0))
32050 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
32051 return -EINTR;
32052
32053 return 0;
32054 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
32055 } else {
32056 list_add_tail(&fmr->list, &pool->dirty_list);
32057 if (++pool->dirty_len >= pool->dirty_watermark) {
32058 - atomic_inc(&pool->req_ser);
32059 + atomic_inc_unchecked(&pool->req_ser);
32060 wake_up_process(pool->thread);
32061 }
32062 }
32063 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32064 index 40c8353..946b0e4 100644
32065 --- a/drivers/infiniband/hw/cxgb4/mem.c
32066 +++ b/drivers/infiniband/hw/cxgb4/mem.c
32067 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32068 int err;
32069 struct fw_ri_tpte tpt;
32070 u32 stag_idx;
32071 - static atomic_t key;
32072 + static atomic_unchecked_t key;
32073
32074 if (c4iw_fatal_error(rdev))
32075 return -EIO;
32076 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32077 &rdev->resource.tpt_fifo_lock);
32078 if (!stag_idx)
32079 return -ENOMEM;
32080 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32081 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32082 }
32083 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32084 __func__, stag_state, type, pdid, stag_idx);
32085 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32086 index 79b3dbc..96e5fcc 100644
32087 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
32088 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32089 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32090 struct ib_atomic_eth *ateth;
32091 struct ipath_ack_entry *e;
32092 u64 vaddr;
32093 - atomic64_t *maddr;
32094 + atomic64_unchecked_t *maddr;
32095 u64 sdata;
32096 u32 rkey;
32097 u8 next;
32098 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32099 IB_ACCESS_REMOTE_ATOMIC)))
32100 goto nack_acc_unlck;
32101 /* Perform atomic OP and save result. */
32102 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32103 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32104 sdata = be64_to_cpu(ateth->swap_data);
32105 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32106 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32107 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32108 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32109 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32110 be64_to_cpu(ateth->compare_data),
32111 sdata);
32112 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32113 index 1f95bba..9530f87 100644
32114 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32115 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32116 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
32117 unsigned long flags;
32118 struct ib_wc wc;
32119 u64 sdata;
32120 - atomic64_t *maddr;
32121 + atomic64_unchecked_t *maddr;
32122 enum ib_wc_status send_status;
32123
32124 /*
32125 @@ -382,11 +382,11 @@ again:
32126 IB_ACCESS_REMOTE_ATOMIC)))
32127 goto acc_err;
32128 /* Perform atomic OP and save result. */
32129 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32130 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32131 sdata = wqe->wr.wr.atomic.compare_add;
32132 *(u64 *) sqp->s_sge.sge.vaddr =
32133 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32134 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32135 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32136 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32137 sdata, wqe->wr.wr.atomic.swap);
32138 goto send_comp;
32139 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
32140 index 7140199..da60063 100644
32141 --- a/drivers/infiniband/hw/nes/nes.c
32142 +++ b/drivers/infiniband/hw/nes/nes.c
32143 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
32144 LIST_HEAD(nes_adapter_list);
32145 static LIST_HEAD(nes_dev_list);
32146
32147 -atomic_t qps_destroyed;
32148 +atomic_unchecked_t qps_destroyed;
32149
32150 static unsigned int ee_flsh_adapter;
32151 static unsigned int sysfs_nonidx_addr;
32152 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
32153 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32154 struct nes_adapter *nesadapter = nesdev->nesadapter;
32155
32156 - atomic_inc(&qps_destroyed);
32157 + atomic_inc_unchecked(&qps_destroyed);
32158
32159 /* Free the control structures */
32160
32161 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
32162 index c438e46..ca30356 100644
32163 --- a/drivers/infiniband/hw/nes/nes.h
32164 +++ b/drivers/infiniband/hw/nes/nes.h
32165 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
32166 extern unsigned int wqm_quanta;
32167 extern struct list_head nes_adapter_list;
32168
32169 -extern atomic_t cm_connects;
32170 -extern atomic_t cm_accepts;
32171 -extern atomic_t cm_disconnects;
32172 -extern atomic_t cm_closes;
32173 -extern atomic_t cm_connecteds;
32174 -extern atomic_t cm_connect_reqs;
32175 -extern atomic_t cm_rejects;
32176 -extern atomic_t mod_qp_timouts;
32177 -extern atomic_t qps_created;
32178 -extern atomic_t qps_destroyed;
32179 -extern atomic_t sw_qps_destroyed;
32180 +extern atomic_unchecked_t cm_connects;
32181 +extern atomic_unchecked_t cm_accepts;
32182 +extern atomic_unchecked_t cm_disconnects;
32183 +extern atomic_unchecked_t cm_closes;
32184 +extern atomic_unchecked_t cm_connecteds;
32185 +extern atomic_unchecked_t cm_connect_reqs;
32186 +extern atomic_unchecked_t cm_rejects;
32187 +extern atomic_unchecked_t mod_qp_timouts;
32188 +extern atomic_unchecked_t qps_created;
32189 +extern atomic_unchecked_t qps_destroyed;
32190 +extern atomic_unchecked_t sw_qps_destroyed;
32191 extern u32 mh_detected;
32192 extern u32 mh_pauses_sent;
32193 extern u32 cm_packets_sent;
32194 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
32195 extern u32 cm_packets_received;
32196 extern u32 cm_packets_dropped;
32197 extern u32 cm_packets_retrans;
32198 -extern atomic_t cm_listens_created;
32199 -extern atomic_t cm_listens_destroyed;
32200 +extern atomic_unchecked_t cm_listens_created;
32201 +extern atomic_unchecked_t cm_listens_destroyed;
32202 extern u32 cm_backlog_drops;
32203 -extern atomic_t cm_loopbacks;
32204 -extern atomic_t cm_nodes_created;
32205 -extern atomic_t cm_nodes_destroyed;
32206 -extern atomic_t cm_accel_dropped_pkts;
32207 -extern atomic_t cm_resets_recvd;
32208 -extern atomic_t pau_qps_created;
32209 -extern atomic_t pau_qps_destroyed;
32210 +extern atomic_unchecked_t cm_loopbacks;
32211 +extern atomic_unchecked_t cm_nodes_created;
32212 +extern atomic_unchecked_t cm_nodes_destroyed;
32213 +extern atomic_unchecked_t cm_accel_dropped_pkts;
32214 +extern atomic_unchecked_t cm_resets_recvd;
32215 +extern atomic_unchecked_t pau_qps_created;
32216 +extern atomic_unchecked_t pau_qps_destroyed;
32217
32218 extern u32 int_mod_timer_init;
32219 extern u32 int_mod_cq_depth_256;
32220 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
32221 index 71edfbb..15b62ae 100644
32222 --- a/drivers/infiniband/hw/nes/nes_cm.c
32223 +++ b/drivers/infiniband/hw/nes/nes_cm.c
32224 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32225 u32 cm_packets_retrans;
32226 u32 cm_packets_created;
32227 u32 cm_packets_received;
32228 -atomic_t cm_listens_created;
32229 -atomic_t cm_listens_destroyed;
32230 +atomic_unchecked_t cm_listens_created;
32231 +atomic_unchecked_t cm_listens_destroyed;
32232 u32 cm_backlog_drops;
32233 -atomic_t cm_loopbacks;
32234 -atomic_t cm_nodes_created;
32235 -atomic_t cm_nodes_destroyed;
32236 -atomic_t cm_accel_dropped_pkts;
32237 -atomic_t cm_resets_recvd;
32238 +atomic_unchecked_t cm_loopbacks;
32239 +atomic_unchecked_t cm_nodes_created;
32240 +atomic_unchecked_t cm_nodes_destroyed;
32241 +atomic_unchecked_t cm_accel_dropped_pkts;
32242 +atomic_unchecked_t cm_resets_recvd;
32243
32244 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32245 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32246 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
32247
32248 static struct nes_cm_core *g_cm_core;
32249
32250 -atomic_t cm_connects;
32251 -atomic_t cm_accepts;
32252 -atomic_t cm_disconnects;
32253 -atomic_t cm_closes;
32254 -atomic_t cm_connecteds;
32255 -atomic_t cm_connect_reqs;
32256 -atomic_t cm_rejects;
32257 +atomic_unchecked_t cm_connects;
32258 +atomic_unchecked_t cm_accepts;
32259 +atomic_unchecked_t cm_disconnects;
32260 +atomic_unchecked_t cm_closes;
32261 +atomic_unchecked_t cm_connecteds;
32262 +atomic_unchecked_t cm_connect_reqs;
32263 +atomic_unchecked_t cm_rejects;
32264
32265 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32266 {
32267 @@ -1279,7 +1279,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
32268 kfree(listener);
32269 listener = NULL;
32270 ret = 0;
32271 - atomic_inc(&cm_listens_destroyed);
32272 + atomic_inc_unchecked(&cm_listens_destroyed);
32273 } else {
32274 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32275 }
32276 @@ -1482,7 +1482,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
32277 cm_node->rem_mac);
32278
32279 add_hte_node(cm_core, cm_node);
32280 - atomic_inc(&cm_nodes_created);
32281 + atomic_inc_unchecked(&cm_nodes_created);
32282
32283 return cm_node;
32284 }
32285 @@ -1540,7 +1540,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
32286 }
32287
32288 atomic_dec(&cm_core->node_cnt);
32289 - atomic_inc(&cm_nodes_destroyed);
32290 + atomic_inc_unchecked(&cm_nodes_destroyed);
32291 nesqp = cm_node->nesqp;
32292 if (nesqp) {
32293 nesqp->cm_node = NULL;
32294 @@ -1604,7 +1604,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
32295
32296 static void drop_packet(struct sk_buff *skb)
32297 {
32298 - atomic_inc(&cm_accel_dropped_pkts);
32299 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32300 dev_kfree_skb_any(skb);
32301 }
32302
32303 @@ -1667,7 +1667,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
32304 {
32305
32306 int reset = 0; /* whether to send reset in case of err.. */
32307 - atomic_inc(&cm_resets_recvd);
32308 + atomic_inc_unchecked(&cm_resets_recvd);
32309 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32310 " refcnt=%d\n", cm_node, cm_node->state,
32311 atomic_read(&cm_node->ref_count));
32312 @@ -2308,7 +2308,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
32313 rem_ref_cm_node(cm_node->cm_core, cm_node);
32314 return NULL;
32315 }
32316 - atomic_inc(&cm_loopbacks);
32317 + atomic_inc_unchecked(&cm_loopbacks);
32318 loopbackremotenode->loopbackpartner = cm_node;
32319 loopbackremotenode->tcp_cntxt.rcv_wscale =
32320 NES_CM_DEFAULT_RCV_WND_SCALE;
32321 @@ -2583,7 +2583,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
32322 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
32323 else {
32324 rem_ref_cm_node(cm_core, cm_node);
32325 - atomic_inc(&cm_accel_dropped_pkts);
32326 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32327 dev_kfree_skb_any(skb);
32328 }
32329 break;
32330 @@ -2890,7 +2890,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32331
32332 if ((cm_id) && (cm_id->event_handler)) {
32333 if (issue_disconn) {
32334 - atomic_inc(&cm_disconnects);
32335 + atomic_inc_unchecked(&cm_disconnects);
32336 cm_event.event = IW_CM_EVENT_DISCONNECT;
32337 cm_event.status = disconn_status;
32338 cm_event.local_addr = cm_id->local_addr;
32339 @@ -2912,7 +2912,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32340 }
32341
32342 if (issue_close) {
32343 - atomic_inc(&cm_closes);
32344 + atomic_inc_unchecked(&cm_closes);
32345 nes_disconnect(nesqp, 1);
32346
32347 cm_id->provider_data = nesqp;
32348 @@ -3048,7 +3048,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32349
32350 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
32351 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
32352 - atomic_inc(&cm_accepts);
32353 + atomic_inc_unchecked(&cm_accepts);
32354
32355 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
32356 netdev_refcnt_read(nesvnic->netdev));
32357 @@ -3250,7 +3250,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
32358 struct nes_cm_core *cm_core;
32359 u8 *start_buff;
32360
32361 - atomic_inc(&cm_rejects);
32362 + atomic_inc_unchecked(&cm_rejects);
32363 cm_node = (struct nes_cm_node *)cm_id->provider_data;
32364 loopback = cm_node->loopbackpartner;
32365 cm_core = cm_node->cm_core;
32366 @@ -3310,7 +3310,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32367 ntohl(cm_id->local_addr.sin_addr.s_addr),
32368 ntohs(cm_id->local_addr.sin_port));
32369
32370 - atomic_inc(&cm_connects);
32371 + atomic_inc_unchecked(&cm_connects);
32372 nesqp->active_conn = 1;
32373
32374 /* cache the cm_id in the qp */
32375 @@ -3416,7 +3416,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
32376 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
32377 return err;
32378 }
32379 - atomic_inc(&cm_listens_created);
32380 + atomic_inc_unchecked(&cm_listens_created);
32381 }
32382
32383 cm_id->add_ref(cm_id);
32384 @@ -3517,7 +3517,7 @@ static void cm_event_connected(struct nes_cm_event *event)
32385
32386 if (nesqp->destroyed)
32387 return;
32388 - atomic_inc(&cm_connecteds);
32389 + atomic_inc_unchecked(&cm_connecteds);
32390 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
32391 " local port 0x%04X. jiffies = %lu.\n",
32392 nesqp->hwqp.qp_id,
32393 @@ -3704,7 +3704,7 @@ static void cm_event_reset(struct nes_cm_event *event)
32394
32395 cm_id->add_ref(cm_id);
32396 ret = cm_id->event_handler(cm_id, &cm_event);
32397 - atomic_inc(&cm_closes);
32398 + atomic_inc_unchecked(&cm_closes);
32399 cm_event.event = IW_CM_EVENT_CLOSE;
32400 cm_event.status = 0;
32401 cm_event.provider_data = cm_id->provider_data;
32402 @@ -3740,7 +3740,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
32403 return;
32404 cm_id = cm_node->cm_id;
32405
32406 - atomic_inc(&cm_connect_reqs);
32407 + atomic_inc_unchecked(&cm_connect_reqs);
32408 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32409 cm_node, cm_id, jiffies);
32410
32411 @@ -3780,7 +3780,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
32412 return;
32413 cm_id = cm_node->cm_id;
32414
32415 - atomic_inc(&cm_connect_reqs);
32416 + atomic_inc_unchecked(&cm_connect_reqs);
32417 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32418 cm_node, cm_id, jiffies);
32419
32420 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
32421 index 3ba7be3..c81f6ff 100644
32422 --- a/drivers/infiniband/hw/nes/nes_mgt.c
32423 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
32424 @@ -40,8 +40,8 @@
32425 #include "nes.h"
32426 #include "nes_mgt.h"
32427
32428 -atomic_t pau_qps_created;
32429 -atomic_t pau_qps_destroyed;
32430 +atomic_unchecked_t pau_qps_created;
32431 +atomic_unchecked_t pau_qps_destroyed;
32432
32433 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
32434 {
32435 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
32436 {
32437 struct sk_buff *skb;
32438 unsigned long flags;
32439 - atomic_inc(&pau_qps_destroyed);
32440 + atomic_inc_unchecked(&pau_qps_destroyed);
32441
32442 /* Free packets that have not yet been forwarded */
32443 /* Lock is acquired by skb_dequeue when removing the skb */
32444 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
32445 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
32446 skb_queue_head_init(&nesqp->pau_list);
32447 spin_lock_init(&nesqp->pau_lock);
32448 - atomic_inc(&pau_qps_created);
32449 + atomic_inc_unchecked(&pau_qps_created);
32450 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
32451 }
32452
32453 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
32454 index f3a3ecf..57d311d 100644
32455 --- a/drivers/infiniband/hw/nes/nes_nic.c
32456 +++ b/drivers/infiniband/hw/nes/nes_nic.c
32457 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
32458 target_stat_values[++index] = mh_detected;
32459 target_stat_values[++index] = mh_pauses_sent;
32460 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
32461 - target_stat_values[++index] = atomic_read(&cm_connects);
32462 - target_stat_values[++index] = atomic_read(&cm_accepts);
32463 - target_stat_values[++index] = atomic_read(&cm_disconnects);
32464 - target_stat_values[++index] = atomic_read(&cm_connecteds);
32465 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
32466 - target_stat_values[++index] = atomic_read(&cm_rejects);
32467 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
32468 - target_stat_values[++index] = atomic_read(&qps_created);
32469 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
32470 - target_stat_values[++index] = atomic_read(&qps_destroyed);
32471 - target_stat_values[++index] = atomic_read(&cm_closes);
32472 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
32473 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
32474 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
32475 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
32476 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
32477 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
32478 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
32479 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
32480 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
32481 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
32482 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
32483 target_stat_values[++index] = cm_packets_sent;
32484 target_stat_values[++index] = cm_packets_bounced;
32485 target_stat_values[++index] = cm_packets_created;
32486 target_stat_values[++index] = cm_packets_received;
32487 target_stat_values[++index] = cm_packets_dropped;
32488 target_stat_values[++index] = cm_packets_retrans;
32489 - target_stat_values[++index] = atomic_read(&cm_listens_created);
32490 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
32491 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
32492 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
32493 target_stat_values[++index] = cm_backlog_drops;
32494 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
32495 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
32496 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
32497 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
32498 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
32499 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
32500 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
32501 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
32502 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
32503 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
32504 target_stat_values[++index] = nesadapter->free_4kpbl;
32505 target_stat_values[++index] = nesadapter->free_256pbl;
32506 target_stat_values[++index] = int_mod_timer_init;
32507 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
32508 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
32509 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
32510 - target_stat_values[++index] = atomic_read(&pau_qps_created);
32511 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
32512 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
32513 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
32514 }
32515
32516 /**
32517 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
32518 index 8b8812d..a5e1133 100644
32519 --- a/drivers/infiniband/hw/nes/nes_verbs.c
32520 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
32521 @@ -46,9 +46,9 @@
32522
32523 #include <rdma/ib_umem.h>
32524
32525 -atomic_t mod_qp_timouts;
32526 -atomic_t qps_created;
32527 -atomic_t sw_qps_destroyed;
32528 +atomic_unchecked_t mod_qp_timouts;
32529 +atomic_unchecked_t qps_created;
32530 +atomic_unchecked_t sw_qps_destroyed;
32531
32532 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
32533
32534 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
32535 if (init_attr->create_flags)
32536 return ERR_PTR(-EINVAL);
32537
32538 - atomic_inc(&qps_created);
32539 + atomic_inc_unchecked(&qps_created);
32540 switch (init_attr->qp_type) {
32541 case IB_QPT_RC:
32542 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
32543 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
32544 struct iw_cm_event cm_event;
32545 int ret = 0;
32546
32547 - atomic_inc(&sw_qps_destroyed);
32548 + atomic_inc_unchecked(&sw_qps_destroyed);
32549 nesqp->destroyed = 1;
32550
32551 /* Blow away the connection if it exists. */
32552 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
32553 index 6b811e3..f8acf88 100644
32554 --- a/drivers/infiniband/hw/qib/qib.h
32555 +++ b/drivers/infiniband/hw/qib/qib.h
32556 @@ -51,6 +51,7 @@
32557 #include <linux/completion.h>
32558 #include <linux/kref.h>
32559 #include <linux/sched.h>
32560 +#include <linux/slab.h>
32561
32562 #include "qib_common.h"
32563 #include "qib_verbs.h"
32564 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
32565 index da739d9..da1c7f4 100644
32566 --- a/drivers/input/gameport/gameport.c
32567 +++ b/drivers/input/gameport/gameport.c
32568 @@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
32569 */
32570 static void gameport_init_port(struct gameport *gameport)
32571 {
32572 - static atomic_t gameport_no = ATOMIC_INIT(0);
32573 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32574
32575 __module_get(THIS_MODULE);
32576
32577 mutex_init(&gameport->drv_mutex);
32578 device_initialize(&gameport->dev);
32579 dev_set_name(&gameport->dev, "gameport%lu",
32580 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
32581 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32582 gameport->dev.bus = &gameport_bus;
32583 gameport->dev.release = gameport_release_port;
32584 if (gameport->parent)
32585 diff --git a/drivers/input/input.c b/drivers/input/input.c
32586 index 8921c61..f5cd63d 100644
32587 --- a/drivers/input/input.c
32588 +++ b/drivers/input/input.c
32589 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
32590 */
32591 int input_register_device(struct input_dev *dev)
32592 {
32593 - static atomic_t input_no = ATOMIC_INIT(0);
32594 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32595 struct input_handler *handler;
32596 const char *path;
32597 int error;
32598 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
32599 dev->setkeycode = input_default_setkeycode;
32600
32601 dev_set_name(&dev->dev, "input%ld",
32602 - (unsigned long) atomic_inc_return(&input_no) - 1);
32603 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32604
32605 error = device_add(&dev->dev);
32606 if (error)
32607 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
32608 index b8d8611..7a4a04b 100644
32609 --- a/drivers/input/joystick/sidewinder.c
32610 +++ b/drivers/input/joystick/sidewinder.c
32611 @@ -30,6 +30,7 @@
32612 #include <linux/kernel.h>
32613 #include <linux/module.h>
32614 #include <linux/slab.h>
32615 +#include <linux/sched.h>
32616 #include <linux/init.h>
32617 #include <linux/input.h>
32618 #include <linux/gameport.h>
32619 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32620 index fd7a0d5..a4af10c 100644
32621 --- a/drivers/input/joystick/xpad.c
32622 +++ b/drivers/input/joystick/xpad.c
32623 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32624
32625 static int xpad_led_probe(struct usb_xpad *xpad)
32626 {
32627 - static atomic_t led_seq = ATOMIC_INIT(0);
32628 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32629 long led_no;
32630 struct xpad_led *led;
32631 struct led_classdev *led_cdev;
32632 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32633 if (!led)
32634 return -ENOMEM;
32635
32636 - led_no = (long)atomic_inc_return(&led_seq) - 1;
32637 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32638
32639 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32640 led->xpad = xpad;
32641 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32642 index 0110b5a..d3ad144 100644
32643 --- a/drivers/input/mousedev.c
32644 +++ b/drivers/input/mousedev.c
32645 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32646
32647 spin_unlock_irq(&client->packet_lock);
32648
32649 - if (copy_to_user(buffer, data, count))
32650 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
32651 return -EFAULT;
32652
32653 return count;
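Note: the extra count > sizeof(data) test guards a fixed-size on-stack buffer — if the computed length ever exceeds the packet buffer, the copy to user space is refused rather than reading past the buffer. The same defensive clamp is added to the ISDN b1 and icn drivers further down, in the copy_from_user direction. Reduced to its essence (an illustrative sketch, not code taken from the patch):

        /* reject user-controlled lengths that do not fit the bounce buffer
         * before copying through it */
        static ssize_t bounded_copy_to_user(char __user *dst, const void *buf,
                                            size_t count, size_t bufsize)
        {
                if (count > bufsize || copy_to_user(dst, buf, count))
                        return -EFAULT;
                return count;
        }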
32654 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32655 index d0f7533..fb8215b 100644
32656 --- a/drivers/input/serio/serio.c
32657 +++ b/drivers/input/serio/serio.c
32658 @@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
32659 */
32660 static void serio_init_port(struct serio *serio)
32661 {
32662 - static atomic_t serio_no = ATOMIC_INIT(0);
32663 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32664
32665 __module_get(THIS_MODULE);
32666
32667 @@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
32668 mutex_init(&serio->drv_mutex);
32669 device_initialize(&serio->dev);
32670 dev_set_name(&serio->dev, "serio%ld",
32671 - (long)atomic_inc_return(&serio_no) - 1);
32672 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
32673 serio->dev.bus = &serio_bus;
32674 serio->dev.release = serio_release_port;
32675 serio->dev.groups = serio_device_attr_groups;
32676 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32677 index b902794..fc7b85b 100644
32678 --- a/drivers/isdn/capi/capi.c
32679 +++ b/drivers/isdn/capi/capi.c
32680 @@ -83,8 +83,8 @@ struct capiminor {
32681
32682 struct capi20_appl *ap;
32683 u32 ncci;
32684 - atomic_t datahandle;
32685 - atomic_t msgid;
32686 + atomic_unchecked_t datahandle;
32687 + atomic_unchecked_t msgid;
32688
32689 struct tty_port port;
32690 int ttyinstop;
32691 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32692 capimsg_setu16(s, 2, mp->ap->applid);
32693 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32694 capimsg_setu8 (s, 5, CAPI_RESP);
32695 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32696 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32697 capimsg_setu32(s, 8, mp->ncci);
32698 capimsg_setu16(s, 12, datahandle);
32699 }
32700 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
32701 mp->outbytes -= len;
32702 spin_unlock_bh(&mp->outlock);
32703
32704 - datahandle = atomic_inc_return(&mp->datahandle);
32705 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32706 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32707 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32708 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32709 capimsg_setu16(skb->data, 2, mp->ap->applid);
32710 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
32711 capimsg_setu8 (skb->data, 5, CAPI_REQ);
32712 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
32713 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
32714 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
32715 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
32716 capimsg_setu16(skb->data, 16, len); /* Data length */
32717 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
32718 index 821f7ac..28d4030 100644
32719 --- a/drivers/isdn/hardware/avm/b1.c
32720 +++ b/drivers/isdn/hardware/avm/b1.c
32721 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
32722 }
32723 if (left) {
32724 if (t4file->user) {
32725 - if (copy_from_user(buf, dp, left))
32726 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32727 return -EFAULT;
32728 } else {
32729 memcpy(buf, dp, left);
32730 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
32731 }
32732 if (left) {
32733 if (config->user) {
32734 - if (copy_from_user(buf, dp, left))
32735 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32736 return -EFAULT;
32737 } else {
32738 memcpy(buf, dp, left);
32739 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
32740 index dd6b53a..19d9ee6 100644
32741 --- a/drivers/isdn/hardware/eicon/divasync.h
32742 +++ b/drivers/isdn/hardware/eicon/divasync.h
32743 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
32744 } diva_didd_add_adapter_t;
32745 typedef struct _diva_didd_remove_adapter {
32746 IDI_CALL p_request;
32747 -} diva_didd_remove_adapter_t;
32748 +} __no_const diva_didd_remove_adapter_t;
32749 typedef struct _diva_didd_read_adapter_array {
32750 void *buffer;
32751 dword length;
32752 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
32753 index d303e65..28bcb7b 100644
32754 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
32755 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
32756 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
32757 typedef struct _diva_os_idi_adapter_interface {
32758 diva_init_card_proc_t cleanup_adapter_proc;
32759 diva_cmd_card_proc_t cmd_proc;
32760 -} diva_os_idi_adapter_interface_t;
32761 +} __no_const diva_os_idi_adapter_interface_t;
32762
32763 typedef struct _diva_os_xdi_adapter {
32764 struct list_head link;
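Note: the __no_const annotations pair with the constify GCC plugin that this patch enables. Structures consisting largely of function pointers are made read-only automatically at compile time; __no_const marks the few ops-like structures whose members really are filled in at runtime so they stay writable. A sketch of how the opt-out is typically wired up (assumed form; the actual macro is defined in the compiler headers elsewhere in this patch):

        #ifdef CONSTIFY_PLUGIN
        #define __no_const __attribute__((no_const))  /* tell the plugin to skip this type */
        #else
        #define __no_const
        #endif

        typedef struct _example_adapter_interface {
                int (*cleanup_adapter_proc)(void *adapter);
                int (*cmd_proc)(void *adapter, void *cmd);
        } __no_const example_adapter_interface_t;      /* callbacks assigned at runtime */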
32765 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
32766 index e74df7c..03a03ba 100644
32767 --- a/drivers/isdn/icn/icn.c
32768 +++ b/drivers/isdn/icn/icn.c
32769 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
32770 if (count > len)
32771 count = len;
32772 if (user) {
32773 - if (copy_from_user(msg, buf, count))
32774 + if (count > sizeof msg || copy_from_user(msg, buf, count))
32775 return -EFAULT;
32776 } else
32777 memcpy(msg, buf, count);
32778 diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
32779 index 8bc4915..4cc6a2e 100644
32780 --- a/drivers/leds/leds-mc13783.c
32781 +++ b/drivers/leds/leds-mc13783.c
32782 @@ -280,7 +280,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
32783 return -EINVAL;
32784 }
32785
32786 - led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
32787 + led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
32788 if (led == NULL) {
32789 dev_err(&pdev->dev, "failed to alloc memory\n");
32790 return -ENOMEM;
32791 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
32792 index b5fdcb7..5b6c59f 100644
32793 --- a/drivers/lguest/core.c
32794 +++ b/drivers/lguest/core.c
32795 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
32796 * it's worked so far. The end address needs +1 because __get_vm_area
32797 * allocates an extra guard page, so we need space for that.
32798 */
32799 +
32800 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32801 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32802 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
32803 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32804 +#else
32805 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32806 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
32807 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32808 +#endif
32809 +
32810 if (!switcher_vma) {
32811 err = -ENOMEM;
32812 printk("lguest: could not map switcher pages high\n");
32813 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
32814 * Now the Switcher is mapped at the right address, we can't fail!
32815 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
32816 */
32817 - memcpy(switcher_vma->addr, start_switcher_text,
32818 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
32819 end_switcher_text - start_switcher_text);
32820
32821 printk(KERN_INFO "lguest: mapped switcher at %p\n",
32822 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
32823 index 3980903..ce25c5e 100644
32824 --- a/drivers/lguest/x86/core.c
32825 +++ b/drivers/lguest/x86/core.c
32826 @@ -59,7 +59,7 @@ static struct {
32827 /* Offset from where switcher.S was compiled to where we've copied it */
32828 static unsigned long switcher_offset(void)
32829 {
32830 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
32831 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
32832 }
32833
32834 /* This cpu's struct lguest_pages. */
32835 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
32836 * These copies are pretty cheap, so we do them unconditionally: */
32837 /* Save the current Host top-level page directory.
32838 */
32839 +
32840 +#ifdef CONFIG_PAX_PER_CPU_PGD
32841 + pages->state.host_cr3 = read_cr3();
32842 +#else
32843 pages->state.host_cr3 = __pa(current->mm->pgd);
32844 +#endif
32845 +
32846 /*
32847 * Set up the Guest's page tables to see this CPU's pages (and no
32848 * other CPU's pages).
32849 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
32850 * compiled-in switcher code and the high-mapped copy we just made.
32851 */
32852 for (i = 0; i < IDT_ENTRIES; i++)
32853 - default_idt_entries[i] += switcher_offset();
32854 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
32855
32856 /*
32857 * Set up the Switcher's per-cpu areas.
32858 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
32859 * it will be undisturbed when we switch. To change %cs and jump we
32860 * need this structure to feed to Intel's "lcall" instruction.
32861 */
32862 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
32863 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
32864 lguest_entry.segment = LGUEST_CS;
32865
32866 /*
32867 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
32868 index 40634b0..4f5855e 100644
32869 --- a/drivers/lguest/x86/switcher_32.S
32870 +++ b/drivers/lguest/x86/switcher_32.S
32871 @@ -87,6 +87,7 @@
32872 #include <asm/page.h>
32873 #include <asm/segment.h>
32874 #include <asm/lguest.h>
32875 +#include <asm/processor-flags.h>
32876
32877 // We mark the start of the code to copy
32878 // It's placed in .text tho it's never run here
32879 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
32880 // Changes type when we load it: damn Intel!
32881 // For after we switch over our page tables
32882 // That entry will be read-only: we'd crash.
32883 +
32884 +#ifdef CONFIG_PAX_KERNEXEC
32885 + mov %cr0, %edx
32886 + xor $X86_CR0_WP, %edx
32887 + mov %edx, %cr0
32888 +#endif
32889 +
32890 movl $(GDT_ENTRY_TSS*8), %edx
32891 ltr %dx
32892
32893 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
32894 // Let's clear it again for our return.
32895 // The GDT descriptor of the Host
32896 // Points to the table after two "size" bytes
32897 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
32898 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
32899 // Clear "used" from type field (byte 5, bit 2)
32900 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
32901 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
32902 +
32903 +#ifdef CONFIG_PAX_KERNEXEC
32904 + mov %cr0, %eax
32905 + xor $X86_CR0_WP, %eax
32906 + mov %eax, %cr0
32907 +#endif
32908
32909 // Once our page table's switched, the Guest is live!
32910 // The Host fades as we run this final step.
32911 @@ -295,13 +309,12 @@ deliver_to_host:
32912 // I consulted gcc, and it gave
32913 // These instructions, which I gladly credit:
32914 leal (%edx,%ebx,8), %eax
32915 - movzwl (%eax),%edx
32916 - movl 4(%eax), %eax
32917 - xorw %ax, %ax
32918 - orl %eax, %edx
32919 + movl 4(%eax), %edx
32920 + movw (%eax), %dx
32921 // Now the address of the handler's in %edx
32922 // We call it now: its "iret" drops us home.
32923 - jmp *%edx
32924 + ljmp $__KERNEL_CS, $1f
32925 +1: jmp *%edx
32926
32927 // Every interrupt can come to us here
32928 // But we must truly tell each apart.
32929 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
32930 index 20e5c2c..9e849a9 100644
32931 --- a/drivers/macintosh/macio_asic.c
32932 +++ b/drivers/macintosh/macio_asic.c
32933 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
32934 * MacIO is matched against any Apple ID, it's probe() function
32935 * will then decide wether it applies or not
32936 */
32937 -static const struct pci_device_id __devinitdata pci_ids [] = { {
32938 +static const struct pci_device_id __devinitconst pci_ids [] = { {
32939 .vendor = PCI_VENDOR_ID_APPLE,
32940 .device = PCI_ANY_ID,
32941 .subvendor = PCI_ANY_ID,
32942 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
32943 index 17e2b47..bcbeec4 100644
32944 --- a/drivers/md/bitmap.c
32945 +++ b/drivers/md/bitmap.c
32946 @@ -1823,7 +1823,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
32947 chunk_kb ? "KB" : "B");
32948 if (bitmap->file) {
32949 seq_printf(seq, ", file: ");
32950 - seq_path(seq, &bitmap->file->f_path, " \t\n");
32951 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
32952 }
32953
32954 seq_printf(seq, "\n");
32955 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
32956 index a1a3e6d..1918bfc 100644
32957 --- a/drivers/md/dm-ioctl.c
32958 +++ b/drivers/md/dm-ioctl.c
32959 @@ -1590,7 +1590,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
32960 cmd == DM_LIST_VERSIONS_CMD)
32961 return 0;
32962
32963 - if ((cmd == DM_DEV_CREATE_CMD)) {
32964 + if (cmd == DM_DEV_CREATE_CMD) {
32965 if (!*param->name) {
32966 DMWARN("name not supplied when creating device");
32967 return -EINVAL;
32968 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
32969 index d039de8..0cf5b87 100644
32970 --- a/drivers/md/dm-raid1.c
32971 +++ b/drivers/md/dm-raid1.c
32972 @@ -40,7 +40,7 @@ enum dm_raid1_error {
32973
32974 struct mirror {
32975 struct mirror_set *ms;
32976 - atomic_t error_count;
32977 + atomic_unchecked_t error_count;
32978 unsigned long error_type;
32979 struct dm_dev *dev;
32980 sector_t offset;
32981 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
32982 struct mirror *m;
32983
32984 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
32985 - if (!atomic_read(&m->error_count))
32986 + if (!atomic_read_unchecked(&m->error_count))
32987 return m;
32988
32989 return NULL;
32990 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
32991 * simple way to tell if a device has encountered
32992 * errors.
32993 */
32994 - atomic_inc(&m->error_count);
32995 + atomic_inc_unchecked(&m->error_count);
32996
32997 if (test_and_set_bit(error_type, &m->error_type))
32998 return;
32999 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
33000 struct mirror *m = get_default_mirror(ms);
33001
33002 do {
33003 - if (likely(!atomic_read(&m->error_count)))
33004 + if (likely(!atomic_read_unchecked(&m->error_count)))
33005 return m;
33006
33007 if (m-- == ms->mirror)
33008 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
33009 {
33010 struct mirror *default_mirror = get_default_mirror(m->ms);
33011
33012 - return !atomic_read(&default_mirror->error_count);
33013 + return !atomic_read_unchecked(&default_mirror->error_count);
33014 }
33015
33016 static int mirror_available(struct mirror_set *ms, struct bio *bio)
33017 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
33018 */
33019 if (likely(region_in_sync(ms, region, 1)))
33020 m = choose_mirror(ms, bio->bi_sector);
33021 - else if (m && atomic_read(&m->error_count))
33022 + else if (m && atomic_read_unchecked(&m->error_count))
33023 m = NULL;
33024
33025 if (likely(m))
33026 @@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
33027 }
33028
33029 ms->mirror[mirror].ms = ms;
33030 - atomic_set(&(ms->mirror[mirror].error_count), 0);
33031 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33032 ms->mirror[mirror].error_type = 0;
33033 ms->mirror[mirror].offset = offset;
33034
33035 @@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
33036 */
33037 static char device_status_char(struct mirror *m)
33038 {
33039 - if (!atomic_read(&(m->error_count)))
33040 + if (!atomic_read_unchecked(&(m->error_count)))
33041 return 'A';
33042
33043 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
33044 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
33045 index 35c94ff..20d4c17 100644
33046 --- a/drivers/md/dm-stripe.c
33047 +++ b/drivers/md/dm-stripe.c
33048 @@ -20,7 +20,7 @@ struct stripe {
33049 struct dm_dev *dev;
33050 sector_t physical_start;
33051
33052 - atomic_t error_count;
33053 + atomic_unchecked_t error_count;
33054 };
33055
33056 struct stripe_c {
33057 @@ -193,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
33058 kfree(sc);
33059 return r;
33060 }
33061 - atomic_set(&(sc->stripe[i].error_count), 0);
33062 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33063 }
33064
33065 ti->private = sc;
33066 @@ -315,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
33067 DMEMIT("%d ", sc->stripes);
33068 for (i = 0; i < sc->stripes; i++) {
33069 DMEMIT("%s ", sc->stripe[i].dev->name);
33070 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33071 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33072 'D' : 'A';
33073 }
33074 buffer[i] = '\0';
33075 @@ -362,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
33076 */
33077 for (i = 0; i < sc->stripes; i++)
33078 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33079 - atomic_inc(&(sc->stripe[i].error_count));
33080 - if (atomic_read(&(sc->stripe[i].error_count)) <
33081 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
33082 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33083 DM_IO_ERROR_THRESHOLD)
33084 schedule_work(&sc->trigger_event);
33085 }
33086 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
33087 index 2e227fb..44ead1f 100644
33088 --- a/drivers/md/dm-table.c
33089 +++ b/drivers/md/dm-table.c
33090 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
33091 if (!dev_size)
33092 return 0;
33093
33094 - if ((start >= dev_size) || (start + len > dev_size)) {
33095 + if ((start >= dev_size) || (len > dev_size - start)) {
33096 DMWARN("%s: %s too small for target: "
33097 "start=%llu, len=%llu, dev_size=%llu",
33098 dm_device_name(ti->table->md), bdevname(bdev, b),
33099 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
33100 index 737d388..811ad5a 100644
33101 --- a/drivers/md/dm-thin-metadata.c
33102 +++ b/drivers/md/dm-thin-metadata.c
33103 @@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33104
33105 pmd->info.tm = tm;
33106 pmd->info.levels = 2;
33107 - pmd->info.value_type.context = pmd->data_sm;
33108 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33109 pmd->info.value_type.size = sizeof(__le64);
33110 pmd->info.value_type.inc = data_block_inc;
33111 pmd->info.value_type.dec = data_block_dec;
33112 @@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33113
33114 pmd->bl_info.tm = tm;
33115 pmd->bl_info.levels = 1;
33116 - pmd->bl_info.value_type.context = pmd->data_sm;
33117 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33118 pmd->bl_info.value_type.size = sizeof(__le64);
33119 pmd->bl_info.value_type.inc = data_block_inc;
33120 pmd->bl_info.value_type.dec = data_block_dec;
33121 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
33122 index e24143c..ce2f21a1 100644
33123 --- a/drivers/md/dm.c
33124 +++ b/drivers/md/dm.c
33125 @@ -176,9 +176,9 @@ struct mapped_device {
33126 /*
33127 * Event handling.
33128 */
33129 - atomic_t event_nr;
33130 + atomic_unchecked_t event_nr;
33131 wait_queue_head_t eventq;
33132 - atomic_t uevent_seq;
33133 + atomic_unchecked_t uevent_seq;
33134 struct list_head uevent_list;
33135 spinlock_t uevent_lock; /* Protect access to uevent_list */
33136
33137 @@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
33138 rwlock_init(&md->map_lock);
33139 atomic_set(&md->holders, 1);
33140 atomic_set(&md->open_count, 0);
33141 - atomic_set(&md->event_nr, 0);
33142 - atomic_set(&md->uevent_seq, 0);
33143 + atomic_set_unchecked(&md->event_nr, 0);
33144 + atomic_set_unchecked(&md->uevent_seq, 0);
33145 INIT_LIST_HEAD(&md->uevent_list);
33146 spin_lock_init(&md->uevent_lock);
33147
33148 @@ -1980,7 +1980,7 @@ static void event_callback(void *context)
33149
33150 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
33151
33152 - atomic_inc(&md->event_nr);
33153 + atomic_inc_unchecked(&md->event_nr);
33154 wake_up(&md->eventq);
33155 }
33156
33157 @@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
33158
33159 uint32_t dm_next_uevent_seq(struct mapped_device *md)
33160 {
33161 - return atomic_add_return(1, &md->uevent_seq);
33162 + return atomic_add_return_unchecked(1, &md->uevent_seq);
33163 }
33164
33165 uint32_t dm_get_event_nr(struct mapped_device *md)
33166 {
33167 - return atomic_read(&md->event_nr);
33168 + return atomic_read_unchecked(&md->event_nr);
33169 }
33170
33171 int dm_wait_event(struct mapped_device *md, int event_nr)
33172 {
33173 return wait_event_interruptible(md->eventq,
33174 - (event_nr != atomic_read(&md->event_nr)));
33175 + (event_nr != atomic_read_unchecked(&md->event_nr)));
33176 }
33177
33178 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
33179 diff --git a/drivers/md/md.c b/drivers/md/md.c
33180 index 2b30ffd..bf789ce 100644
33181 --- a/drivers/md/md.c
33182 +++ b/drivers/md/md.c
33183 @@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
33184 * start build, activate spare
33185 */
33186 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
33187 -static atomic_t md_event_count;
33188 +static atomic_unchecked_t md_event_count;
33189 void md_new_event(struct mddev *mddev)
33190 {
33191 - atomic_inc(&md_event_count);
33192 + atomic_inc_unchecked(&md_event_count);
33193 wake_up(&md_event_waiters);
33194 }
33195 EXPORT_SYMBOL_GPL(md_new_event);
33196 @@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
33197 */
33198 static void md_new_event_inintr(struct mddev *mddev)
33199 {
33200 - atomic_inc(&md_event_count);
33201 + atomic_inc_unchecked(&md_event_count);
33202 wake_up(&md_event_waiters);
33203 }
33204
33205 @@ -1526,7 +1526,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
33206
33207 rdev->preferred_minor = 0xffff;
33208 rdev->data_offset = le64_to_cpu(sb->data_offset);
33209 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33210 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33211
33212 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
33213 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
33214 @@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
33215 else
33216 sb->resync_offset = cpu_to_le64(0);
33217
33218 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
33219 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
33220
33221 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
33222 sb->size = cpu_to_le64(mddev->dev_sectors);
33223 @@ -2691,7 +2691,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
33224 static ssize_t
33225 errors_show(struct md_rdev *rdev, char *page)
33226 {
33227 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
33228 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
33229 }
33230
33231 static ssize_t
33232 @@ -2700,7 +2700,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
33233 char *e;
33234 unsigned long n = simple_strtoul(buf, &e, 10);
33235 if (*buf && (*e == 0 || *e == '\n')) {
33236 - atomic_set(&rdev->corrected_errors, n);
33237 + atomic_set_unchecked(&rdev->corrected_errors, n);
33238 return len;
33239 }
33240 return -EINVAL;
33241 @@ -3086,8 +3086,8 @@ int md_rdev_init(struct md_rdev *rdev)
33242 rdev->sb_loaded = 0;
33243 rdev->bb_page = NULL;
33244 atomic_set(&rdev->nr_pending, 0);
33245 - atomic_set(&rdev->read_errors, 0);
33246 - atomic_set(&rdev->corrected_errors, 0);
33247 + atomic_set_unchecked(&rdev->read_errors, 0);
33248 + atomic_set_unchecked(&rdev->corrected_errors, 0);
33249
33250 INIT_LIST_HEAD(&rdev->same_set);
33251 init_waitqueue_head(&rdev->blocked_wait);
33252 @@ -6738,7 +6738,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33253
33254 spin_unlock(&pers_lock);
33255 seq_printf(seq, "\n");
33256 - seq->poll_event = atomic_read(&md_event_count);
33257 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33258 return 0;
33259 }
33260 if (v == (void*)2) {
33261 @@ -6841,7 +6841,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
33262 return error;
33263
33264 seq = file->private_data;
33265 - seq->poll_event = atomic_read(&md_event_count);
33266 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33267 return error;
33268 }
33269
33270 @@ -6855,7 +6855,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
33271 /* always allow read */
33272 mask = POLLIN | POLLRDNORM;
33273
33274 - if (seq->poll_event != atomic_read(&md_event_count))
33275 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
33276 mask |= POLLERR | POLLPRI;
33277 return mask;
33278 }
33279 @@ -6899,7 +6899,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
33280 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
33281 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
33282 (int)part_stat_read(&disk->part0, sectors[1]) -
33283 - atomic_read(&disk->sync_io);
33284 + atomic_read_unchecked(&disk->sync_io);
33285 /* sync IO will cause sync_io to increase before the disk_stats
33286 * as sync_io is counted when a request starts, and
33287 * disk_stats is counted when it completes.
33288 diff --git a/drivers/md/md.h b/drivers/md/md.h
33289 index 1c2063c..9639970 100644
33290 --- a/drivers/md/md.h
33291 +++ b/drivers/md/md.h
33292 @@ -93,13 +93,13 @@ struct md_rdev {
33293 * only maintained for arrays that
33294 * support hot removal
33295 */
33296 - atomic_t read_errors; /* number of consecutive read errors that
33297 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
33298 * we have tried to ignore.
33299 */
33300 struct timespec last_read_error; /* monotonic time since our
33301 * last read error
33302 */
33303 - atomic_t corrected_errors; /* number of corrected read errors,
33304 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
33305 * for reporting to userspace and storing
33306 * in superblock.
33307 */
33308 @@ -429,7 +429,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
33309
33310 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
33311 {
33312 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33313 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33314 }
33315
33316 struct md_personality
33317 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
33318 index 50ed53b..4f29d7d 100644
33319 --- a/drivers/md/persistent-data/dm-space-map-checker.c
33320 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
33321 @@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
33322 /*----------------------------------------------------------------*/
33323
33324 struct sm_checker {
33325 - struct dm_space_map sm;
33326 + dm_space_map_no_const sm;
33327
33328 struct count_array old_counts;
33329 struct count_array counts;
33330 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
33331 index fc469ba..2d91555 100644
33332 --- a/drivers/md/persistent-data/dm-space-map-disk.c
33333 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
33334 @@ -23,7 +23,7 @@
33335 * Space map interface.
33336 */
33337 struct sm_disk {
33338 - struct dm_space_map sm;
33339 + dm_space_map_no_const sm;
33340
33341 struct ll_disk ll;
33342 struct ll_disk old_ll;
33343 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
33344 index e89ae5e..062e4c2 100644
33345 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
33346 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
33347 @@ -43,7 +43,7 @@ struct block_op {
33348 };
33349
33350 struct sm_metadata {
33351 - struct dm_space_map sm;
33352 + dm_space_map_no_const sm;
33353
33354 struct ll_disk ll;
33355 struct ll_disk old_ll;
33356 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
33357 index 1cbfc6b..56e1dbb 100644
33358 --- a/drivers/md/persistent-data/dm-space-map.h
33359 +++ b/drivers/md/persistent-data/dm-space-map.h
33360 @@ -60,6 +60,7 @@ struct dm_space_map {
33361 int (*root_size)(struct dm_space_map *sm, size_t *result);
33362 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
33363 };
33364 +typedef struct dm_space_map __no_const dm_space_map_no_const;
33365
33366 /*----------------------------------------------------------------*/
33367
33368 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
33369 index d7e9577..faa512f2 100644
33370 --- a/drivers/md/raid1.c
33371 +++ b/drivers/md/raid1.c
33372 @@ -1688,7 +1688,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
33373 if (r1_sync_page_io(rdev, sect, s,
33374 bio->bi_io_vec[idx].bv_page,
33375 READ) != 0)
33376 - atomic_add(s, &rdev->corrected_errors);
33377 + atomic_add_unchecked(s, &rdev->corrected_errors);
33378 }
33379 sectors -= s;
33380 sect += s;
33381 @@ -1902,7 +1902,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
33382 test_bit(In_sync, &rdev->flags)) {
33383 if (r1_sync_page_io(rdev, sect, s,
33384 conf->tmppage, READ)) {
33385 - atomic_add(s, &rdev->corrected_errors);
33386 + atomic_add_unchecked(s, &rdev->corrected_errors);
33387 printk(KERN_INFO
33388 "md/raid1:%s: read error corrected "
33389 "(%d sectors at %llu on %s)\n",
33390 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
33391 index d037adb..ed17dc9 100644
33392 --- a/drivers/md/raid10.c
33393 +++ b/drivers/md/raid10.c
33394 @@ -1684,7 +1684,7 @@ static void end_sync_read(struct bio *bio, int error)
33395 /* The write handler will notice the lack of
33396 * R10BIO_Uptodate and record any errors etc
33397 */
33398 - atomic_add(r10_bio->sectors,
33399 + atomic_add_unchecked(r10_bio->sectors,
33400 &conf->mirrors[d].rdev->corrected_errors);
33401
33402 /* for reconstruct, we always reschedule after a read.
33403 @@ -2033,7 +2033,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33404 {
33405 struct timespec cur_time_mon;
33406 unsigned long hours_since_last;
33407 - unsigned int read_errors = atomic_read(&rdev->read_errors);
33408 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
33409
33410 ktime_get_ts(&cur_time_mon);
33411
33412 @@ -2055,9 +2055,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33413 * overflowing the shift of read_errors by hours_since_last.
33414 */
33415 if (hours_since_last >= 8 * sizeof(read_errors))
33416 - atomic_set(&rdev->read_errors, 0);
33417 + atomic_set_unchecked(&rdev->read_errors, 0);
33418 else
33419 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
33420 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
33421 }
33422
33423 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
33424 @@ -2111,8 +2111,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33425 return;
33426
33427 check_decay_read_errors(mddev, rdev);
33428 - atomic_inc(&rdev->read_errors);
33429 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
33430 + atomic_inc_unchecked(&rdev->read_errors);
33431 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
33432 char b[BDEVNAME_SIZE];
33433 bdevname(rdev->bdev, b);
33434
33435 @@ -2120,7 +2120,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33436 "md/raid10:%s: %s: Raid device exceeded "
33437 "read_error threshold [cur %d:max %d]\n",
33438 mdname(mddev), b,
33439 - atomic_read(&rdev->read_errors), max_read_errors);
33440 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
33441 printk(KERN_NOTICE
33442 "md/raid10:%s: %s: Failing raid device\n",
33443 mdname(mddev), b);
33444 @@ -2271,7 +2271,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33445 (unsigned long long)(
33446 sect + rdev->data_offset),
33447 bdevname(rdev->bdev, b));
33448 - atomic_add(s, &rdev->corrected_errors);
33449 + atomic_add_unchecked(s, &rdev->corrected_errors);
33450 }
33451
33452 rdev_dec_pending(rdev, mddev);
33453 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
33454 index f351422..85c01bb 100644
33455 --- a/drivers/md/raid5.c
33456 +++ b/drivers/md/raid5.c
33457 @@ -1686,18 +1686,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
33458 (unsigned long long)(sh->sector
33459 + rdev->data_offset),
33460 bdevname(rdev->bdev, b));
33461 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33462 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
33463 clear_bit(R5_ReadError, &sh->dev[i].flags);
33464 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33465 }
33466 - if (atomic_read(&rdev->read_errors))
33467 - atomic_set(&rdev->read_errors, 0);
33468 + if (atomic_read_unchecked(&rdev->read_errors))
33469 + atomic_set_unchecked(&rdev->read_errors, 0);
33470 } else {
33471 const char *bdn = bdevname(rdev->bdev, b);
33472 int retry = 0;
33473
33474 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33475 - atomic_inc(&rdev->read_errors);
33476 + atomic_inc_unchecked(&rdev->read_errors);
33477 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
33478 printk_ratelimited(
33479 KERN_WARNING
33480 @@ -1726,7 +1726,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
33481 (unsigned long long)(sh->sector
33482 + rdev->data_offset),
33483 bdn);
33484 - else if (atomic_read(&rdev->read_errors)
33485 + else if (atomic_read_unchecked(&rdev->read_errors)
33486 > conf->max_nr_stripes)
33487 printk(KERN_WARNING
33488 "md/raid:%s: Too many read errors, failing device %s.\n",
33489 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
33490 index d88c4aa..17c80b1 100644
33491 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33492 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
33493 @@ -1679,7 +1679,7 @@ static struct ddb_info ddb_v6 = {
33494 .subvendor = _subvend, .subdevice = _subdev, \
33495 .driver_data = (unsigned long)&_driverdata }
33496
33497 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33498 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33499 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33500 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33501 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
33502 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33503 index a7d876f..8c21b61 100644
33504 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
33505 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
33506 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
33507 union {
33508 dmx_ts_cb ts;
33509 dmx_section_cb sec;
33510 - } cb;
33511 + } __no_const cb;
33512
33513 struct dvb_demux *demux;
33514 void *priv;
33515 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33516 index 00a6732..70a682e 100644
33517 --- a/drivers/media/dvb/dvb-core/dvbdev.c
33518 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
33519 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33520 const struct dvb_device *template, void *priv, int type)
33521 {
33522 struct dvb_device *dvbdev;
33523 - struct file_operations *dvbdevfops;
33524 + file_operations_no_const *dvbdevfops;
33525 struct device *clsdev;
33526 int minor;
33527 int id;
33528 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33529 index 3940bb0..fb3952a 100644
33530 --- a/drivers/media/dvb/dvb-usb/cxusb.c
33531 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
33532 @@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33533
33534 struct dib0700_adapter_state {
33535 int (*set_param_save) (struct dvb_frontend *);
33536 -};
33537 +} __no_const;
33538
33539 static int dib7070_set_param_override(struct dvb_frontend *fe)
33540 {
33541 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33542 index 451c5a7..649f711 100644
33543 --- a/drivers/media/dvb/dvb-usb/dw2102.c
33544 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
33545 @@ -95,7 +95,7 @@ struct su3000_state {
33546
33547 struct s6x0_state {
33548 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33549 -};
33550 +} __no_const;
33551
33552 /* debug */
33553 static int dvb_usb_dw2102_debug;
33554 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33555 index 404f63a..4796533 100644
33556 --- a/drivers/media/dvb/frontends/dib3000.h
33557 +++ b/drivers/media/dvb/frontends/dib3000.h
33558 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33559 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33560 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33561 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33562 -};
33563 +} __no_const;
33564
33565 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33566 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33567 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33568 index 7539a5d..06531a6 100644
33569 --- a/drivers/media/dvb/ngene/ngene-cards.c
33570 +++ b/drivers/media/dvb/ngene/ngene-cards.c
33571 @@ -478,7 +478,7 @@ static struct ngene_info ngene_info_m780 = {
33572
33573 /****************************************************************************/
33574
33575 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33576 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33577 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33578 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33579 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33580 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33581 index 16a089f..1661b11 100644
33582 --- a/drivers/media/radio/radio-cadet.c
33583 +++ b/drivers/media/radio/radio-cadet.c
33584 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33585 unsigned char readbuf[RDS_BUFFER];
33586 int i = 0;
33587
33588 + if (count > RDS_BUFFER)
33589 + return -EFAULT;
33590 mutex_lock(&dev->lock);
33591 if (dev->rdsstat == 0) {
33592 dev->rdsstat = 1;
33593 @@ -347,7 +349,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33594 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
33595 mutex_unlock(&dev->lock);
33596
33597 - if (copy_to_user(data, readbuf, i))
33598 + if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
33599 return -EFAULT;
33600 return i;
33601 }
33602 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33603 index 9cde353..8c6a1c3 100644
33604 --- a/drivers/media/video/au0828/au0828.h
33605 +++ b/drivers/media/video/au0828/au0828.h
33606 @@ -191,7 +191,7 @@ struct au0828_dev {
33607
33608 /* I2C */
33609 struct i2c_adapter i2c_adap;
33610 - struct i2c_algorithm i2c_algo;
33611 + i2c_algorithm_no_const i2c_algo;
33612 struct i2c_client i2c_client;
33613 u32 i2c_rc;
33614
33615 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
33616 index 04bf662..e0ac026 100644
33617 --- a/drivers/media/video/cx88/cx88-alsa.c
33618 +++ b/drivers/media/video/cx88/cx88-alsa.c
33619 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
33620 * Only boards with eeprom and byte 1 at eeprom=1 have it
33621 */
33622
33623 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
33624 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
33625 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33626 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33627 {0, }
33628 diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
33629 index 88cf9d9..bbc4b2c 100644
33630 --- a/drivers/media/video/omap/omap_vout.c
33631 +++ b/drivers/media/video/omap/omap_vout.c
33632 @@ -64,7 +64,6 @@ enum omap_vout_channels {
33633 OMAP_VIDEO2,
33634 };
33635
33636 -static struct videobuf_queue_ops video_vbq_ops;
33637 /* Variables configurable through module params*/
33638 static u32 video1_numbuffers = 3;
33639 static u32 video2_numbuffers = 3;
33640 @@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
33641 {
33642 struct videobuf_queue *q;
33643 struct omap_vout_device *vout = NULL;
33644 + static struct videobuf_queue_ops video_vbq_ops = {
33645 + .buf_setup = omap_vout_buffer_setup,
33646 + .buf_prepare = omap_vout_buffer_prepare,
33647 + .buf_release = omap_vout_buffer_release,
33648 + .buf_queue = omap_vout_buffer_queue,
33649 + };
33650
33651 vout = video_drvdata(file);
33652 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
33653 @@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
33654 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
33655
33656 q = &vout->vbq;
33657 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
33658 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
33659 - video_vbq_ops.buf_release = omap_vout_buffer_release;
33660 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
33661 spin_lock_init(&vout->vbq_lock);
33662
33663 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
33664 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33665 index 305e6aa..0143317 100644
33666 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33667 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33668 @@ -196,7 +196,7 @@ struct pvr2_hdw {
33669
33670 /* I2C stuff */
33671 struct i2c_adapter i2c_adap;
33672 - struct i2c_algorithm i2c_algo;
33673 + i2c_algorithm_no_const i2c_algo;
33674 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
33675 int i2c_cx25840_hack_state;
33676 int i2c_linked;
33677 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
33678 index 02194c0..091733b 100644
33679 --- a/drivers/media/video/timblogiw.c
33680 +++ b/drivers/media/video/timblogiw.c
33681 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
33682
33683 /* Platform device functions */
33684
33685 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33686 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
33687 .vidioc_querycap = timblogiw_querycap,
33688 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
33689 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
33690 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33691 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
33692 };
33693
33694 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
33695 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
33696 .owner = THIS_MODULE,
33697 .open = timblogiw_open,
33698 .release = timblogiw_close,
33699 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
33700 index a5c591f..db692a3 100644
33701 --- a/drivers/message/fusion/mptbase.c
33702 +++ b/drivers/message/fusion/mptbase.c
33703 @@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
33704 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
33705 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
33706
33707 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33708 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
33709 +#else
33710 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33711 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
33712 +#endif
33713 +
33714 /*
33715 * Rounding UP to nearest 4-kB boundary here...
33716 */
33717 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
33718 index 551262e..7551198 100644
33719 --- a/drivers/message/fusion/mptsas.c
33720 +++ b/drivers/message/fusion/mptsas.c
33721 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
33722 return 0;
33723 }
33724
33725 +static inline void
33726 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33727 +{
33728 + if (phy_info->port_details) {
33729 + phy_info->port_details->rphy = rphy;
33730 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33731 + ioc->name, rphy));
33732 + }
33733 +
33734 + if (rphy) {
33735 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33736 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33737 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33738 + ioc->name, rphy, rphy->dev.release));
33739 + }
33740 +}
33741 +
33742 /* no mutex */
33743 static void
33744 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
33745 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
33746 return NULL;
33747 }
33748
33749 -static inline void
33750 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33751 -{
33752 - if (phy_info->port_details) {
33753 - phy_info->port_details->rphy = rphy;
33754 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33755 - ioc->name, rphy));
33756 - }
33757 -
33758 - if (rphy) {
33759 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33760 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33761 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33762 - ioc->name, rphy, rphy->dev.release));
33763 - }
33764 -}
33765 -
33766 static inline struct sas_port *
33767 mptsas_get_port(struct mptsas_phyinfo *phy_info)
33768 {
33769 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
33770 index 0c3ced7..1fe34ec 100644
33771 --- a/drivers/message/fusion/mptscsih.c
33772 +++ b/drivers/message/fusion/mptscsih.c
33773 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
33774
33775 h = shost_priv(SChost);
33776
33777 - if (h) {
33778 - if (h->info_kbuf == NULL)
33779 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33780 - return h->info_kbuf;
33781 - h->info_kbuf[0] = '\0';
33782 + if (!h)
33783 + return NULL;
33784
33785 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33786 - h->info_kbuf[size-1] = '\0';
33787 - }
33788 + if (h->info_kbuf == NULL)
33789 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33790 + return h->info_kbuf;
33791 + h->info_kbuf[0] = '\0';
33792 +
33793 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33794 + h->info_kbuf[size-1] = '\0';
33795
33796 return h->info_kbuf;
33797 }
33798 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
33799 index 6d115c7..58ff7fd 100644
33800 --- a/drivers/message/i2o/i2o_proc.c
33801 +++ b/drivers/message/i2o/i2o_proc.c
33802 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
33803 "Array Controller Device"
33804 };
33805
33806 -static char *chtostr(u8 * chars, int n)
33807 -{
33808 - char tmp[256];
33809 - tmp[0] = 0;
33810 - return strncat(tmp, (char *)chars, n);
33811 -}
33812 -
33813 static int i2o_report_query_status(struct seq_file *seq, int block_status,
33814 char *group)
33815 {
33816 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
33817
33818 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
33819 seq_printf(seq, "%-#8x", ddm_table.module_id);
33820 - seq_printf(seq, "%-29s",
33821 - chtostr(ddm_table.module_name_version, 28));
33822 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
33823 seq_printf(seq, "%9d ", ddm_table.data_size);
33824 seq_printf(seq, "%8d", ddm_table.code_size);
33825
33826 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
33827
33828 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
33829 seq_printf(seq, "%-#8x", dst->module_id);
33830 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
33831 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
33832 + seq_printf(seq, "%-.28s", dst->module_name_version);
33833 + seq_printf(seq, "%-.8s", dst->date);
33834 seq_printf(seq, "%8d ", dst->module_size);
33835 seq_printf(seq, "%8d ", dst->mpb_size);
33836 seq_printf(seq, "0x%04x", dst->module_flags);
33837 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
33838 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
33839 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
33840 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
33841 - seq_printf(seq, "Vendor info : %s\n",
33842 - chtostr((u8 *) (work32 + 2), 16));
33843 - seq_printf(seq, "Product info : %s\n",
33844 - chtostr((u8 *) (work32 + 6), 16));
33845 - seq_printf(seq, "Description : %s\n",
33846 - chtostr((u8 *) (work32 + 10), 16));
33847 - seq_printf(seq, "Product rev. : %s\n",
33848 - chtostr((u8 *) (work32 + 14), 8));
33849 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
33850 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
33851 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
33852 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
33853
33854 seq_printf(seq, "Serial number : ");
33855 print_serial_number(seq, (u8 *) (work32 + 16),
33856 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
33857 }
33858
33859 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
33860 - seq_printf(seq, "Module name : %s\n",
33861 - chtostr(result.module_name, 24));
33862 - seq_printf(seq, "Module revision : %s\n",
33863 - chtostr(result.module_rev, 8));
33864 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
33865 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
33866
33867 seq_printf(seq, "Serial number : ");
33868 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
33869 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
33870 return 0;
33871 }
33872
33873 - seq_printf(seq, "Device name : %s\n",
33874 - chtostr(result.device_name, 64));
33875 - seq_printf(seq, "Service name : %s\n",
33876 - chtostr(result.service_name, 64));
33877 - seq_printf(seq, "Physical name : %s\n",
33878 - chtostr(result.physical_location, 64));
33879 - seq_printf(seq, "Instance number : %s\n",
33880 - chtostr(result.instance_number, 4));
33881 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
33882 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
33883 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
33884 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
33885
33886 return 0;
33887 }
33888 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
33889 index a8c08f3..155fe3d 100644
33890 --- a/drivers/message/i2o/iop.c
33891 +++ b/drivers/message/i2o/iop.c
33892 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
33893
33894 spin_lock_irqsave(&c->context_list_lock, flags);
33895
33896 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
33897 - atomic_inc(&c->context_list_counter);
33898 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
33899 + atomic_inc_unchecked(&c->context_list_counter);
33900
33901 - entry->context = atomic_read(&c->context_list_counter);
33902 + entry->context = atomic_read_unchecked(&c->context_list_counter);
33903
33904 list_add(&entry->list, &c->context_list);
33905
33906 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
33907
33908 #if BITS_PER_LONG == 64
33909 spin_lock_init(&c->context_list_lock);
33910 - atomic_set(&c->context_list_counter, 0);
33911 + atomic_set_unchecked(&c->context_list_counter, 0);
33912 INIT_LIST_HEAD(&c->context_list);
33913 #endif
33914
33915 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
33916 index 7ce65f4..e66e9bc 100644
33917 --- a/drivers/mfd/abx500-core.c
33918 +++ b/drivers/mfd/abx500-core.c
33919 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
33920
33921 struct abx500_device_entry {
33922 struct list_head list;
33923 - struct abx500_ops ops;
33924 + abx500_ops_no_const ops;
33925 struct device *dev;
33926 };
33927
33928 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
33929 index a9223ed..4127b13 100644
33930 --- a/drivers/mfd/janz-cmodio.c
33931 +++ b/drivers/mfd/janz-cmodio.c
33932 @@ -13,6 +13,7 @@
33933
33934 #include <linux/kernel.h>
33935 #include <linux/module.h>
33936 +#include <linux/slab.h>
33937 #include <linux/init.h>
33938 #include <linux/pci.h>
33939 #include <linux/interrupt.h>
33940 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
33941 index a981e2a..5ca0c8b 100644
33942 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
33943 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
33944 @@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
33945 * the lid is closed. This leads to interrupts as soon as a little move
33946 * is done.
33947 */
33948 - atomic_inc(&lis3->count);
33949 + atomic_inc_unchecked(&lis3->count);
33950
33951 wake_up_interruptible(&lis3->misc_wait);
33952 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
33953 @@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
33954 if (lis3->pm_dev)
33955 pm_runtime_get_sync(lis3->pm_dev);
33956
33957 - atomic_set(&lis3->count, 0);
33958 + atomic_set_unchecked(&lis3->count, 0);
33959 return 0;
33960 }
33961
33962 @@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
33963 add_wait_queue(&lis3->misc_wait, &wait);
33964 while (true) {
33965 set_current_state(TASK_INTERRUPTIBLE);
33966 - data = atomic_xchg(&lis3->count, 0);
33967 + data = atomic_xchg_unchecked(&lis3->count, 0);
33968 if (data)
33969 break;
33970
33971 @@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
33972 struct lis3lv02d, miscdev);
33973
33974 poll_wait(file, &lis3->misc_wait, wait);
33975 - if (atomic_read(&lis3->count))
33976 + if (atomic_read_unchecked(&lis3->count))
33977 return POLLIN | POLLRDNORM;
33978 return 0;
33979 }
33980 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
33981 index 2b1482a..5d33616 100644
33982 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
33983 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
33984 @@ -266,7 +266,7 @@ struct lis3lv02d {
33985 struct input_polled_dev *idev; /* input device */
33986 struct platform_device *pdev; /* platform device */
33987 struct regulator_bulk_data regulators[2];
33988 - atomic_t count; /* interrupt count after last read */
33989 + atomic_unchecked_t count; /* interrupt count after last read */
33990 union axis_conversion ac; /* hw -> logical axis */
33991 int mapped_btns[3];
33992
33993 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
33994 index 2f30bad..c4c13d0 100644
33995 --- a/drivers/misc/sgi-gru/gruhandles.c
33996 +++ b/drivers/misc/sgi-gru/gruhandles.c
33997 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
33998 unsigned long nsec;
33999
34000 nsec = CLKS2NSEC(clks);
34001 - atomic_long_inc(&mcs_op_statistics[op].count);
34002 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
34003 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34004 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34005 if (mcs_op_statistics[op].max < nsec)
34006 mcs_op_statistics[op].max = nsec;
34007 }
34008 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
34009 index 950dbe9..eeef0f8 100644
34010 --- a/drivers/misc/sgi-gru/gruprocfs.c
34011 +++ b/drivers/misc/sgi-gru/gruprocfs.c
34012 @@ -32,9 +32,9 @@
34013
34014 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34015
34016 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34017 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34018 {
34019 - unsigned long val = atomic_long_read(v);
34020 + unsigned long val = atomic_long_read_unchecked(v);
34021
34022 seq_printf(s, "%16lu %s\n", val, id);
34023 }
34024 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
34025
34026 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
34027 for (op = 0; op < mcsop_last; op++) {
34028 - count = atomic_long_read(&mcs_op_statistics[op].count);
34029 - total = atomic_long_read(&mcs_op_statistics[op].total);
34030 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
34031 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
34032 max = mcs_op_statistics[op].max;
34033 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
34034 count ? total / count : 0, max);
34035 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
34036 index 5c3ce24..4915ccb 100644
34037 --- a/drivers/misc/sgi-gru/grutables.h
34038 +++ b/drivers/misc/sgi-gru/grutables.h
34039 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
34040 * GRU statistics.
34041 */
34042 struct gru_stats_s {
34043 - atomic_long_t vdata_alloc;
34044 - atomic_long_t vdata_free;
34045 - atomic_long_t gts_alloc;
34046 - atomic_long_t gts_free;
34047 - atomic_long_t gms_alloc;
34048 - atomic_long_t gms_free;
34049 - atomic_long_t gts_double_allocate;
34050 - atomic_long_t assign_context;
34051 - atomic_long_t assign_context_failed;
34052 - atomic_long_t free_context;
34053 - atomic_long_t load_user_context;
34054 - atomic_long_t load_kernel_context;
34055 - atomic_long_t lock_kernel_context;
34056 - atomic_long_t unlock_kernel_context;
34057 - atomic_long_t steal_user_context;
34058 - atomic_long_t steal_kernel_context;
34059 - atomic_long_t steal_context_failed;
34060 - atomic_long_t nopfn;
34061 - atomic_long_t asid_new;
34062 - atomic_long_t asid_next;
34063 - atomic_long_t asid_wrap;
34064 - atomic_long_t asid_reuse;
34065 - atomic_long_t intr;
34066 - atomic_long_t intr_cbr;
34067 - atomic_long_t intr_tfh;
34068 - atomic_long_t intr_spurious;
34069 - atomic_long_t intr_mm_lock_failed;
34070 - atomic_long_t call_os;
34071 - atomic_long_t call_os_wait_queue;
34072 - atomic_long_t user_flush_tlb;
34073 - atomic_long_t user_unload_context;
34074 - atomic_long_t user_exception;
34075 - atomic_long_t set_context_option;
34076 - atomic_long_t check_context_retarget_intr;
34077 - atomic_long_t check_context_unload;
34078 - atomic_long_t tlb_dropin;
34079 - atomic_long_t tlb_preload_page;
34080 - atomic_long_t tlb_dropin_fail_no_asid;
34081 - atomic_long_t tlb_dropin_fail_upm;
34082 - atomic_long_t tlb_dropin_fail_invalid;
34083 - atomic_long_t tlb_dropin_fail_range_active;
34084 - atomic_long_t tlb_dropin_fail_idle;
34085 - atomic_long_t tlb_dropin_fail_fmm;
34086 - atomic_long_t tlb_dropin_fail_no_exception;
34087 - atomic_long_t tfh_stale_on_fault;
34088 - atomic_long_t mmu_invalidate_range;
34089 - atomic_long_t mmu_invalidate_page;
34090 - atomic_long_t flush_tlb;
34091 - atomic_long_t flush_tlb_gru;
34092 - atomic_long_t flush_tlb_gru_tgh;
34093 - atomic_long_t flush_tlb_gru_zero_asid;
34094 + atomic_long_unchecked_t vdata_alloc;
34095 + atomic_long_unchecked_t vdata_free;
34096 + atomic_long_unchecked_t gts_alloc;
34097 + atomic_long_unchecked_t gts_free;
34098 + atomic_long_unchecked_t gms_alloc;
34099 + atomic_long_unchecked_t gms_free;
34100 + atomic_long_unchecked_t gts_double_allocate;
34101 + atomic_long_unchecked_t assign_context;
34102 + atomic_long_unchecked_t assign_context_failed;
34103 + atomic_long_unchecked_t free_context;
34104 + atomic_long_unchecked_t load_user_context;
34105 + atomic_long_unchecked_t load_kernel_context;
34106 + atomic_long_unchecked_t lock_kernel_context;
34107 + atomic_long_unchecked_t unlock_kernel_context;
34108 + atomic_long_unchecked_t steal_user_context;
34109 + atomic_long_unchecked_t steal_kernel_context;
34110 + atomic_long_unchecked_t steal_context_failed;
34111 + atomic_long_unchecked_t nopfn;
34112 + atomic_long_unchecked_t asid_new;
34113 + atomic_long_unchecked_t asid_next;
34114 + atomic_long_unchecked_t asid_wrap;
34115 + atomic_long_unchecked_t asid_reuse;
34116 + atomic_long_unchecked_t intr;
34117 + atomic_long_unchecked_t intr_cbr;
34118 + atomic_long_unchecked_t intr_tfh;
34119 + atomic_long_unchecked_t intr_spurious;
34120 + atomic_long_unchecked_t intr_mm_lock_failed;
34121 + atomic_long_unchecked_t call_os;
34122 + atomic_long_unchecked_t call_os_wait_queue;
34123 + atomic_long_unchecked_t user_flush_tlb;
34124 + atomic_long_unchecked_t user_unload_context;
34125 + atomic_long_unchecked_t user_exception;
34126 + atomic_long_unchecked_t set_context_option;
34127 + atomic_long_unchecked_t check_context_retarget_intr;
34128 + atomic_long_unchecked_t check_context_unload;
34129 + atomic_long_unchecked_t tlb_dropin;
34130 + atomic_long_unchecked_t tlb_preload_page;
34131 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
34132 + atomic_long_unchecked_t tlb_dropin_fail_upm;
34133 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
34134 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
34135 + atomic_long_unchecked_t tlb_dropin_fail_idle;
34136 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
34137 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
34138 + atomic_long_unchecked_t tfh_stale_on_fault;
34139 + atomic_long_unchecked_t mmu_invalidate_range;
34140 + atomic_long_unchecked_t mmu_invalidate_page;
34141 + atomic_long_unchecked_t flush_tlb;
34142 + atomic_long_unchecked_t flush_tlb_gru;
34143 + atomic_long_unchecked_t flush_tlb_gru_tgh;
34144 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
34145
34146 - atomic_long_t copy_gpa;
34147 - atomic_long_t read_gpa;
34148 + atomic_long_unchecked_t copy_gpa;
34149 + atomic_long_unchecked_t read_gpa;
34150
34151 - atomic_long_t mesq_receive;
34152 - atomic_long_t mesq_receive_none;
34153 - atomic_long_t mesq_send;
34154 - atomic_long_t mesq_send_failed;
34155 - atomic_long_t mesq_noop;
34156 - atomic_long_t mesq_send_unexpected_error;
34157 - atomic_long_t mesq_send_lb_overflow;
34158 - atomic_long_t mesq_send_qlimit_reached;
34159 - atomic_long_t mesq_send_amo_nacked;
34160 - atomic_long_t mesq_send_put_nacked;
34161 - atomic_long_t mesq_page_overflow;
34162 - atomic_long_t mesq_qf_locked;
34163 - atomic_long_t mesq_qf_noop_not_full;
34164 - atomic_long_t mesq_qf_switch_head_failed;
34165 - atomic_long_t mesq_qf_unexpected_error;
34166 - atomic_long_t mesq_noop_unexpected_error;
34167 - atomic_long_t mesq_noop_lb_overflow;
34168 - atomic_long_t mesq_noop_qlimit_reached;
34169 - atomic_long_t mesq_noop_amo_nacked;
34170 - atomic_long_t mesq_noop_put_nacked;
34171 - atomic_long_t mesq_noop_page_overflow;
34172 + atomic_long_unchecked_t mesq_receive;
34173 + atomic_long_unchecked_t mesq_receive_none;
34174 + atomic_long_unchecked_t mesq_send;
34175 + atomic_long_unchecked_t mesq_send_failed;
34176 + atomic_long_unchecked_t mesq_noop;
34177 + atomic_long_unchecked_t mesq_send_unexpected_error;
34178 + atomic_long_unchecked_t mesq_send_lb_overflow;
34179 + atomic_long_unchecked_t mesq_send_qlimit_reached;
34180 + atomic_long_unchecked_t mesq_send_amo_nacked;
34181 + atomic_long_unchecked_t mesq_send_put_nacked;
34182 + atomic_long_unchecked_t mesq_page_overflow;
34183 + atomic_long_unchecked_t mesq_qf_locked;
34184 + atomic_long_unchecked_t mesq_qf_noop_not_full;
34185 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
34186 + atomic_long_unchecked_t mesq_qf_unexpected_error;
34187 + atomic_long_unchecked_t mesq_noop_unexpected_error;
34188 + atomic_long_unchecked_t mesq_noop_lb_overflow;
34189 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
34190 + atomic_long_unchecked_t mesq_noop_amo_nacked;
34191 + atomic_long_unchecked_t mesq_noop_put_nacked;
34192 + atomic_long_unchecked_t mesq_noop_page_overflow;
34193
34194 };
34195
34196 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
34197 tghop_invalidate, mcsop_last};
34198
34199 struct mcs_op_statistic {
34200 - atomic_long_t count;
34201 - atomic_long_t total;
34202 + atomic_long_unchecked_t count;
34203 + atomic_long_unchecked_t total;
34204 unsigned long max;
34205 };
34206
34207 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
34208
34209 #define STAT(id) do { \
34210 if (gru_options & OPT_STATS) \
34211 - atomic_long_inc(&gru_stats.id); \
34212 + atomic_long_inc_unchecked(&gru_stats.id); \
34213 } while (0)
34214
34215 #ifdef CONFIG_SGI_GRU_DEBUG
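The hunks above convert the SGI GRU driver's statistics counters from atomic_long_t to atomic_long_unchecked_t and point the STAT() macro at atomic_long_inc_unchecked(). Under the PaX REFCOUNT hardening introduced elsewhere in this patch, ordinary atomic counters are overflow-checked so that a wrapping reference count is caught; counters that exist purely for accounting are moved to the unchecked variants so a harmless wrap is not reported as an overflow. A minimal sketch of how the unchecked type is expected to degrade on a configuration without the feature (the real definitions live elsewhere in the patch, so treat these lines as illustrative):

#include <linux/atomic.h>

#ifndef CONFIG_PAX_REFCOUNT
/* Without REFCOUNT hardening the unchecked variants alias the plain types. */
typedef atomic_long_t atomic_long_unchecked_t;
#define atomic_long_inc_unchecked(v)   atomic_long_inc(v)
#define atomic_long_read_unchecked(v)  atomic_long_read(v)
#endif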
34216 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
34217 index c862cd4..0d176fe 100644
34218 --- a/drivers/misc/sgi-xp/xp.h
34219 +++ b/drivers/misc/sgi-xp/xp.h
34220 @@ -288,7 +288,7 @@ struct xpc_interface {
34221 xpc_notify_func, void *);
34222 void (*received) (short, int, void *);
34223 enum xp_retval (*partid_to_nasids) (short, void *);
34224 -};
34225 +} __no_const;
34226
34227 extern struct xpc_interface xpc_interface;
34228
34229 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
34230 index b94d5f7..7f494c5 100644
34231 --- a/drivers/misc/sgi-xp/xpc.h
34232 +++ b/drivers/misc/sgi-xp/xpc.h
34233 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
34234 void (*received_payload) (struct xpc_channel *, void *);
34235 void (*notify_senders_of_disconnect) (struct xpc_channel *);
34236 };
34237 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
34238
34239 /* struct xpc_partition act_state values (for XPC HB) */
34240
34241 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
34242 /* found in xpc_main.c */
34243 extern struct device *xpc_part;
34244 extern struct device *xpc_chan;
34245 -extern struct xpc_arch_operations xpc_arch_ops;
34246 +extern xpc_arch_operations_no_const xpc_arch_ops;
34247 extern int xpc_disengage_timelimit;
34248 extern int xpc_disengage_timedout;
34249 extern int xpc_activate_IRQ_rcvd;
34250 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
34251 index 8d082b4..aa749ae 100644
34252 --- a/drivers/misc/sgi-xp/xpc_main.c
34253 +++ b/drivers/misc/sgi-xp/xpc_main.c
34254 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
34255 .notifier_call = xpc_system_die,
34256 };
34257
34258 -struct xpc_arch_operations xpc_arch_ops;
34259 +xpc_arch_operations_no_const xpc_arch_ops;
34260
34261 /*
34262 * Timer function to enforce the timelimit on the partition disengage.
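xpc_arch_ops is filled in at runtime depending on the interconnect in use, so it cannot live in read-only memory; the typedef ... __no_const pattern above lets the code declare a writable instance of an ops structure that the constify gcc plugin would otherwise force const. A sketch of the pattern with made-up names (struct example_ops, active_ops and select_backend are illustrative, and __no_const is assumed to expand to nothing when the plugin is not in use):

struct example_ops {
        int  (*setup)(void *ctx);
        void (*teardown)(void *ctx);
};
typedef struct example_ops __no_const example_ops_no_const;

/* A plain struct example_ops instance would be constified by the plugin;
 * the typedef'd variant stays writable so it can be filled in at probe time. */
static example_ops_no_const active_ops;

static void select_backend(const struct example_ops *impl)
{
        active_ops = *impl;
}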
34263 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
34264 index 69ef0be..f3ef91e 100644
34265 --- a/drivers/mmc/host/sdhci-pci.c
34266 +++ b/drivers/mmc/host/sdhci-pci.c
34267 @@ -652,7 +652,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
34268 .probe = via_probe,
34269 };
34270
34271 -static const struct pci_device_id pci_ids[] __devinitdata = {
34272 +static const struct pci_device_id pci_ids[] __devinitconst = {
34273 {
34274 .vendor = PCI_VENDOR_ID_RICOH,
34275 .device = PCI_DEVICE_ID_RICOH_R5C822,
34276 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
34277 index a4eb8b5..8c0628f 100644
34278 --- a/drivers/mtd/devices/doc2000.c
34279 +++ b/drivers/mtd/devices/doc2000.c
34280 @@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
34281
34282 /* The ECC will not be calculated correctly if less than 512 is written */
34283 /* DBB-
34284 - if (len != 0x200 && eccbuf)
34285 + if (len != 0x200)
34286 printk(KERN_WARNING
34287 "ECC needs a full sector write (adr: %lx size %lx)\n",
34288 (long) to, (long) len);
34289 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
34290 index a9e57d6..c6d8731 100644
34291 --- a/drivers/mtd/nand/denali.c
34292 +++ b/drivers/mtd/nand/denali.c
34293 @@ -26,6 +26,7 @@
34294 #include <linux/pci.h>
34295 #include <linux/mtd/mtd.h>
34296 #include <linux/module.h>
34297 +#include <linux/slab.h>
34298
34299 #include "denali.h"
34300
34301 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
34302 index 51b9d6a..52af9a7 100644
34303 --- a/drivers/mtd/nftlmount.c
34304 +++ b/drivers/mtd/nftlmount.c
34305 @@ -24,6 +24,7 @@
34306 #include <asm/errno.h>
34307 #include <linux/delay.h>
34308 #include <linux/slab.h>
34309 +#include <linux/sched.h>
34310 #include <linux/mtd/mtd.h>
34311 #include <linux/mtd/nand.h>
34312 #include <linux/mtd/nftl.h>
34313 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
34314 index 6762dc4..9956862 100644
34315 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
34316 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
34317 @@ -2859,7 +2859,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
34318 */
34319
34320 #define ATL2_PARAM(X, desc) \
34321 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34322 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34323 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
34324 MODULE_PARM_DESC(X, desc);
34325 #else
34326 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34327 index 61a7670..7da6e34 100644
34328 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34329 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34330 @@ -483,7 +483,7 @@ struct bnx2x_rx_mode_obj {
34331
34332 int (*wait_comp)(struct bnx2x *bp,
34333 struct bnx2x_rx_mode_ramrod_params *p);
34334 -};
34335 +} __no_const;
34336
34337 /********************** Set multicast group ***********************************/
34338
34339 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
34340 index 93865f8..5448741 100644
34341 --- a/drivers/net/ethernet/broadcom/tg3.h
34342 +++ b/drivers/net/ethernet/broadcom/tg3.h
34343 @@ -140,6 +140,7 @@
34344 #define CHIPREV_ID_5750_A0 0x4000
34345 #define CHIPREV_ID_5750_A1 0x4001
34346 #define CHIPREV_ID_5750_A3 0x4003
34347 +#define CHIPREV_ID_5750_C1 0x4201
34348 #define CHIPREV_ID_5750_C2 0x4202
34349 #define CHIPREV_ID_5752_A0_HW 0x5000
34350 #define CHIPREV_ID_5752_A0 0x6000
34351 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34352 index c4e8643..0979484 100644
34353 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34354 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34355 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
34356 */
34357 struct l2t_skb_cb {
34358 arp_failure_handler_func arp_failure_handler;
34359 -};
34360 +} __no_const;
34361
34362 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34363
34364 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
34365 index 18b106c..2b38d36 100644
34366 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
34367 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
34368 @@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34369 for (i=0; i<ETH_ALEN; i++) {
34370 tmp.addr[i] = dev->dev_addr[i];
34371 }
34372 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34373 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34374 break;
34375
34376 case DE4X5_SET_HWADDR: /* Set the hardware address */
34377 @@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34378 spin_lock_irqsave(&lp->lock, flags);
34379 memcpy(&statbuf, &lp->pktStats, ioc->len);
34380 spin_unlock_irqrestore(&lp->lock, flags);
34381 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
34382 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34383 return -EFAULT;
34384 break;
34385 }
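The two de4x5 ioctl hunks above add a bound check before copy_to_user(): ioc->len comes from userspace, and without the check a request larger than the kernel-side buffer (tmp.addr or statbuf) would copy adjacent stack memory out to the caller. The guard pattern as a self-contained sketch (copy_bounded and its parameters are hypothetical names):

#include <linux/uaccess.h>

static int copy_bounded(void __user *dst, const void *src,
                        size_t user_len, size_t buf_len)
{
        /* Refuse user-controlled lengths that exceed the kernel buffer. */
        if (user_len > buf_len)
                return -EFAULT;
        return copy_to_user(dst, src, user_len) ? -EFAULT : 0;
}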
34386 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
34387 index ed7d1dc..d426748 100644
34388 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
34389 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
34390 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34391 {NULL}};
34392
34393
34394 -static const char *block_name[] __devinitdata = {
34395 +static const char *block_name[] __devinitconst = {
34396 "21140 non-MII",
34397 "21140 MII PHY",
34398 "21142 Serial PHY",
34399 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
34400 index 2ac6fff..2d127d0 100644
34401 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
34402 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
34403 @@ -236,7 +236,7 @@ struct pci_id_info {
34404 int drv_flags; /* Driver use, intended as capability flags. */
34405 };
34406
34407 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34408 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34409 { /* Sometime a Level-One switch card. */
34410 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34411 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34412 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
34413 index d783f4f..97fa1b0 100644
34414 --- a/drivers/net/ethernet/dlink/sundance.c
34415 +++ b/drivers/net/ethernet/dlink/sundance.c
34416 @@ -218,7 +218,7 @@ enum {
34417 struct pci_id_info {
34418 const char *name;
34419 };
34420 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34421 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34422 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34423 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34424 {"D-Link DFE-580TX 4 port Server Adapter"},
34425 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
34426 index 528a886..e6a98a3 100644
34427 --- a/drivers/net/ethernet/emulex/benet/be_main.c
34428 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
34429 @@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
34430
34431 if (wrapped)
34432 newacc += 65536;
34433 - ACCESS_ONCE(*acc) = newacc;
34434 + ACCESS_ONCE_RW(*acc) = newacc;
34435 }
34436
34437 void be_parse_stats(struct be_adapter *adapter)
34438 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
34439 index 16b0704..d2c07d7 100644
34440 --- a/drivers/net/ethernet/faraday/ftgmac100.c
34441 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
34442 @@ -31,6 +31,8 @@
34443 #include <linux/netdevice.h>
34444 #include <linux/phy.h>
34445 #include <linux/platform_device.h>
34446 +#include <linux/interrupt.h>
34447 +#include <linux/irqreturn.h>
34448 #include <net/ip.h>
34449
34450 #include "ftgmac100.h"
34451 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
34452 index 829b109..4ae5f6a 100644
34453 --- a/drivers/net/ethernet/faraday/ftmac100.c
34454 +++ b/drivers/net/ethernet/faraday/ftmac100.c
34455 @@ -31,6 +31,8 @@
34456 #include <linux/module.h>
34457 #include <linux/netdevice.h>
34458 #include <linux/platform_device.h>
34459 +#include <linux/interrupt.h>
34460 +#include <linux/irqreturn.h>
34461
34462 #include "ftmac100.h"
34463
34464 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
34465 index 1637b98..c42f87b 100644
34466 --- a/drivers/net/ethernet/fealnx.c
34467 +++ b/drivers/net/ethernet/fealnx.c
34468 @@ -150,7 +150,7 @@ struct chip_info {
34469 int flags;
34470 };
34471
34472 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34473 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34474 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34475 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34476 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34477 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
34478 index f82ecf5..7d59ecb 100644
34479 --- a/drivers/net/ethernet/intel/e1000e/hw.h
34480 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
34481 @@ -784,6 +784,7 @@ struct e1000_mac_operations {
34482 void (*config_collision_dist)(struct e1000_hw *);
34483 s32 (*read_mac_addr)(struct e1000_hw *);
34484 };
34485 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34486
34487 /*
34488 * When to use various PHY register access functions:
34489 @@ -824,6 +825,7 @@ struct e1000_phy_operations {
34490 void (*power_up)(struct e1000_hw *);
34491 void (*power_down)(struct e1000_hw *);
34492 };
34493 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34494
34495 /* Function pointers for the NVM. */
34496 struct e1000_nvm_operations {
34497 @@ -836,9 +838,10 @@ struct e1000_nvm_operations {
34498 s32 (*validate)(struct e1000_hw *);
34499 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34500 };
34501 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34502
34503 struct e1000_mac_info {
34504 - struct e1000_mac_operations ops;
34505 + e1000_mac_operations_no_const ops;
34506 u8 addr[ETH_ALEN];
34507 u8 perm_addr[ETH_ALEN];
34508
34509 @@ -879,7 +882,7 @@ struct e1000_mac_info {
34510 };
34511
34512 struct e1000_phy_info {
34513 - struct e1000_phy_operations ops;
34514 + e1000_phy_operations_no_const ops;
34515
34516 enum e1000_phy_type type;
34517
34518 @@ -913,7 +916,7 @@ struct e1000_phy_info {
34519 };
34520
34521 struct e1000_nvm_info {
34522 - struct e1000_nvm_operations ops;
34523 + e1000_nvm_operations_no_const ops;
34524
34525 enum e1000_nvm_type type;
34526 enum e1000_nvm_override override;
34527 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34528 index f67cbd3..cef9e3d 100644
34529 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34530 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34531 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
34532 s32 (*read_mac_addr)(struct e1000_hw *);
34533 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
34534 };
34535 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34536
34537 struct e1000_phy_operations {
34538 s32 (*acquire)(struct e1000_hw *);
34539 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
34540 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34541 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34542 };
34543 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34544
34545 struct e1000_nvm_operations {
34546 s32 (*acquire)(struct e1000_hw *);
34547 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
34548 s32 (*update)(struct e1000_hw *);
34549 s32 (*validate)(struct e1000_hw *);
34550 };
34551 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34552
34553 struct e1000_info {
34554 s32 (*get_invariants)(struct e1000_hw *);
34555 @@ -350,7 +353,7 @@ struct e1000_info {
34556 extern const struct e1000_info e1000_82575_info;
34557
34558 struct e1000_mac_info {
34559 - struct e1000_mac_operations ops;
34560 + e1000_mac_operations_no_const ops;
34561
34562 u8 addr[6];
34563 u8 perm_addr[6];
34564 @@ -388,7 +391,7 @@ struct e1000_mac_info {
34565 };
34566
34567 struct e1000_phy_info {
34568 - struct e1000_phy_operations ops;
34569 + e1000_phy_operations_no_const ops;
34570
34571 enum e1000_phy_type type;
34572
34573 @@ -423,7 +426,7 @@ struct e1000_phy_info {
34574 };
34575
34576 struct e1000_nvm_info {
34577 - struct e1000_nvm_operations ops;
34578 + e1000_nvm_operations_no_const ops;
34579 enum e1000_nvm_type type;
34580 enum e1000_nvm_override override;
34581
34582 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
34583 s32 (*check_for_ack)(struct e1000_hw *, u16);
34584 s32 (*check_for_rst)(struct e1000_hw *, u16);
34585 };
34586 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34587
34588 struct e1000_mbx_stats {
34589 u32 msgs_tx;
34590 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
34591 };
34592
34593 struct e1000_mbx_info {
34594 - struct e1000_mbx_operations ops;
34595 + e1000_mbx_operations_no_const ops;
34596 struct e1000_mbx_stats stats;
34597 u32 timeout;
34598 u32 usec_delay;
34599 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
34600 index 57db3c6..aa825fc 100644
34601 --- a/drivers/net/ethernet/intel/igbvf/vf.h
34602 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
34603 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
34604 s32 (*read_mac_addr)(struct e1000_hw *);
34605 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
34606 };
34607 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34608
34609 struct e1000_mac_info {
34610 - struct e1000_mac_operations ops;
34611 + e1000_mac_operations_no_const ops;
34612 u8 addr[6];
34613 u8 perm_addr[6];
34614
34615 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
34616 s32 (*check_for_ack)(struct e1000_hw *);
34617 s32 (*check_for_rst)(struct e1000_hw *);
34618 };
34619 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34620
34621 struct e1000_mbx_stats {
34622 u32 msgs_tx;
34623 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
34624 };
34625
34626 struct e1000_mbx_info {
34627 - struct e1000_mbx_operations ops;
34628 + e1000_mbx_operations_no_const ops;
34629 struct e1000_mbx_stats stats;
34630 u32 timeout;
34631 u32 usec_delay;
34632 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34633 index 8636e83..ab9bbc3 100644
34634 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34635 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34636 @@ -2710,6 +2710,7 @@ struct ixgbe_eeprom_operations {
34637 s32 (*update_checksum)(struct ixgbe_hw *);
34638 u16 (*calc_checksum)(struct ixgbe_hw *);
34639 };
34640 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
34641
34642 struct ixgbe_mac_operations {
34643 s32 (*init_hw)(struct ixgbe_hw *);
34644 @@ -2773,6 +2774,7 @@ struct ixgbe_mac_operations {
34645 /* Manageability interface */
34646 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
34647 };
34648 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34649
34650 struct ixgbe_phy_operations {
34651 s32 (*identify)(struct ixgbe_hw *);
34652 @@ -2792,9 +2794,10 @@ struct ixgbe_phy_operations {
34653 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
34654 s32 (*check_overtemp)(struct ixgbe_hw *);
34655 };
34656 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
34657
34658 struct ixgbe_eeprom_info {
34659 - struct ixgbe_eeprom_operations ops;
34660 + ixgbe_eeprom_operations_no_const ops;
34661 enum ixgbe_eeprom_type type;
34662 u32 semaphore_delay;
34663 u16 word_size;
34664 @@ -2804,7 +2807,7 @@ struct ixgbe_eeprom_info {
34665
34666 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
34667 struct ixgbe_mac_info {
34668 - struct ixgbe_mac_operations ops;
34669 + ixgbe_mac_operations_no_const ops;
34670 enum ixgbe_mac_type type;
34671 u8 addr[ETH_ALEN];
34672 u8 perm_addr[ETH_ALEN];
34673 @@ -2832,7 +2835,7 @@ struct ixgbe_mac_info {
34674 };
34675
34676 struct ixgbe_phy_info {
34677 - struct ixgbe_phy_operations ops;
34678 + ixgbe_phy_operations_no_const ops;
34679 struct mdio_if_info mdio;
34680 enum ixgbe_phy_type type;
34681 u32 id;
34682 @@ -2860,6 +2863,7 @@ struct ixgbe_mbx_operations {
34683 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
34684 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
34685 };
34686 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34687
34688 struct ixgbe_mbx_stats {
34689 u32 msgs_tx;
34690 @@ -2871,7 +2875,7 @@ struct ixgbe_mbx_stats {
34691 };
34692
34693 struct ixgbe_mbx_info {
34694 - struct ixgbe_mbx_operations ops;
34695 + ixgbe_mbx_operations_no_const ops;
34696 struct ixgbe_mbx_stats stats;
34697 u32 timeout;
34698 u32 usec_delay;
34699 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
34700 index 25c951d..cc7cf33 100644
34701 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
34702 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
34703 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
34704 s32 (*clear_vfta)(struct ixgbe_hw *);
34705 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
34706 };
34707 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34708
34709 enum ixgbe_mac_type {
34710 ixgbe_mac_unknown = 0,
34711 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
34712 };
34713
34714 struct ixgbe_mac_info {
34715 - struct ixgbe_mac_operations ops;
34716 + ixgbe_mac_operations_no_const ops;
34717 u8 addr[6];
34718 u8 perm_addr[6];
34719
34720 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
34721 s32 (*check_for_ack)(struct ixgbe_hw *);
34722 s32 (*check_for_rst)(struct ixgbe_hw *);
34723 };
34724 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34725
34726 struct ixgbe_mbx_stats {
34727 u32 msgs_tx;
34728 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
34729 };
34730
34731 struct ixgbe_mbx_info {
34732 - struct ixgbe_mbx_operations ops;
34733 + ixgbe_mbx_operations_no_const ops;
34734 struct ixgbe_mbx_stats stats;
34735 u32 timeout;
34736 u32 udelay;
34737 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
34738 index 8bb05b4..074796f 100644
34739 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
34740 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
34741 @@ -41,6 +41,7 @@
34742 #include <linux/slab.h>
34743 #include <linux/io-mapping.h>
34744 #include <linux/delay.h>
34745 +#include <linux/sched.h>
34746
34747 #include <linux/mlx4/device.h>
34748 #include <linux/mlx4/doorbell.h>
34749 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34750 index 5046a64..71ca936 100644
34751 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
34752 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34753 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
34754 void (*link_down)(struct __vxge_hw_device *devh);
34755 void (*crit_err)(struct __vxge_hw_device *devh,
34756 enum vxge_hw_event type, u64 ext_data);
34757 -};
34758 +} __no_const;
34759
34760 /*
34761 * struct __vxge_hw_blockpool_entry - Block private data structure
34762 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34763 index 4a518a3..936b334 100644
34764 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34765 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34766 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
34767 struct vxge_hw_mempool_dma *dma_object,
34768 u32 index,
34769 u32 is_last);
34770 -};
34771 +} __no_const;
34772
34773 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
34774 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
34775 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
34776 index ce6b44d..74f10c2 100644
34777 --- a/drivers/net/ethernet/realtek/r8169.c
34778 +++ b/drivers/net/ethernet/realtek/r8169.c
34779 @@ -708,17 +708,17 @@ struct rtl8169_private {
34780 struct mdio_ops {
34781 void (*write)(void __iomem *, int, int);
34782 int (*read)(void __iomem *, int);
34783 - } mdio_ops;
34784 + } __no_const mdio_ops;
34785
34786 struct pll_power_ops {
34787 void (*down)(struct rtl8169_private *);
34788 void (*up)(struct rtl8169_private *);
34789 - } pll_power_ops;
34790 + } __no_const pll_power_ops;
34791
34792 struct jumbo_ops {
34793 void (*enable)(struct rtl8169_private *);
34794 void (*disable)(struct rtl8169_private *);
34795 - } jumbo_ops;
34796 + } __no_const jumbo_ops;
34797
34798 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
34799 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
34800 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
34801 index a9deda8..5507c31 100644
34802 --- a/drivers/net/ethernet/sis/sis190.c
34803 +++ b/drivers/net/ethernet/sis/sis190.c
34804 @@ -1620,7 +1620,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
34805 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
34806 struct net_device *dev)
34807 {
34808 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
34809 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
34810 struct sis190_private *tp = netdev_priv(dev);
34811 struct pci_dev *isa_bridge;
34812 u8 reg, tmp8;
34813 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34814 index c07cfe9..81cbf7e 100644
34815 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34816 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34817 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
34818
34819 writel(value, ioaddr + MMC_CNTRL);
34820
34821 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34822 - MMC_CNTRL, value);
34823 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34824 +// MMC_CNTRL, value);
34825 }
34826
34827 /* To mask all all interrupts.*/
34828 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
34829 index 48d56da..a27e46c 100644
34830 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
34831 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
34832 @@ -1584,7 +1584,7 @@ static const struct file_operations stmmac_rings_status_fops = {
34833 .open = stmmac_sysfs_ring_open,
34834 .read = seq_read,
34835 .llseek = seq_lseek,
34836 - .release = seq_release,
34837 + .release = single_release,
34838 };
34839
34840 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
34841 @@ -1656,7 +1656,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
34842 .open = stmmac_sysfs_dma_cap_open,
34843 .read = seq_read,
34844 .llseek = seq_lseek,
34845 - .release = seq_release,
34846 + .release = single_release,
34847 };
34848
34849 static int stmmac_init_fs(struct net_device *dev)
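The open routines for these stmmac debugfs files use single_open(), so the matching release hook is single_release(): it frees the seq_operations that single_open() allocates before handing off to the normal seq_file teardown, whereas a bare seq_release() leaks that allocation on every close. The correct pairing, sketched with a hypothetical show routine:

#include <linux/fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *seq, void *v)
{
        seq_puts(seq, "example\n");
        return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
        return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
        .owner   = THIS_MODULE,
        .open    = example_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,      /* pairs with single_open() above */
};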
34850 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
34851 index c358245..8c1de63 100644
34852 --- a/drivers/net/hyperv/hyperv_net.h
34853 +++ b/drivers/net/hyperv/hyperv_net.h
34854 @@ -98,7 +98,7 @@ struct rndis_device {
34855
34856 enum rndis_device_state state;
34857 bool link_state;
34858 - atomic_t new_req_id;
34859 + atomic_unchecked_t new_req_id;
34860
34861 spinlock_t request_lock;
34862 struct list_head req_list;
34863 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
34864 index d6be64b..5d97e3b 100644
34865 --- a/drivers/net/hyperv/rndis_filter.c
34866 +++ b/drivers/net/hyperv/rndis_filter.c
34867 @@ -97,7 +97,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
34868 * template
34869 */
34870 set = &rndis_msg->msg.set_req;
34871 - set->req_id = atomic_inc_return(&dev->new_req_id);
34872 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34873
34874 /* Add to the request list */
34875 spin_lock_irqsave(&dev->request_lock, flags);
34876 @@ -648,7 +648,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
34877
34878 /* Setup the rndis set */
34879 halt = &request->request_msg.msg.halt_req;
34880 - halt->req_id = atomic_inc_return(&dev->new_req_id);
34881 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34882
34883 /* Ignore return since this msg is optional. */
34884 rndis_filter_send_request(dev, request);
34885 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
34886 index cb8fd50..003ec38 100644
34887 --- a/drivers/net/macvtap.c
34888 +++ b/drivers/net/macvtap.c
34889 @@ -528,6 +528,8 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
34890 }
34891 base = (unsigned long)from->iov_base + offset1;
34892 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
34893 + if (i + size >= MAX_SKB_FRAGS)
34894 + return -EFAULT;
34895 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
34896 if ((num_pages != size) ||
34897 (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
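The macvtap hunk above adds a capacity check before get_user_pages_fast(): a single user iovec segment can span many pages, and without the check a large enough segment lets the page index run past the skb fragment array before the later nr_frags comparison is reached. As a standalone sketch of the bound (helper name hypothetical):

#include <linux/skbuff.h>

static int frag_capacity_ok(int next_frag, int pages_needed)
{
        /* The pages mapped for this segment must still fit inside the
         * skb's MAX_SKB_FRAGS-sized fragment array. */
        return next_frag + pages_needed < MAX_SKB_FRAGS;
}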
34898 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
34899 index 21d7151..8034208 100644
34900 --- a/drivers/net/ppp/ppp_generic.c
34901 +++ b/drivers/net/ppp/ppp_generic.c
34902 @@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34903 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
34904 struct ppp_stats stats;
34905 struct ppp_comp_stats cstats;
34906 - char *vers;
34907
34908 switch (cmd) {
34909 case SIOCGPPPSTATS:
34910 @@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34911 break;
34912
34913 case SIOCGPPPVER:
34914 - vers = PPP_VERSION;
34915 - if (copy_to_user(addr, vers, strlen(vers) + 1))
34916 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
34917 break;
34918 err = 0;
34919 break;
34920 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
34921 index b715e6b..6d2490f 100644
34922 --- a/drivers/net/tokenring/abyss.c
34923 +++ b/drivers/net/tokenring/abyss.c
34924 @@ -450,10 +450,12 @@ static struct pci_driver abyss_driver = {
34925
34926 static int __init abyss_init (void)
34927 {
34928 - abyss_netdev_ops = tms380tr_netdev_ops;
34929 + pax_open_kernel();
34930 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34931
34932 - abyss_netdev_ops.ndo_open = abyss_open;
34933 - abyss_netdev_ops.ndo_stop = abyss_close;
34934 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34935 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34936 + pax_close_kernel();
34937
34938 return pci_register_driver(&abyss_driver);
34939 }
34940 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
34941 index 28adcdf..ae82f35 100644
34942 --- a/drivers/net/tokenring/madgemc.c
34943 +++ b/drivers/net/tokenring/madgemc.c
34944 @@ -742,9 +742,11 @@ static struct mca_driver madgemc_driver = {
34945
34946 static int __init madgemc_init (void)
34947 {
34948 - madgemc_netdev_ops = tms380tr_netdev_ops;
34949 - madgemc_netdev_ops.ndo_open = madgemc_open;
34950 - madgemc_netdev_ops.ndo_stop = madgemc_close;
34951 + pax_open_kernel();
34952 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34953 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34954 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34955 + pax_close_kernel();
34956
34957 return mca_register_driver (&madgemc_driver);
34958 }
34959 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
34960 index 62d90e4..9d84237 100644
34961 --- a/drivers/net/tokenring/proteon.c
34962 +++ b/drivers/net/tokenring/proteon.c
34963 @@ -352,9 +352,11 @@ static int __init proteon_init(void)
34964 struct platform_device *pdev;
34965 int i, num = 0, err = 0;
34966
34967 - proteon_netdev_ops = tms380tr_netdev_ops;
34968 - proteon_netdev_ops.ndo_open = proteon_open;
34969 - proteon_netdev_ops.ndo_stop = tms380tr_close;
34970 + pax_open_kernel();
34971 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34972 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34973 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34974 + pax_close_kernel();
34975
34976 err = platform_driver_register(&proteon_driver);
34977 if (err)
34978 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
34979 index ee11e93..c8f19c7 100644
34980 --- a/drivers/net/tokenring/skisa.c
34981 +++ b/drivers/net/tokenring/skisa.c
34982 @@ -362,9 +362,11 @@ static int __init sk_isa_init(void)
34983 struct platform_device *pdev;
34984 int i, num = 0, err = 0;
34985
34986 - sk_isa_netdev_ops = tms380tr_netdev_ops;
34987 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
34988 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34989 + pax_open_kernel();
34990 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34991 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34992 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34993 + pax_close_kernel();
34994
34995 err = platform_driver_register(&sk_isa_driver);
34996 if (err)
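The four token-ring drivers above all clone tms380tr_netdev_ops into a driver-local net_device_ops and override two callbacks at init time. With the constify plugin and KERNEXEC enabled elsewhere in this patch, such ops structures become read-only, so the writes are bracketed with pax_open_kernel()/pax_close_kernel(), which temporarily permit stores to otherwise read-only kernel data, and the individual member assignments go through *(void **)& casts so the compiler accepts writing into a constified structure. The shape of the pattern with a hypothetical driver (example_netdev_ops, example_open and example_close are illustrative):

static struct net_device_ops example_netdev_ops;   /* constified by the plugin */

static int __init example_init(void)
{
        pax_open_kernel();
        memcpy((void *)&example_netdev_ops, &tms380tr_netdev_ops,
               sizeof(tms380tr_netdev_ops));
        *(void **)&example_netdev_ops.ndo_open = example_open;
        *(void **)&example_netdev_ops.ndo_stop = example_close;
        pax_close_kernel();

        return 0;
}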
34997 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
34998 index 2d2a688..35f2372 100644
34999 --- a/drivers/net/usb/hso.c
35000 +++ b/drivers/net/usb/hso.c
35001 @@ -71,7 +71,7 @@
35002 #include <asm/byteorder.h>
35003 #include <linux/serial_core.h>
35004 #include <linux/serial.h>
35005 -
35006 +#include <asm/local.h>
35007
35008 #define MOD_AUTHOR "Option Wireless"
35009 #define MOD_DESCRIPTION "USB High Speed Option driver"
35010 @@ -257,7 +257,7 @@ struct hso_serial {
35011
35012 /* from usb_serial_port */
35013 struct tty_struct *tty;
35014 - int open_count;
35015 + local_t open_count;
35016 spinlock_t serial_lock;
35017
35018 int (*write_data) (struct hso_serial *serial);
35019 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
35020 struct urb *urb;
35021
35022 urb = serial->rx_urb[0];
35023 - if (serial->open_count > 0) {
35024 + if (local_read(&serial->open_count) > 0) {
35025 count = put_rxbuf_data(urb, serial);
35026 if (count == -1)
35027 return;
35028 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
35029 DUMP1(urb->transfer_buffer, urb->actual_length);
35030
35031 /* Anyone listening? */
35032 - if (serial->open_count == 0)
35033 + if (local_read(&serial->open_count) == 0)
35034 return;
35035
35036 if (status == 0) {
35037 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35038 spin_unlock_irq(&serial->serial_lock);
35039
35040 /* check for port already opened, if not set the termios */
35041 - serial->open_count++;
35042 - if (serial->open_count == 1) {
35043 + if (local_inc_return(&serial->open_count) == 1) {
35044 serial->rx_state = RX_IDLE;
35045 /* Force default termio settings */
35046 _hso_serial_set_termios(tty, NULL);
35047 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35048 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35049 if (result) {
35050 hso_stop_serial_device(serial->parent);
35051 - serial->open_count--;
35052 + local_dec(&serial->open_count);
35053 kref_put(&serial->parent->ref, hso_serial_ref_free);
35054 }
35055 } else {
35056 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
35057
35058 /* reset the rts and dtr */
35059 /* do the actual close */
35060 - serial->open_count--;
35061 + local_dec(&serial->open_count);
35062
35063 - if (serial->open_count <= 0) {
35064 - serial->open_count = 0;
35065 + if (local_read(&serial->open_count) <= 0) {
35066 + local_set(&serial->open_count, 0);
35067 spin_lock_irq(&serial->serial_lock);
35068 if (serial->tty == tty) {
35069 serial->tty->driver_data = NULL;
35070 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
35071
35072 /* the actual setup */
35073 spin_lock_irqsave(&serial->serial_lock, flags);
35074 - if (serial->open_count)
35075 + if (local_read(&serial->open_count))
35076 _hso_serial_set_termios(tty, old);
35077 else
35078 tty->termios = old;
35079 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
35080 D1("Pending read interrupt on port %d\n", i);
35081 spin_lock(&serial->serial_lock);
35082 if (serial->rx_state == RX_IDLE &&
35083 - serial->open_count > 0) {
35084 + local_read(&serial->open_count) > 0) {
35085 /* Setup and send a ctrl req read on
35086 * port i */
35087 if (!serial->rx_urb_filled[0]) {
35088 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
35089 /* Start all serial ports */
35090 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35091 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35092 - if (dev2ser(serial_table[i])->open_count) {
35093 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
35094 result =
35095 hso_start_serial_device(serial_table[i], GFP_NOIO);
35096 hso_kick_transmit(dev2ser(serial_table[i]));
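The hso changes above replace the plain int open_count with a local_t and route every update through local_inc_return()/local_dec()/local_read()/local_set(). The counter is bumped from the open/close paths and inspected from URB callbacks, so moving it to the local_* helpers removes the unprotected ++/-- read-modify-write cycles (and, where this patch's REFCOUNT instrumentation covers local_t, gives the counter the same overflow checking as other reference counts). A reduced sketch of the resulting usage (the struct and function names are illustrative):

#include <asm/local.h>

struct example_port {
        local_t open_count;
};

static void example_open(struct example_port *p)
{
        if (local_inc_return(&p->open_count) == 1) {
                /* first opener: one-time port setup happens here */
        }
}

static void example_close(struct example_port *p)
{
        local_dec(&p->open_count);
        if (local_read(&p->open_count) <= 0)
                local_set(&p->open_count, 0);
}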
35097 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
35098 index c54b7d37..af1f359 100644
35099 --- a/drivers/net/wireless/ath/ath.h
35100 +++ b/drivers/net/wireless/ath/ath.h
35101 @@ -119,6 +119,7 @@ struct ath_ops {
35102 void (*write_flush) (void *);
35103 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
35104 };
35105 +typedef struct ath_ops __no_const ath_ops_no_const;
35106
35107 struct ath_common;
35108 struct ath_bus_ops;
35109 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35110 index aa2abaf..5f5152d 100644
35111 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35112 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35113 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35114 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
35115 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
35116
35117 - ACCESS_ONCE(ads->ds_link) = i->link;
35118 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
35119 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
35120 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
35121
35122 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
35123 ctl6 = SM(i->keytype, AR_EncrType);
35124 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35125
35126 if ((i->is_first || i->is_last) &&
35127 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
35128 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
35129 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
35130 | set11nTries(i->rates, 1)
35131 | set11nTries(i->rates, 2)
35132 | set11nTries(i->rates, 3)
35133 | (i->dur_update ? AR_DurUpdateEna : 0)
35134 | SM(0, AR_BurstDur);
35135
35136 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
35137 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
35138 | set11nRate(i->rates, 1)
35139 | set11nRate(i->rates, 2)
35140 | set11nRate(i->rates, 3);
35141 } else {
35142 - ACCESS_ONCE(ads->ds_ctl2) = 0;
35143 - ACCESS_ONCE(ads->ds_ctl3) = 0;
35144 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
35145 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
35146 }
35147
35148 if (!i->is_first) {
35149 - ACCESS_ONCE(ads->ds_ctl0) = 0;
35150 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35151 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35152 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
35153 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35154 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35155 return;
35156 }
35157
35158 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35159 break;
35160 }
35161
35162 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35163 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35164 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35165 | SM(i->txpower, AR_XmitPower)
35166 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35167 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35168 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
35169 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
35170
35171 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35172 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35173 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35174 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35175
35176 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
35177 return;
35178
35179 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35180 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35181 | set11nPktDurRTSCTS(i->rates, 1);
35182
35183 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35184 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35185 | set11nPktDurRTSCTS(i->rates, 3);
35186
35187 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35188 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35189 | set11nRateFlags(i->rates, 1)
35190 | set11nRateFlags(i->rates, 2)
35191 | set11nRateFlags(i->rates, 3)
35192 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35193 index a66a13b..0ef399e 100644
35194 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35195 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35196 @@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35197 (i->qcu << AR_TxQcuNum_S) | desc_len;
35198
35199 checksum += val;
35200 - ACCESS_ONCE(ads->info) = val;
35201 + ACCESS_ONCE_RW(ads->info) = val;
35202
35203 checksum += i->link;
35204 - ACCESS_ONCE(ads->link) = i->link;
35205 + ACCESS_ONCE_RW(ads->link) = i->link;
35206
35207 checksum += i->buf_addr[0];
35208 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
35209 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
35210 checksum += i->buf_addr[1];
35211 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
35212 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
35213 checksum += i->buf_addr[2];
35214 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
35215 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
35216 checksum += i->buf_addr[3];
35217 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
35218 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
35219
35220 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
35221 - ACCESS_ONCE(ads->ctl3) = val;
35222 + ACCESS_ONCE_RW(ads->ctl3) = val;
35223 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
35224 - ACCESS_ONCE(ads->ctl5) = val;
35225 + ACCESS_ONCE_RW(ads->ctl5) = val;
35226 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
35227 - ACCESS_ONCE(ads->ctl7) = val;
35228 + ACCESS_ONCE_RW(ads->ctl7) = val;
35229 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
35230 - ACCESS_ONCE(ads->ctl9) = val;
35231 + ACCESS_ONCE_RW(ads->ctl9) = val;
35232
35233 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
35234 - ACCESS_ONCE(ads->ctl10) = checksum;
35235 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
35236
35237 if (i->is_first || i->is_last) {
35238 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
35239 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
35240 | set11nTries(i->rates, 1)
35241 | set11nTries(i->rates, 2)
35242 | set11nTries(i->rates, 3)
35243 | (i->dur_update ? AR_DurUpdateEna : 0)
35244 | SM(0, AR_BurstDur);
35245
35246 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
35247 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
35248 | set11nRate(i->rates, 1)
35249 | set11nRate(i->rates, 2)
35250 | set11nRate(i->rates, 3);
35251 } else {
35252 - ACCESS_ONCE(ads->ctl13) = 0;
35253 - ACCESS_ONCE(ads->ctl14) = 0;
35254 + ACCESS_ONCE_RW(ads->ctl13) = 0;
35255 + ACCESS_ONCE_RW(ads->ctl14) = 0;
35256 }
35257
35258 ads->ctl20 = 0;
35259 @@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35260
35261 ctl17 = SM(i->keytype, AR_EncrType);
35262 if (!i->is_first) {
35263 - ACCESS_ONCE(ads->ctl11) = 0;
35264 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35265 - ACCESS_ONCE(ads->ctl15) = 0;
35266 - ACCESS_ONCE(ads->ctl16) = 0;
35267 - ACCESS_ONCE(ads->ctl17) = ctl17;
35268 - ACCESS_ONCE(ads->ctl18) = 0;
35269 - ACCESS_ONCE(ads->ctl19) = 0;
35270 + ACCESS_ONCE_RW(ads->ctl11) = 0;
35271 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35272 + ACCESS_ONCE_RW(ads->ctl15) = 0;
35273 + ACCESS_ONCE_RW(ads->ctl16) = 0;
35274 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35275 + ACCESS_ONCE_RW(ads->ctl18) = 0;
35276 + ACCESS_ONCE_RW(ads->ctl19) = 0;
35277 return;
35278 }
35279
35280 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35281 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35282 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35283 | SM(i->txpower, AR_XmitPower)
35284 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35285 @@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35286 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
35287 ctl12 |= SM(val, AR_PAPRDChainMask);
35288
35289 - ACCESS_ONCE(ads->ctl12) = ctl12;
35290 - ACCESS_ONCE(ads->ctl17) = ctl17;
35291 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
35292 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35293
35294 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35295 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35296 | set11nPktDurRTSCTS(i->rates, 1);
35297
35298 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35299 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35300 | set11nPktDurRTSCTS(i->rates, 3);
35301
35302 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
35303 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
35304 | set11nRateFlags(i->rates, 1)
35305 | set11nRateFlags(i->rates, 2)
35306 | set11nRateFlags(i->rates, 3)
35307 | SM(i->rtscts_rate, AR_RTSCTSRate);
35308
35309 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
35310 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
35311 }
35312
35313 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
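The two ath9k descriptor-setup routines above switch every descriptor store from ACCESS_ONCE() to ACCESS_ONCE_RW(). Elsewhere this patch gives ACCESS_ONCE() a const-qualified volatile view of its argument, turning it into a read-only accessor; any site that legitimately writes through it therefore has to use the _RW variant. Assuming the definitions follow the usual PaX form (they are not quoted in this section, so treat them as a sketch):

/* read-only: writing through this lvalue no longer compiles */
#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
/* read-write: the original behaviour, for sites that must store */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

A store such as ACCESS_ONCE_RW(ads->ds_link) = i->link then compiles exactly as the old ACCESS_ONCE() assignment did, while an accidental write through the plain macro is caught at build time.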
35314 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
35315 index e88f182..4e57f5d 100644
35316 --- a/drivers/net/wireless/ath/ath9k/hw.h
35317 +++ b/drivers/net/wireless/ath/ath9k/hw.h
35318 @@ -614,7 +614,7 @@ struct ath_hw_private_ops {
35319
35320 /* ANI */
35321 void (*ani_cache_ini_regs)(struct ath_hw *ah);
35322 -};
35323 +} __no_const;
35324
35325 /**
35326 * struct ath_hw_ops - callbacks used by hardware code and driver code
35327 @@ -644,7 +644,7 @@ struct ath_hw_ops {
35328 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
35329 struct ath_hw_antcomb_conf *antconf);
35330
35331 -};
35332 +} __no_const;
35333
35334 struct ath_nf_limits {
35335 s16 max;
35336 @@ -664,7 +664,7 @@ enum ath_cal_list {
35337 #define AH_FASTCC 0x4
35338
35339 struct ath_hw {
35340 - struct ath_ops reg_ops;
35341 + ath_ops_no_const reg_ops;
35342
35343 struct ieee80211_hw *hw;
35344 struct ath_common common;
35345 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35346 index af00e2c..ab04d34 100644
35347 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35348 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35349 @@ -545,7 +545,7 @@ struct phy_func_ptr {
35350 void (*carrsuppr)(struct brcms_phy *);
35351 s32 (*rxsigpwr)(struct brcms_phy *, s32);
35352 void (*detach)(struct brcms_phy *);
35353 -};
35354 +} __no_const;
35355
35356 struct brcms_phy {
35357 struct brcms_phy_pub pubpi_ro;
35358 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
35359 index faec404..a5277f1 100644
35360 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
35361 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
35362 @@ -3611,7 +3611,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
35363 */
35364 if (il3945_mod_params.disable_hw_scan) {
35365 D_INFO("Disabling hw_scan\n");
35366 - il3945_mac_ops.hw_scan = NULL;
35367 + pax_open_kernel();
35368 + *(void **)&il3945_mac_ops.hw_scan = NULL;
35369 + pax_close_kernel();
35370 }
35371
35372 D_INFO("*** LOAD DRIVER ***\n");
35373 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
35374 index b7ce6a6..5649756 100644
35375 --- a/drivers/net/wireless/mac80211_hwsim.c
35376 +++ b/drivers/net/wireless/mac80211_hwsim.c
35377 @@ -1721,9 +1721,11 @@ static int __init init_mac80211_hwsim(void)
35378 return -EINVAL;
35379
35380 if (fake_hw_scan) {
35381 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35382 - mac80211_hwsim_ops.sw_scan_start = NULL;
35383 - mac80211_hwsim_ops.sw_scan_complete = NULL;
35384 + pax_open_kernel();
35385 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35386 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
35387 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
35388 + pax_close_kernel();
35389 }
35390
35391 spin_lock_init(&hwsim_radio_lock);
35392 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
35393 index 35225e9..95e6bf9 100644
35394 --- a/drivers/net/wireless/mwifiex/main.h
35395 +++ b/drivers/net/wireless/mwifiex/main.h
35396 @@ -537,7 +537,7 @@ struct mwifiex_if_ops {
35397 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
35398 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
35399 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
35400 -};
35401 +} __no_const;
35402
35403 struct mwifiex_adapter {
35404 u8 iface_type;
35405 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
35406 index d66e298..55b0a89 100644
35407 --- a/drivers/net/wireless/rndis_wlan.c
35408 +++ b/drivers/net/wireless/rndis_wlan.c
35409 @@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
35410
35411 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
35412
35413 - if (rts_threshold < 0 || rts_threshold > 2347)
35414 + if (rts_threshold > 2347)
35415 rts_threshold = 2347;
35416
35417 tmp = cpu_to_le32(rts_threshold);
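In the rndis_wlan hunk above, rts_threshold is a u32, so the rts_threshold < 0 half of the test can never be true (and compilers warn about the always-false comparison); only the 802.11 upper bound of 2347 needs enforcing. Reduced to a helper (name illustrative):

static u32 clamp_rts_threshold(u32 rts)
{
        /* unsigned: no lower bound to check, only the 802.11 maximum */
        return rts > 2347 ? 2347 : rts;
}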
35418 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
35419 index 9d8f581..0f6589e 100644
35420 --- a/drivers/net/wireless/wl1251/wl1251.h
35421 +++ b/drivers/net/wireless/wl1251/wl1251.h
35422 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
35423 void (*reset)(struct wl1251 *wl);
35424 void (*enable_irq)(struct wl1251 *wl);
35425 void (*disable_irq)(struct wl1251 *wl);
35426 -};
35427 +} __no_const;
35428
35429 struct wl1251 {
35430 struct ieee80211_hw *hw;
35431 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
35432 index f34b5b2..b5abb9f 100644
35433 --- a/drivers/oprofile/buffer_sync.c
35434 +++ b/drivers/oprofile/buffer_sync.c
35435 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
35436 if (cookie == NO_COOKIE)
35437 offset = pc;
35438 if (cookie == INVALID_COOKIE) {
35439 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35440 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35441 offset = pc;
35442 }
35443 if (cookie != last_cookie) {
35444 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
35445 /* add userspace sample */
35446
35447 if (!mm) {
35448 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
35449 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35450 return 0;
35451 }
35452
35453 cookie = lookup_dcookie(mm, s->eip, &offset);
35454
35455 if (cookie == INVALID_COOKIE) {
35456 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35457 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35458 return 0;
35459 }
35460
35461 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
35462 /* ignore backtraces if failed to add a sample */
35463 if (state == sb_bt_start) {
35464 state = sb_bt_ignore;
35465 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35466 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35467 }
35468 }
35469 release_mm(mm);
35470 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
35471 index c0cc4e7..44d4e54 100644
35472 --- a/drivers/oprofile/event_buffer.c
35473 +++ b/drivers/oprofile/event_buffer.c
35474 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
35475 }
35476
35477 if (buffer_pos == buffer_size) {
35478 - atomic_inc(&oprofile_stats.event_lost_overflow);
35479 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35480 return;
35481 }
35482
35483 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35484 index ed2c3ec..deda85a 100644
35485 --- a/drivers/oprofile/oprof.c
35486 +++ b/drivers/oprofile/oprof.c
35487 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35488 if (oprofile_ops.switch_events())
35489 return;
35490
35491 - atomic_inc(&oprofile_stats.multiplex_counter);
35492 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35493 start_switch_worker();
35494 }
35495
35496 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35497 index 917d28e..d62d981 100644
35498 --- a/drivers/oprofile/oprofile_stats.c
35499 +++ b/drivers/oprofile/oprofile_stats.c
35500 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35501 cpu_buf->sample_invalid_eip = 0;
35502 }
35503
35504 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35505 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35506 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
35507 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35508 - atomic_set(&oprofile_stats.multiplex_counter, 0);
35509 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35510 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35511 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35512 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35513 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35514 }
35515
35516
35517 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35518 index 38b6fc0..b5cbfce 100644
35519 --- a/drivers/oprofile/oprofile_stats.h
35520 +++ b/drivers/oprofile/oprofile_stats.h
35521 @@ -13,11 +13,11 @@
35522 #include <linux/atomic.h>
35523
35524 struct oprofile_stat_struct {
35525 - atomic_t sample_lost_no_mm;
35526 - atomic_t sample_lost_no_mapping;
35527 - atomic_t bt_lost_no_mapping;
35528 - atomic_t event_lost_overflow;
35529 - atomic_t multiplex_counter;
35530 + atomic_unchecked_t sample_lost_no_mm;
35531 + atomic_unchecked_t sample_lost_no_mapping;
35532 + atomic_unchecked_t bt_lost_no_mapping;
35533 + atomic_unchecked_t event_lost_overflow;
35534 + atomic_unchecked_t multiplex_counter;
35535 };
35536
35537 extern struct oprofile_stat_struct oprofile_stats;
35538 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35539 index 849357c..b83c1e0 100644
35540 --- a/drivers/oprofile/oprofilefs.c
35541 +++ b/drivers/oprofile/oprofilefs.c
35542 @@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
35543
35544
35545 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35546 - char const *name, atomic_t *val)
35547 + char const *name, atomic_unchecked_t *val)
35548 {
35549 return __oprofilefs_create_file(sb, root, name,
35550 &atomic_ro_fops, 0444, val);
35551 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35552 index 3f56bc0..707d642 100644
35553 --- a/drivers/parport/procfs.c
35554 +++ b/drivers/parport/procfs.c
35555 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35556
35557 *ppos += len;
35558
35559 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35560 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35561 }
35562
35563 #ifdef CONFIG_PARPORT_1284
35564 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35565
35566 *ppos += len;
35567
35568 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35569 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35570 }
35571 #endif /* IEEE1284.3 support. */
35572
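The two parport procfs hunks above add a "len > sizeof buffer" guard in front of copy_to_user(), so a length that somehow exceeds the on-stack buffer is rejected with -EFAULT instead of leaking adjacent stack memory to userspace. A hedged sketch of the pattern, with invented names:

static int example_proc_read(void __user *result, size_t len)
{
	char buffer[256];

	/* ... fill buffer and compute len from the formatted output ... */

	/* refuse to copy more than the stack buffer actually holds */
	if (len > sizeof(buffer) || copy_to_user(result, buffer, len))
		return -EFAULT;
	return 0;
}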
35573 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35574 index 9fff878..ad0ad53 100644
35575 --- a/drivers/pci/hotplug/cpci_hotplug.h
35576 +++ b/drivers/pci/hotplug/cpci_hotplug.h
35577 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35578 int (*hardware_test) (struct slot* slot, u32 value);
35579 u8 (*get_power) (struct slot* slot);
35580 int (*set_power) (struct slot* slot, int value);
35581 -};
35582 +} __no_const;
35583
35584 struct cpci_hp_controller {
35585 unsigned int irq;
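cpci_hp_controller_ops is a table of function pointers that drivers fill in at registration time, so it cannot be made read-only by the PaX constify plugin; the patch tags it __no_const to opt it out. The same annotation recurs on other runtime-populated ops structures later in this section (bq27x00_access_methods, adapter_ops, the bfa and qla ops tables, _io_ops, usbip eh_ops, and so on). Roughly, the annotation is defined along these lines; this is a sketch, the exact definition is added to compiler.h elsewhere in the patch:

#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif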
35586 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35587 index 76ba8a1..20ca857 100644
35588 --- a/drivers/pci/hotplug/cpqphp_nvram.c
35589 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
35590 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
35591
35592 void compaq_nvram_init (void __iomem *rom_start)
35593 {
35594 +
35595 +#ifndef CONFIG_PAX_KERNEXEC
35596 if (rom_start) {
35597 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35598 }
35599 +#endif
35600 +
35601 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35602
35603 /* initialize our int15 lock */
35604 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35605 index b500840..d7159d3 100644
35606 --- a/drivers/pci/pcie/aspm.c
35607 +++ b/drivers/pci/pcie/aspm.c
35608 @@ -27,9 +27,9 @@
35609 #define MODULE_PARAM_PREFIX "pcie_aspm."
35610
35611 /* Note: those are not register definitions */
35612 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35613 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35614 -#define ASPM_STATE_L1 (4) /* L1 state */
35615 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35616 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35617 +#define ASPM_STATE_L1 (4U) /* L1 state */
35618 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35619 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35620
35621 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35622 index 5e1ca3c..08082fe 100644
35623 --- a/drivers/pci/probe.c
35624 +++ b/drivers/pci/probe.c
35625 @@ -215,7 +215,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
35626 u16 orig_cmd;
35627 struct pci_bus_region region;
35628
35629 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35630 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35631
35632 if (!dev->mmio_always_on) {
35633 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
35634 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35635 index 27911b5..5b6db88 100644
35636 --- a/drivers/pci/proc.c
35637 +++ b/drivers/pci/proc.c
35638 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
35639 static int __init pci_proc_init(void)
35640 {
35641 struct pci_dev *dev = NULL;
35642 +
35643 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
35644 +#ifdef CONFIG_GRKERNSEC_PROC_USER
35645 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35646 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35647 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35648 +#endif
35649 +#else
35650 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35651 +#endif
35652 proc_create("devices", 0, proc_bus_pci_dir,
35653 &proc_bus_pci_dev_operations);
35654 proc_initialized = 1;
35655 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
35656 index d68c000..f6094ca 100644
35657 --- a/drivers/platform/x86/thinkpad_acpi.c
35658 +++ b/drivers/platform/x86/thinkpad_acpi.c
35659 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
35660 return 0;
35661 }
35662
35663 -void static hotkey_mask_warn_incomplete_mask(void)
35664 +static void hotkey_mask_warn_incomplete_mask(void)
35665 {
35666 /* log only what the user can fix... */
35667 const u32 wantedmask = hotkey_driver_mask &
35668 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
35669 }
35670 }
35671
35672 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35673 - struct tp_nvram_state *newn,
35674 - const u32 event_mask)
35675 -{
35676 -
35677 #define TPACPI_COMPARE_KEY(__scancode, __member) \
35678 do { \
35679 if ((event_mask & (1 << __scancode)) && \
35680 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35681 tpacpi_hotkey_send_key(__scancode); \
35682 } while (0)
35683
35684 - void issue_volchange(const unsigned int oldvol,
35685 - const unsigned int newvol)
35686 - {
35687 - unsigned int i = oldvol;
35688 +static void issue_volchange(const unsigned int oldvol,
35689 + const unsigned int newvol,
35690 + const u32 event_mask)
35691 +{
35692 + unsigned int i = oldvol;
35693
35694 - while (i > newvol) {
35695 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35696 - i--;
35697 - }
35698 - while (i < newvol) {
35699 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35700 - i++;
35701 - }
35702 + while (i > newvol) {
35703 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35704 + i--;
35705 }
35706 + while (i < newvol) {
35707 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35708 + i++;
35709 + }
35710 +}
35711
35712 - void issue_brightnesschange(const unsigned int oldbrt,
35713 - const unsigned int newbrt)
35714 - {
35715 - unsigned int i = oldbrt;
35716 +static void issue_brightnesschange(const unsigned int oldbrt,
35717 + const unsigned int newbrt,
35718 + const u32 event_mask)
35719 +{
35720 + unsigned int i = oldbrt;
35721
35722 - while (i > newbrt) {
35723 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35724 - i--;
35725 - }
35726 - while (i < newbrt) {
35727 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35728 - i++;
35729 - }
35730 + while (i > newbrt) {
35731 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35732 + i--;
35733 + }
35734 + while (i < newbrt) {
35735 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35736 + i++;
35737 }
35738 +}
35739
35740 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35741 + struct tp_nvram_state *newn,
35742 + const u32 event_mask)
35743 +{
35744 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
35745 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
35746 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
35747 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35748 oldn->volume_level != newn->volume_level) {
35749 /* recently muted, or repeated mute keypress, or
35750 * multiple presses ending in mute */
35751 - issue_volchange(oldn->volume_level, newn->volume_level);
35752 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35753 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
35754 }
35755 } else {
35756 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35757 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35758 }
35759 if (oldn->volume_level != newn->volume_level) {
35760 - issue_volchange(oldn->volume_level, newn->volume_level);
35761 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35762 } else if (oldn->volume_toggle != newn->volume_toggle) {
35763 /* repeated vol up/down keypress at end of scale ? */
35764 if (newn->volume_level == 0)
35765 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35766 /* handle brightness */
35767 if (oldn->brightness_level != newn->brightness_level) {
35768 issue_brightnesschange(oldn->brightness_level,
35769 - newn->brightness_level);
35770 + newn->brightness_level,
35771 + event_mask);
35772 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
35773 /* repeated key presses that didn't change state */
35774 if (newn->brightness_level == 0)
35775 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35776 && !tp_features.bright_unkfw)
35777 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35778 }
35779 +}
35780
35781 #undef TPACPI_COMPARE_KEY
35782 #undef TPACPI_MAY_SEND_KEY
35783 -}
35784
35785 /*
35786 * Polling driver
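The thinkpad_acpi hunks above lift issue_volchange() and issue_brightnesschange() out of hotkey_compare_and_issue_event(), where they had been GCC nested functions, and turn them into file-scope static functions that take the previously captured event_mask as an explicit parameter (nested functions are avoided in hardened builds). A minimal before/after illustration with invented names:

typedef unsigned int u32;

/* before: nested helper capturing event_mask from the enclosing scope */
static void handler_before(u32 event_mask)
{
	void helper(unsigned int delta)		/* GCC nested-function extension */
	{
		if (event_mask & delta)
			;			/* ... send the key event ... */
	}
	helper(1);
}

/* after: ordinary static function, the captured value passed explicitly */
static void helper_after(unsigned int delta, u32 event_mask)
{
	if (event_mask & delta)
		;				/* ... send the key event ... */
}

static void handler_after(u32 event_mask)
{
	helper_after(1, event_mask);
}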
35787 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
35788 index 769d265..a3a05ca 100644
35789 --- a/drivers/pnp/pnpbios/bioscalls.c
35790 +++ b/drivers/pnp/pnpbios/bioscalls.c
35791 @@ -58,7 +58,7 @@ do { \
35792 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35793 } while(0)
35794
35795 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35796 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35797 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35798
35799 /*
35800 @@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35801
35802 cpu = get_cpu();
35803 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35804 +
35805 + pax_open_kernel();
35806 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35807 + pax_close_kernel();
35808
35809 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35810 spin_lock_irqsave(&pnp_bios_lock, flags);
35811 @@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35812 :"memory");
35813 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35814
35815 + pax_open_kernel();
35816 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35817 + pax_close_kernel();
35818 +
35819 put_cpu();
35820
35821 /* If we get here and this is set then the PnP BIOS faulted on us. */
35822 @@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
35823 return status;
35824 }
35825
35826 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
35827 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35828 {
35829 int i;
35830
35831 @@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35832 pnp_bios_callpoint.offset = header->fields.pm16offset;
35833 pnp_bios_callpoint.segment = PNP_CS16;
35834
35835 + pax_open_kernel();
35836 +
35837 for_each_possible_cpu(i) {
35838 struct desc_struct *gdt = get_cpu_gdt_table(i);
35839 if (!gdt)
35840 @@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35841 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35842 (unsigned long)__va(header->fields.pm16dseg));
35843 }
35844 +
35845 + pax_close_kernel();
35846 }
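The bioscalls.c hunks above wrap every runtime write to the GDT (installing bad_bios_desc, restoring save_desc_40, and the per-CPU PnP BIOS segment setup) in pax_open_kernel()/pax_close_kernel(), because KERNEXEC keeps such data read-only. On x86 the pair is conventionally implemented by toggling the CR0.WP bit; the sketch below is illustrative only, the real helpers are defined in the arch headers modified earlier in this patch:

static inline unsigned long pax_open_kernel(void)
{
#ifdef CONFIG_PAX_KERNEXEC
	preempt_disable();
	barrier();
	write_cr0(read_cr0() & ~X86_CR0_WP);	/* allow writes to RO pages */
	barrier();
#endif
	return 0;
}

static inline unsigned long pax_close_kernel(void)
{
#ifdef CONFIG_PAX_KERNEXEC
	barrier();
	write_cr0(read_cr0() | X86_CR0_WP);	/* restore write protection */
	barrier();
	preempt_enable_no_resched();
#endif
	return 0;
}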
35847 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
35848 index b0ecacb..7c9da2e 100644
35849 --- a/drivers/pnp/resource.c
35850 +++ b/drivers/pnp/resource.c
35851 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
35852 return 1;
35853
35854 /* check if the resource is valid */
35855 - if (*irq < 0 || *irq > 15)
35856 + if (*irq > 15)
35857 return 0;
35858
35859 /* check if the resource is reserved */
35860 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
35861 return 1;
35862
35863 /* check if the resource is valid */
35864 - if (*dma < 0 || *dma == 4 || *dma > 7)
35865 + if (*dma == 4 || *dma > 7)
35866 return 0;
35867
35868 /* check if the resource is reserved */
35869 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
35870 index 222ccd8..6275fa5 100644
35871 --- a/drivers/power/bq27x00_battery.c
35872 +++ b/drivers/power/bq27x00_battery.c
35873 @@ -72,7 +72,7 @@
35874 struct bq27x00_device_info;
35875 struct bq27x00_access_methods {
35876 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
35877 -};
35878 +} __no_const;
35879
35880 enum bq27x00_chip { BQ27000, BQ27500 };
35881
35882 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
35883 index 4c5b053..104263e 100644
35884 --- a/drivers/regulator/max8660.c
35885 +++ b/drivers/regulator/max8660.c
35886 @@ -385,8 +385,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
35887 max8660->shadow_regs[MAX8660_OVER1] = 5;
35888 } else {
35889 /* Otherwise devices can be toggled via software */
35890 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
35891 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
35892 + pax_open_kernel();
35893 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
35894 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
35895 + pax_close_kernel();
35896 }
35897
35898 /*
35899 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
35900 index 845aa22..99ec402 100644
35901 --- a/drivers/regulator/mc13892-regulator.c
35902 +++ b/drivers/regulator/mc13892-regulator.c
35903 @@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
35904 }
35905 mc13xxx_unlock(mc13892);
35906
35907 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35908 + pax_open_kernel();
35909 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35910 = mc13892_vcam_set_mode;
35911 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35912 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35913 = mc13892_vcam_get_mode;
35914 + pax_close_kernel();
35915
35916 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
35917 ARRAY_SIZE(mc13892_regulators));
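The max8660 and mc13892 hunks above show the companion idiom to __no_const: when an ops structure stays constified but a member must still be patched at probe time, the store is routed through a void ** cast on the member's address and bracketed by pax_open_kernel()/pax_close_kernel(). A hedged sketch with invented names:

struct example_ops {
	int (*enable)(void);
	int (*disable)(void);
};					/* constified: treated as read-only */

static struct example_ops ex_ops;

static int ex_enable(void)
{
	return 0;
}

static void patch_ops_at_probe(void)
{
	pax_open_kernel();
	/* cast away the implied const so the store compiles, then write
	 * through the temporarily writable read-only mapping */
	*(void **)&ex_ops.enable = ex_enable;
	pax_close_kernel();
}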
35918 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
35919 index cace6d3..f623fda 100644
35920 --- a/drivers/rtc/rtc-dev.c
35921 +++ b/drivers/rtc/rtc-dev.c
35922 @@ -14,6 +14,7 @@
35923 #include <linux/module.h>
35924 #include <linux/rtc.h>
35925 #include <linux/sched.h>
35926 +#include <linux/grsecurity.h>
35927 #include "rtc-core.h"
35928
35929 static dev_t rtc_devt;
35930 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
35931 if (copy_from_user(&tm, uarg, sizeof(tm)))
35932 return -EFAULT;
35933
35934 + gr_log_timechange();
35935 +
35936 return rtc_set_time(rtc, &tm);
35937
35938 case RTC_PIE_ON:
35939 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
35940 index 3fcf627..f334910 100644
35941 --- a/drivers/scsi/aacraid/aacraid.h
35942 +++ b/drivers/scsi/aacraid/aacraid.h
35943 @@ -492,7 +492,7 @@ struct adapter_ops
35944 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
35945 /* Administrative operations */
35946 int (*adapter_comm)(struct aac_dev * dev, int comm);
35947 -};
35948 +} __no_const;
35949
35950 /*
35951 * Define which interrupt handler needs to be installed
35952 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
35953 index 0d279c44..3d25a97 100644
35954 --- a/drivers/scsi/aacraid/linit.c
35955 +++ b/drivers/scsi/aacraid/linit.c
35956 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
35957 #elif defined(__devinitconst)
35958 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35959 #else
35960 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
35961 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35962 #endif
35963 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
35964 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
35965 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
35966 index ff80552..1c4120c 100644
35967 --- a/drivers/scsi/aic94xx/aic94xx_init.c
35968 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
35969 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
35970 .lldd_ata_set_dmamode = asd_set_dmamode,
35971 };
35972
35973 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
35974 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
35975 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
35976 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
35977 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
35978 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
35979 index 4ad7e36..d004679 100644
35980 --- a/drivers/scsi/bfa/bfa.h
35981 +++ b/drivers/scsi/bfa/bfa.h
35982 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
35983 u32 *end);
35984 int cpe_vec_q0;
35985 int rme_vec_q0;
35986 -};
35987 +} __no_const;
35988 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
35989
35990 struct bfa_faa_cbfn_s {
35991 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
35992 index f0f80e2..8ec946b 100644
35993 --- a/drivers/scsi/bfa/bfa_fcpim.c
35994 +++ b/drivers/scsi/bfa/bfa_fcpim.c
35995 @@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
35996
35997 bfa_iotag_attach(fcp);
35998
35999 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
36000 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
36001 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
36002 (fcp->num_itns * sizeof(struct bfa_itn_s));
36003 memset(fcp->itn_arr, 0,
36004 @@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36005 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
36006 {
36007 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
36008 - struct bfa_itn_s *itn;
36009 + bfa_itn_s_no_const *itn;
36010
36011 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
36012 itn->isr = isr;
36013 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
36014 index 36f26da..38a34a8 100644
36015 --- a/drivers/scsi/bfa/bfa_fcpim.h
36016 +++ b/drivers/scsi/bfa/bfa_fcpim.h
36017 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
36018 struct bfa_itn_s {
36019 bfa_isr_func_t isr;
36020 };
36021 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
36022
36023 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36024 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
36025 @@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
36026 struct list_head iotag_tio_free_q; /* free IO resources */
36027 struct list_head iotag_unused_q; /* unused IO resources*/
36028 struct bfa_iotag_s *iotag_arr;
36029 - struct bfa_itn_s *itn_arr;
36030 + bfa_itn_s_no_const *itn_arr;
36031 int num_ioim_reqs;
36032 int num_fwtio_reqs;
36033 int num_itns;
36034 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
36035 index 1a99d4b..e85d64b 100644
36036 --- a/drivers/scsi/bfa/bfa_ioc.h
36037 +++ b/drivers/scsi/bfa/bfa_ioc.h
36038 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
36039 bfa_ioc_disable_cbfn_t disable_cbfn;
36040 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36041 bfa_ioc_reset_cbfn_t reset_cbfn;
36042 -};
36043 +} __no_const;
36044
36045 /*
36046 * IOC event notification mechanism.
36047 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
36048 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
36049 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
36050 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
36051 -};
36052 +} __no_const;
36053
36054 /*
36055 * Queue element to wait for room in request queue. FIFO order is
36056 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
36057 index a3a056a..b9bbc2f 100644
36058 --- a/drivers/scsi/hosts.c
36059 +++ b/drivers/scsi/hosts.c
36060 @@ -42,7 +42,7 @@
36061 #include "scsi_logging.h"
36062
36063
36064 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
36065 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36066
36067
36068 static void scsi_host_cls_release(struct device *dev)
36069 @@ -360,7 +360,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
36070 * subtract one because we increment first then return, but we need to
36071 * know what the next host number was before increment
36072 */
36073 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36074 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36075 shost->dma_channel = 0xff;
36076
36077 /* These three are default values which can be overridden */
36078 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
36079 index 500e20d..ebd3059 100644
36080 --- a/drivers/scsi/hpsa.c
36081 +++ b/drivers/scsi/hpsa.c
36082 @@ -521,7 +521,7 @@ static inline u32 next_command(struct ctlr_info *h)
36083 u32 a;
36084
36085 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36086 - return h->access.command_completed(h);
36087 + return h->access->command_completed(h);
36088
36089 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36090 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36091 @@ -3002,7 +3002,7 @@ static void start_io(struct ctlr_info *h)
36092 while (!list_empty(&h->reqQ)) {
36093 c = list_entry(h->reqQ.next, struct CommandList, list);
36094 /* can't do anything if fifo is full */
36095 - if ((h->access.fifo_full(h))) {
36096 + if ((h->access->fifo_full(h))) {
36097 dev_warn(&h->pdev->dev, "fifo full\n");
36098 break;
36099 }
36100 @@ -3012,7 +3012,7 @@ static void start_io(struct ctlr_info *h)
36101 h->Qdepth--;
36102
36103 /* Tell the controller execute command */
36104 - h->access.submit_command(h, c);
36105 + h->access->submit_command(h, c);
36106
36107 /* Put job onto the completed Q */
36108 addQ(&h->cmpQ, c);
36109 @@ -3021,17 +3021,17 @@ static void start_io(struct ctlr_info *h)
36110
36111 static inline unsigned long get_next_completion(struct ctlr_info *h)
36112 {
36113 - return h->access.command_completed(h);
36114 + return h->access->command_completed(h);
36115 }
36116
36117 static inline bool interrupt_pending(struct ctlr_info *h)
36118 {
36119 - return h->access.intr_pending(h);
36120 + return h->access->intr_pending(h);
36121 }
36122
36123 static inline long interrupt_not_for_us(struct ctlr_info *h)
36124 {
36125 - return (h->access.intr_pending(h) == 0) ||
36126 + return (h->access->intr_pending(h) == 0) ||
36127 (h->interrupts_enabled == 0);
36128 }
36129
36130 @@ -3930,7 +3930,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
36131 if (prod_index < 0)
36132 return -ENODEV;
36133 h->product_name = products[prod_index].product_name;
36134 - h->access = *(products[prod_index].access);
36135 + h->access = products[prod_index].access;
36136
36137 if (hpsa_board_disabled(h->pdev)) {
36138 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36139 @@ -4175,7 +4175,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
36140
36141 assert_spin_locked(&lockup_detector_lock);
36142 remove_ctlr_from_lockup_detector_list(h);
36143 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36144 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36145 spin_lock_irqsave(&h->lock, flags);
36146 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
36147 spin_unlock_irqrestore(&h->lock, flags);
36148 @@ -4355,7 +4355,7 @@ reinit_after_soft_reset:
36149 }
36150
36151 /* make sure the board interrupts are off */
36152 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36153 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36154
36155 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
36156 goto clean2;
36157 @@ -4389,7 +4389,7 @@ reinit_after_soft_reset:
36158 * fake ones to scoop up any residual completions.
36159 */
36160 spin_lock_irqsave(&h->lock, flags);
36161 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36162 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36163 spin_unlock_irqrestore(&h->lock, flags);
36164 free_irq(h->intr[h->intr_mode], h);
36165 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
36166 @@ -4408,9 +4408,9 @@ reinit_after_soft_reset:
36167 dev_info(&h->pdev->dev, "Board READY.\n");
36168 dev_info(&h->pdev->dev,
36169 "Waiting for stale completions to drain.\n");
36170 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36171 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36172 msleep(10000);
36173 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36174 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36175
36176 rc = controller_reset_failed(h->cfgtable);
36177 if (rc)
36178 @@ -4431,7 +4431,7 @@ reinit_after_soft_reset:
36179 }
36180
36181 /* Turn the interrupts on so we can service requests */
36182 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36183 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36184
36185 hpsa_hba_inquiry(h);
36186 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
36187 @@ -4483,7 +4483,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
36188 * To write all data in the battery backed cache to disks
36189 */
36190 hpsa_flush_cache(h);
36191 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36192 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36193 free_irq(h->intr[h->intr_mode], h);
36194 #ifdef CONFIG_PCI_MSI
36195 if (h->msix_vector)
36196 @@ -4657,7 +4657,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
36197 return;
36198 }
36199 /* Change the access methods to the performant access methods */
36200 - h->access = SA5_performant_access;
36201 + h->access = &SA5_performant_access;
36202 h->transMethod = CFGTBL_Trans_Performant;
36203 }
36204
36205 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
36206 index 7b28d54..952f23a 100644
36207 --- a/drivers/scsi/hpsa.h
36208 +++ b/drivers/scsi/hpsa.h
36209 @@ -72,7 +72,7 @@ struct ctlr_info {
36210 unsigned int msix_vector;
36211 unsigned int msi_vector;
36212 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
36213 - struct access_method access;
36214 + struct access_method *access;
36215
36216 /* queue and queue Info */
36217 struct list_head reqQ;
36218 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
36219 index f2df059..a3a9930 100644
36220 --- a/drivers/scsi/ips.h
36221 +++ b/drivers/scsi/ips.h
36222 @@ -1027,7 +1027,7 @@ typedef struct {
36223 int (*intr)(struct ips_ha *);
36224 void (*enableint)(struct ips_ha *);
36225 uint32_t (*statupd)(struct ips_ha *);
36226 -} ips_hw_func_t;
36227 +} __no_const ips_hw_func_t;
36228
36229 typedef struct ips_ha {
36230 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36231 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
36232 index aceffad..c35c08d 100644
36233 --- a/drivers/scsi/libfc/fc_exch.c
36234 +++ b/drivers/scsi/libfc/fc_exch.c
36235 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
36236 * all together if not used XXX
36237 */
36238 struct {
36239 - atomic_t no_free_exch;
36240 - atomic_t no_free_exch_xid;
36241 - atomic_t xid_not_found;
36242 - atomic_t xid_busy;
36243 - atomic_t seq_not_found;
36244 - atomic_t non_bls_resp;
36245 + atomic_unchecked_t no_free_exch;
36246 + atomic_unchecked_t no_free_exch_xid;
36247 + atomic_unchecked_t xid_not_found;
36248 + atomic_unchecked_t xid_busy;
36249 + atomic_unchecked_t seq_not_found;
36250 + atomic_unchecked_t non_bls_resp;
36251 } stats;
36252 };
36253
36254 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
36255 /* allocate memory for exchange */
36256 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36257 if (!ep) {
36258 - atomic_inc(&mp->stats.no_free_exch);
36259 + atomic_inc_unchecked(&mp->stats.no_free_exch);
36260 goto out;
36261 }
36262 memset(ep, 0, sizeof(*ep));
36263 @@ -780,7 +780,7 @@ out:
36264 return ep;
36265 err:
36266 spin_unlock_bh(&pool->lock);
36267 - atomic_inc(&mp->stats.no_free_exch_xid);
36268 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36269 mempool_free(ep, mp->ep_pool);
36270 return NULL;
36271 }
36272 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36273 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36274 ep = fc_exch_find(mp, xid);
36275 if (!ep) {
36276 - atomic_inc(&mp->stats.xid_not_found);
36277 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36278 reject = FC_RJT_OX_ID;
36279 goto out;
36280 }
36281 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36282 ep = fc_exch_find(mp, xid);
36283 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36284 if (ep) {
36285 - atomic_inc(&mp->stats.xid_busy);
36286 + atomic_inc_unchecked(&mp->stats.xid_busy);
36287 reject = FC_RJT_RX_ID;
36288 goto rel;
36289 }
36290 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36291 }
36292 xid = ep->xid; /* get our XID */
36293 } else if (!ep) {
36294 - atomic_inc(&mp->stats.xid_not_found);
36295 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36296 reject = FC_RJT_RX_ID; /* XID not found */
36297 goto out;
36298 }
36299 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36300 } else {
36301 sp = &ep->seq;
36302 if (sp->id != fh->fh_seq_id) {
36303 - atomic_inc(&mp->stats.seq_not_found);
36304 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36305 if (f_ctl & FC_FC_END_SEQ) {
36306 /*
36307 * Update sequence_id based on incoming last
36308 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36309
36310 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36311 if (!ep) {
36312 - atomic_inc(&mp->stats.xid_not_found);
36313 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36314 goto out;
36315 }
36316 if (ep->esb_stat & ESB_ST_COMPLETE) {
36317 - atomic_inc(&mp->stats.xid_not_found);
36318 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36319 goto rel;
36320 }
36321 if (ep->rxid == FC_XID_UNKNOWN)
36322 ep->rxid = ntohs(fh->fh_rx_id);
36323 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36324 - atomic_inc(&mp->stats.xid_not_found);
36325 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36326 goto rel;
36327 }
36328 if (ep->did != ntoh24(fh->fh_s_id) &&
36329 ep->did != FC_FID_FLOGI) {
36330 - atomic_inc(&mp->stats.xid_not_found);
36331 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36332 goto rel;
36333 }
36334 sof = fr_sof(fp);
36335 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36336 sp->ssb_stat |= SSB_ST_RESP;
36337 sp->id = fh->fh_seq_id;
36338 } else if (sp->id != fh->fh_seq_id) {
36339 - atomic_inc(&mp->stats.seq_not_found);
36340 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36341 goto rel;
36342 }
36343
36344 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36345 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36346
36347 if (!sp)
36348 - atomic_inc(&mp->stats.xid_not_found);
36349 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36350 else
36351 - atomic_inc(&mp->stats.non_bls_resp);
36352 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
36353
36354 fc_frame_free(fp);
36355 }
36356 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
36357 index 441d88a..689ad71 100644
36358 --- a/drivers/scsi/libsas/sas_ata.c
36359 +++ b/drivers/scsi/libsas/sas_ata.c
36360 @@ -529,7 +529,7 @@ static struct ata_port_operations sas_sata_ops = {
36361 .postreset = ata_std_postreset,
36362 .error_handler = ata_std_error_handler,
36363 .post_internal_cmd = sas_ata_post_internal,
36364 - .qc_defer = ata_std_qc_defer,
36365 + .qc_defer = ata_std_qc_defer,
36366 .qc_prep = ata_noop_qc_prep,
36367 .qc_issue = sas_ata_qc_issue,
36368 .qc_fill_rtf = sas_ata_qc_fill_rtf,
36369 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
36370 index 3a1ffdd..8eb7c71 100644
36371 --- a/drivers/scsi/lpfc/lpfc.h
36372 +++ b/drivers/scsi/lpfc/lpfc.h
36373 @@ -413,7 +413,7 @@ struct lpfc_vport {
36374 struct dentry *debug_nodelist;
36375 struct dentry *vport_debugfs_root;
36376 struct lpfc_debugfs_trc *disc_trc;
36377 - atomic_t disc_trc_cnt;
36378 + atomic_unchecked_t disc_trc_cnt;
36379 #endif
36380 uint8_t stat_data_enabled;
36381 uint8_t stat_data_blocked;
36382 @@ -826,8 +826,8 @@ struct lpfc_hba {
36383 struct timer_list fabric_block_timer;
36384 unsigned long bit_flags;
36385 #define FABRIC_COMANDS_BLOCKED 0
36386 - atomic_t num_rsrc_err;
36387 - atomic_t num_cmd_success;
36388 + atomic_unchecked_t num_rsrc_err;
36389 + atomic_unchecked_t num_cmd_success;
36390 unsigned long last_rsrc_error_time;
36391 unsigned long last_ramp_down_time;
36392 unsigned long last_ramp_up_time;
36393 @@ -863,7 +863,7 @@ struct lpfc_hba {
36394
36395 struct dentry *debug_slow_ring_trc;
36396 struct lpfc_debugfs_trc *slow_ring_trc;
36397 - atomic_t slow_ring_trc_cnt;
36398 + atomic_unchecked_t slow_ring_trc_cnt;
36399 /* iDiag debugfs sub-directory */
36400 struct dentry *idiag_root;
36401 struct dentry *idiag_pci_cfg;
36402 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36403 index af04b0d..8f1a97e 100644
36404 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
36405 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36406 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
36407
36408 #include <linux/debugfs.h>
36409
36410 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36411 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36412 static unsigned long lpfc_debugfs_start_time = 0L;
36413
36414 /* iDiag */
36415 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
36416 lpfc_debugfs_enable = 0;
36417
36418 len = 0;
36419 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36420 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36421 (lpfc_debugfs_max_disc_trc - 1);
36422 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36423 dtp = vport->disc_trc + i;
36424 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
36425 lpfc_debugfs_enable = 0;
36426
36427 len = 0;
36428 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36429 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36430 (lpfc_debugfs_max_slow_ring_trc - 1);
36431 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36432 dtp = phba->slow_ring_trc + i;
36433 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36434 !vport || !vport->disc_trc)
36435 return;
36436
36437 - index = atomic_inc_return(&vport->disc_trc_cnt) &
36438 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36439 (lpfc_debugfs_max_disc_trc - 1);
36440 dtp = vport->disc_trc + index;
36441 dtp->fmt = fmt;
36442 dtp->data1 = data1;
36443 dtp->data2 = data2;
36444 dtp->data3 = data3;
36445 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36446 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36447 dtp->jif = jiffies;
36448 #endif
36449 return;
36450 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
36451 !phba || !phba->slow_ring_trc)
36452 return;
36453
36454 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36455 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36456 (lpfc_debugfs_max_slow_ring_trc - 1);
36457 dtp = phba->slow_ring_trc + index;
36458 dtp->fmt = fmt;
36459 dtp->data1 = data1;
36460 dtp->data2 = data2;
36461 dtp->data3 = data3;
36462 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36463 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36464 dtp->jif = jiffies;
36465 #endif
36466 return;
36467 @@ -4090,7 +4090,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36468 "slow_ring buffer\n");
36469 goto debug_failed;
36470 }
36471 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36472 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36473 memset(phba->slow_ring_trc, 0,
36474 (sizeof(struct lpfc_debugfs_trc) *
36475 lpfc_debugfs_max_slow_ring_trc));
36476 @@ -4136,7 +4136,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36477 "buffer\n");
36478 goto debug_failed;
36479 }
36480 - atomic_set(&vport->disc_trc_cnt, 0);
36481 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36482
36483 snprintf(name, sizeof(name), "discovery_trace");
36484 vport->debug_disc_trc =
36485 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36486 index 9598fdc..7e9f3d9 100644
36487 --- a/drivers/scsi/lpfc/lpfc_init.c
36488 +++ b/drivers/scsi/lpfc/lpfc_init.c
36489 @@ -10266,8 +10266,10 @@ lpfc_init(void)
36490 "misc_register returned with status %d", error);
36491
36492 if (lpfc_enable_npiv) {
36493 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36494 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36495 + pax_open_kernel();
36496 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36497 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36498 + pax_close_kernel();
36499 }
36500 lpfc_transport_template =
36501 fc_attach_transport(&lpfc_transport_functions);
36502 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36503 index 88f3a83..686d3fa 100644
36504 --- a/drivers/scsi/lpfc/lpfc_scsi.c
36505 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
36506 @@ -311,7 +311,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36507 uint32_t evt_posted;
36508
36509 spin_lock_irqsave(&phba->hbalock, flags);
36510 - atomic_inc(&phba->num_rsrc_err);
36511 + atomic_inc_unchecked(&phba->num_rsrc_err);
36512 phba->last_rsrc_error_time = jiffies;
36513
36514 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36515 @@ -352,7 +352,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36516 unsigned long flags;
36517 struct lpfc_hba *phba = vport->phba;
36518 uint32_t evt_posted;
36519 - atomic_inc(&phba->num_cmd_success);
36520 + atomic_inc_unchecked(&phba->num_cmd_success);
36521
36522 if (vport->cfg_lun_queue_depth <= queue_depth)
36523 return;
36524 @@ -396,8 +396,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36525 unsigned long num_rsrc_err, num_cmd_success;
36526 int i;
36527
36528 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36529 - num_cmd_success = atomic_read(&phba->num_cmd_success);
36530 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36531 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36532
36533 vports = lpfc_create_vport_work_array(phba);
36534 if (vports != NULL)
36535 @@ -417,8 +417,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36536 }
36537 }
36538 lpfc_destroy_vport_work_array(phba, vports);
36539 - atomic_set(&phba->num_rsrc_err, 0);
36540 - atomic_set(&phba->num_cmd_success, 0);
36541 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36542 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36543 }
36544
36545 /**
36546 @@ -452,8 +452,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36547 }
36548 }
36549 lpfc_destroy_vport_work_array(phba, vports);
36550 - atomic_set(&phba->num_rsrc_err, 0);
36551 - atomic_set(&phba->num_cmd_success, 0);
36552 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36553 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36554 }
36555
36556 /**
36557 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36558 index ea8a0b4..812a124 100644
36559 --- a/drivers/scsi/pmcraid.c
36560 +++ b/drivers/scsi/pmcraid.c
36561 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36562 res->scsi_dev = scsi_dev;
36563 scsi_dev->hostdata = res;
36564 res->change_detected = 0;
36565 - atomic_set(&res->read_failures, 0);
36566 - atomic_set(&res->write_failures, 0);
36567 + atomic_set_unchecked(&res->read_failures, 0);
36568 + atomic_set_unchecked(&res->write_failures, 0);
36569 rc = 0;
36570 }
36571 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36572 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36573
36574 /* If this was a SCSI read/write command keep count of errors */
36575 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36576 - atomic_inc(&res->read_failures);
36577 + atomic_inc_unchecked(&res->read_failures);
36578 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36579 - atomic_inc(&res->write_failures);
36580 + atomic_inc_unchecked(&res->write_failures);
36581
36582 if (!RES_IS_GSCSI(res->cfg_entry) &&
36583 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36584 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
36585 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36586 * hrrq_id assigned here in queuecommand
36587 */
36588 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36589 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36590 pinstance->num_hrrq;
36591 cmd->cmd_done = pmcraid_io_done;
36592
36593 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
36594 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36595 * hrrq_id assigned here in queuecommand
36596 */
36597 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36598 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36599 pinstance->num_hrrq;
36600
36601 if (request_size) {
36602 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36603
36604 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36605 /* add resources only after host is added into system */
36606 - if (!atomic_read(&pinstance->expose_resources))
36607 + if (!atomic_read_unchecked(&pinstance->expose_resources))
36608 return;
36609
36610 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36611 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
36612 init_waitqueue_head(&pinstance->reset_wait_q);
36613
36614 atomic_set(&pinstance->outstanding_cmds, 0);
36615 - atomic_set(&pinstance->last_message_id, 0);
36616 - atomic_set(&pinstance->expose_resources, 0);
36617 + atomic_set_unchecked(&pinstance->last_message_id, 0);
36618 + atomic_set_unchecked(&pinstance->expose_resources, 0);
36619
36620 INIT_LIST_HEAD(&pinstance->free_res_q);
36621 INIT_LIST_HEAD(&pinstance->used_res_q);
36622 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
36623 /* Schedule worker thread to handle CCN and take care of adding and
36624 * removing devices to OS
36625 */
36626 - atomic_set(&pinstance->expose_resources, 1);
36627 + atomic_set_unchecked(&pinstance->expose_resources, 1);
36628 schedule_work(&pinstance->worker_q);
36629 return rc;
36630
36631 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36632 index e1d150f..6c6df44 100644
36633 --- a/drivers/scsi/pmcraid.h
36634 +++ b/drivers/scsi/pmcraid.h
36635 @@ -748,7 +748,7 @@ struct pmcraid_instance {
36636 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36637
36638 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
36639 - atomic_t last_message_id;
36640 + atomic_unchecked_t last_message_id;
36641
36642 /* configuration table */
36643 struct pmcraid_config_table *cfg_table;
36644 @@ -777,7 +777,7 @@ struct pmcraid_instance {
36645 atomic_t outstanding_cmds;
36646
36647 /* should add/delete resources to mid-layer now ?*/
36648 - atomic_t expose_resources;
36649 + atomic_unchecked_t expose_resources;
36650
36651
36652
36653 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
36654 struct pmcraid_config_table_entry_ext cfg_entry_ext;
36655 };
36656 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36657 - atomic_t read_failures; /* count of failed READ commands */
36658 - atomic_t write_failures; /* count of failed WRITE commands */
36659 + atomic_unchecked_t read_failures; /* count of failed READ commands */
36660 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36661
36662 /* To indicate add/delete/modify during CCN */
36663 u8 change_detected;
36664 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
36665 index a244303..6015eb7 100644
36666 --- a/drivers/scsi/qla2xxx/qla_def.h
36667 +++ b/drivers/scsi/qla2xxx/qla_def.h
36668 @@ -2264,7 +2264,7 @@ struct isp_operations {
36669 int (*start_scsi) (srb_t *);
36670 int (*abort_isp) (struct scsi_qla_host *);
36671 int (*iospace_config)(struct qla_hw_data*);
36672 -};
36673 +} __no_const;
36674
36675 /* MSI-X Support *************************************************************/
36676
36677 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
36678 index 7f2492e..5113877 100644
36679 --- a/drivers/scsi/qla4xxx/ql4_def.h
36680 +++ b/drivers/scsi/qla4xxx/ql4_def.h
36681 @@ -268,7 +268,7 @@ struct ddb_entry {
36682 * (4000 only) */
36683 atomic_t relogin_timer; /* Max Time to wait for
36684 * relogin to complete */
36685 - atomic_t relogin_retry_count; /* Num of times relogin has been
36686 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36687 * retried */
36688 uint32_t default_time2wait; /* Default Min time between
36689 * relogins (+aens) */
36690 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
36691 index ee47820..a83b1f4 100644
36692 --- a/drivers/scsi/qla4xxx/ql4_os.c
36693 +++ b/drivers/scsi/qla4xxx/ql4_os.c
36694 @@ -2551,12 +2551,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
36695 */
36696 if (!iscsi_is_session_online(cls_sess)) {
36697 /* Reset retry relogin timer */
36698 - atomic_inc(&ddb_entry->relogin_retry_count);
36699 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36700 DEBUG2(ql4_printk(KERN_INFO, ha,
36701 "%s: index[%d] relogin timed out-retrying"
36702 " relogin (%d), retry (%d)\n", __func__,
36703 ddb_entry->fw_ddb_index,
36704 - atomic_read(&ddb_entry->relogin_retry_count),
36705 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
36706 ddb_entry->default_time2wait + 4));
36707 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
36708 atomic_set(&ddb_entry->retry_relogin_timer,
36709 @@ -4453,7 +4453,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
36710
36711 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36712 atomic_set(&ddb_entry->relogin_timer, 0);
36713 - atomic_set(&ddb_entry->relogin_retry_count, 0);
36714 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36715 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
36716 ddb_entry->default_relogin_timeout =
36717 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
36718 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
36719 index 07322ec..91ccc23 100644
36720 --- a/drivers/scsi/scsi.c
36721 +++ b/drivers/scsi/scsi.c
36722 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
36723 unsigned long timeout;
36724 int rtn = 0;
36725
36726 - atomic_inc(&cmd->device->iorequest_cnt);
36727 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36728
36729 /* check if the device is still usable */
36730 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36731 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
36732 index 4037fd5..a19fcc7 100644
36733 --- a/drivers/scsi/scsi_lib.c
36734 +++ b/drivers/scsi/scsi_lib.c
36735 @@ -1415,7 +1415,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
36736 shost = sdev->host;
36737 scsi_init_cmd_errh(cmd);
36738 cmd->result = DID_NO_CONNECT << 16;
36739 - atomic_inc(&cmd->device->iorequest_cnt);
36740 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36741
36742 /*
36743 * SCSI request completion path will do scsi_device_unbusy(),
36744 @@ -1441,9 +1441,9 @@ static void scsi_softirq_done(struct request *rq)
36745
36746 INIT_LIST_HEAD(&cmd->eh_entry);
36747
36748 - atomic_inc(&cmd->device->iodone_cnt);
36749 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
36750 if (cmd->result)
36751 - atomic_inc(&cmd->device->ioerr_cnt);
36752 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36753
36754 disposition = scsi_decide_disposition(cmd);
36755 if (disposition != SUCCESS &&
36756 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
36757 index 04c2a27..9d8bd66 100644
36758 --- a/drivers/scsi/scsi_sysfs.c
36759 +++ b/drivers/scsi/scsi_sysfs.c
36760 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
36761 char *buf) \
36762 { \
36763 struct scsi_device *sdev = to_scsi_device(dev); \
36764 - unsigned long long count = atomic_read(&sdev->field); \
36765 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
36766 return snprintf(buf, 20, "0x%llx\n", count); \
36767 } \
36768 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36769 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
36770 index 84a1fdf..693b0d6 100644
36771 --- a/drivers/scsi/scsi_tgt_lib.c
36772 +++ b/drivers/scsi/scsi_tgt_lib.c
36773 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
36774 int err;
36775
36776 dprintk("%lx %u\n", uaddr, len);
36777 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
36778 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
36779 if (err) {
36780 /*
36781 * TODO: need to fixup sg_tablesize, max_segment_size,
36782 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
36783 index 80fbe2a..efa223b 100644
36784 --- a/drivers/scsi/scsi_transport_fc.c
36785 +++ b/drivers/scsi/scsi_transport_fc.c
36786 @@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
36787 * Netlink Infrastructure
36788 */
36789
36790 -static atomic_t fc_event_seq;
36791 +static atomic_unchecked_t fc_event_seq;
36792
36793 /**
36794 * fc_get_event_number - Obtain the next sequential FC event number
36795 @@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
36796 u32
36797 fc_get_event_number(void)
36798 {
36799 - return atomic_add_return(1, &fc_event_seq);
36800 + return atomic_add_return_unchecked(1, &fc_event_seq);
36801 }
36802 EXPORT_SYMBOL(fc_get_event_number);
36803
36804 @@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
36805 {
36806 int error;
36807
36808 - atomic_set(&fc_event_seq, 0);
36809 + atomic_set_unchecked(&fc_event_seq, 0);
36810
36811 error = transport_class_register(&fc_host_class);
36812 if (error)
36813 @@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
36814 char *cp;
36815
36816 *val = simple_strtoul(buf, &cp, 0);
36817 - if ((*cp && (*cp != '\n')) || (*val < 0))
36818 + if (*cp && (*cp != '\n'))
36819 return -EINVAL;
36820 /*
36821 * Check for overflow; dev_loss_tmo is u32
36822 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
36823 index 1cf640e..78e9014 100644
36824 --- a/drivers/scsi/scsi_transport_iscsi.c
36825 +++ b/drivers/scsi/scsi_transport_iscsi.c
36826 @@ -79,7 +79,7 @@ struct iscsi_internal {
36827 struct transport_container session_cont;
36828 };
36829
36830 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36831 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36832 static struct workqueue_struct *iscsi_eh_timer_workq;
36833
36834 static DEFINE_IDA(iscsi_sess_ida);
36835 @@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
36836 int err;
36837
36838 ihost = shost->shost_data;
36839 - session->sid = atomic_add_return(1, &iscsi_session_nr);
36840 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36841
36842 if (target_id == ISCSI_MAX_TARGET) {
36843 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
36844 @@ -2940,7 +2940,7 @@ static __init int iscsi_transport_init(void)
36845 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36846 ISCSI_TRANSPORT_VERSION);
36847
36848 - atomic_set(&iscsi_session_nr, 0);
36849 + atomic_set_unchecked(&iscsi_session_nr, 0);
36850
36851 err = class_register(&iscsi_transport_class);
36852 if (err)
36853 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
36854 index 21a045e..ec89e03 100644
36855 --- a/drivers/scsi/scsi_transport_srp.c
36856 +++ b/drivers/scsi/scsi_transport_srp.c
36857 @@ -33,7 +33,7 @@
36858 #include "scsi_transport_srp_internal.h"
36859
36860 struct srp_host_attrs {
36861 - atomic_t next_port_id;
36862 + atomic_unchecked_t next_port_id;
36863 };
36864 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36865
36866 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
36867 struct Scsi_Host *shost = dev_to_shost(dev);
36868 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36869
36870 - atomic_set(&srp_host->next_port_id, 0);
36871 + atomic_set_unchecked(&srp_host->next_port_id, 0);
36872 return 0;
36873 }
36874
36875 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
36876 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36877 rport->roles = ids->roles;
36878
36879 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36880 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36881 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36882
36883 transport_setup_device(&rport->dev);
36884 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
36885 index eacd46b..e3f4d62 100644
36886 --- a/drivers/scsi/sg.c
36887 +++ b/drivers/scsi/sg.c
36888 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
36889 sdp->disk->disk_name,
36890 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
36891 NULL,
36892 - (char *)arg);
36893 + (char __user *)arg);
36894 case BLKTRACESTART:
36895 return blk_trace_startstop(sdp->device->request_queue, 1);
36896 case BLKTRACESTOP:
36897 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
36898 const struct file_operations * fops;
36899 };
36900
36901 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36902 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36903 {"allow_dio", &adio_fops},
36904 {"debug", &debug_fops},
36905 {"def_reserved_size", &dressz_fops},
36906 @@ -2332,7 +2332,7 @@ sg_proc_init(void)
36907 if (!sg_proc_sgp)
36908 return 1;
36909 for (k = 0; k < num_leaves; ++k) {
36910 - struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
36911 + const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
36912 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
36913 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
36914 }
36915 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
36916 index 3d8f662..070f1a5 100644
36917 --- a/drivers/spi/spi.c
36918 +++ b/drivers/spi/spi.c
36919 @@ -1361,7 +1361,7 @@ int spi_bus_unlock(struct spi_master *master)
36920 EXPORT_SYMBOL_GPL(spi_bus_unlock);
36921
36922 /* portable code must never pass more than 32 bytes */
36923 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
36924 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
36925
36926 static u8 *buf;
36927
36928 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
36929 index d91751f..a3a9e36 100644
36930 --- a/drivers/staging/octeon/ethernet-rx.c
36931 +++ b/drivers/staging/octeon/ethernet-rx.c
36932 @@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36933 /* Increment RX stats for virtual ports */
36934 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
36935 #ifdef CONFIG_64BIT
36936 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
36937 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
36938 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
36939 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
36940 #else
36941 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
36942 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
36943 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
36944 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
36945 #endif
36946 }
36947 netif_receive_skb(skb);
36948 @@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36949 dev->name);
36950 */
36951 #ifdef CONFIG_64BIT
36952 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
36953 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36954 #else
36955 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
36956 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
36957 #endif
36958 dev_kfree_skb_irq(skb);
36959 }
36960 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
36961 index 60cba81..71eb239 100644
36962 --- a/drivers/staging/octeon/ethernet.c
36963 +++ b/drivers/staging/octeon/ethernet.c
36964 @@ -259,11 +259,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
36965 * since the RX tasklet also increments it.
36966 */
36967 #ifdef CONFIG_64BIT
36968 - atomic64_add(rx_status.dropped_packets,
36969 - (atomic64_t *)&priv->stats.rx_dropped);
36970 + atomic64_add_unchecked(rx_status.dropped_packets,
36971 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36972 #else
36973 - atomic_add(rx_status.dropped_packets,
36974 - (atomic_t *)&priv->stats.rx_dropped);
36975 + atomic_add_unchecked(rx_status.dropped_packets,
36976 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
36977 #endif
36978 }
36979
36980 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
36981 index d3d8727..f9327bb8 100644
36982 --- a/drivers/staging/rtl8712/rtl871x_io.h
36983 +++ b/drivers/staging/rtl8712/rtl871x_io.h
36984 @@ -108,7 +108,7 @@ struct _io_ops {
36985 u8 *pmem);
36986 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
36987 u8 *pmem);
36988 -};
36989 +} __no_const;
36990
36991 struct io_req {
36992 struct list_head list;
36993 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
36994 index c7b5e8b..783d6cb 100644
36995 --- a/drivers/staging/sbe-2t3e3/netdev.c
36996 +++ b/drivers/staging/sbe-2t3e3/netdev.c
36997 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36998 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
36999
37000 if (rlen)
37001 - if (copy_to_user(data, &resp, rlen))
37002 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37003 return -EFAULT;
37004
37005 return 0;
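[editor's note] The sbe-2t3e3 hunk above adds a bounds check so that a device-reported reply length can never copy past the on-stack resp buffer before copy_to_user() runs. The following standalone sketch illustrates the same clamp-before-copy pattern; copy_reply() and struct resp here are illustrative names, not the driver's types.

/* Userspace sketch of "reject oversized lengths before copying". */
#include <stdio.h>
#include <string.h>

struct resp { char data[64]; };

/* Copy at most sizeof(*r) bytes out, mirroring the added
 * "rlen > sizeof resp || copy_to_user(...)" guard. */
static int copy_reply(char *dst, size_t dst_len, const struct resp *r, size_t rlen)
{
        if (rlen > sizeof(*r) || rlen > dst_len)
                return -1;              /* would overrun source or destination */
        memcpy(dst, r, rlen);
        return 0;
}

int main(void)
{
        struct resp r = { "ok" };
        char out[64];

        printf("valid copy: %d\n", copy_reply(out, sizeof(out), &r, sizeof(r)));
        printf("oversized : %d\n", copy_reply(out, sizeof(out), &r, 4096));
        return 0;
}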
37006 diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
37007 index 42cdafe..2769103 100644
37008 --- a/drivers/staging/speakup/speakup_soft.c
37009 +++ b/drivers/staging/speakup/speakup_soft.c
37010 @@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
37011 break;
37012 } else if (!initialized) {
37013 if (*init) {
37014 - ch = *init;
37015 init++;
37016 } else {
37017 initialized = 1;
37018 }
37019 + ch = *init;
37020 } else {
37021 ch = synth_buffer_getc();
37022 }
37023 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37024 index c7b888c..c94be93 100644
37025 --- a/drivers/staging/usbip/usbip_common.h
37026 +++ b/drivers/staging/usbip/usbip_common.h
37027 @@ -289,7 +289,7 @@ struct usbip_device {
37028 void (*shutdown)(struct usbip_device *);
37029 void (*reset)(struct usbip_device *);
37030 void (*unusable)(struct usbip_device *);
37031 - } eh_ops;
37032 + } __no_const eh_ops;
37033 };
37034
37035 /* usbip_common.c */
37036 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37037 index 88b3298..3783eee 100644
37038 --- a/drivers/staging/usbip/vhci.h
37039 +++ b/drivers/staging/usbip/vhci.h
37040 @@ -88,7 +88,7 @@ struct vhci_hcd {
37041 unsigned resuming:1;
37042 unsigned long re_timeout;
37043
37044 - atomic_t seqnum;
37045 + atomic_unchecked_t seqnum;
37046
37047 /*
37048 * NOTE:
37049 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37050 index dca9bf1..80735c9 100644
37051 --- a/drivers/staging/usbip/vhci_hcd.c
37052 +++ b/drivers/staging/usbip/vhci_hcd.c
37053 @@ -488,7 +488,7 @@ static void vhci_tx_urb(struct urb *urb)
37054 return;
37055 }
37056
37057 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37058 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37059 if (priv->seqnum == 0xffff)
37060 dev_info(&urb->dev->dev, "seqnum max\n");
37061
37062 @@ -740,7 +740,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37063 return -ENOMEM;
37064 }
37065
37066 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37067 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37068 if (unlink->seqnum == 0xffff)
37069 pr_info("seqnum max\n");
37070
37071 @@ -928,7 +928,7 @@ static int vhci_start(struct usb_hcd *hcd)
37072 vdev->rhport = rhport;
37073 }
37074
37075 - atomic_set(&vhci->seqnum, 0);
37076 + atomic_set_unchecked(&vhci->seqnum, 0);
37077 spin_lock_init(&vhci->lock);
37078
37079 hcd->power_budget = 0; /* no limit */
37080 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37081 index f5fba732..210a16c 100644
37082 --- a/drivers/staging/usbip/vhci_rx.c
37083 +++ b/drivers/staging/usbip/vhci_rx.c
37084 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37085 if (!urb) {
37086 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37087 pr_info("max seqnum %d\n",
37088 - atomic_read(&the_controller->seqnum));
37089 + atomic_read_unchecked(&the_controller->seqnum));
37090 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37091 return;
37092 }
37093 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37094 index 7735027..30eed13 100644
37095 --- a/drivers/staging/vt6655/hostap.c
37096 +++ b/drivers/staging/vt6655/hostap.c
37097 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37098 *
37099 */
37100
37101 +static net_device_ops_no_const apdev_netdev_ops;
37102 +
37103 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37104 {
37105 PSDevice apdev_priv;
37106 struct net_device *dev = pDevice->dev;
37107 int ret;
37108 - const struct net_device_ops apdev_netdev_ops = {
37109 - .ndo_start_xmit = pDevice->tx_80211,
37110 - };
37111
37112 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37113
37114 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37115 *apdev_priv = *pDevice;
37116 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37117
37118 + /* only half broken now */
37119 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37120 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37121
37122 pDevice->apdev->type = ARPHRD_IEEE80211;
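[editor's note] In this vt6655 hunk (and the identical vt6656 hunk that follows), the driver used to publish the address of a function-local const net_device_ops structure; the patch replaces it with a file-scope net_device_ops_no_const instance whose single callback is filled in at init, which is why the comment says "only half broken now". The sketch below models the underlying point - never hand out the address of a function-local object - with illustrative names only.

/* Userspace model: publish a static object written once, not a local. */
#include <stdio.h>

struct ops { int (*start_xmit)(void); };

static int tx_80211(void) { return 42; }

/* Static storage duration, so the address stays valid after setup(). */
static struct ops apdev_ops;

static const struct ops *setup(void)
{
        apdev_ops.start_xmit = tx_80211;   /* the one runtime write */
        return &apdev_ops;
}

int main(void)
{
        const struct ops *o = setup();

        printf("%d\n", o->start_xmit());
        return 0;
}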
37123 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37124 index 51b5adf..098e320 100644
37125 --- a/drivers/staging/vt6656/hostap.c
37126 +++ b/drivers/staging/vt6656/hostap.c
37127 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37128 *
37129 */
37130
37131 +static net_device_ops_no_const apdev_netdev_ops;
37132 +
37133 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37134 {
37135 PSDevice apdev_priv;
37136 struct net_device *dev = pDevice->dev;
37137 int ret;
37138 - const struct net_device_ops apdev_netdev_ops = {
37139 - .ndo_start_xmit = pDevice->tx_80211,
37140 - };
37141
37142 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37143
37144 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37145 *apdev_priv = *pDevice;
37146 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37147
37148 + /* only half broken now */
37149 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37150 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37151
37152 pDevice->apdev->type = ARPHRD_IEEE80211;
37153 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37154 index 7843dfd..3db105f 100644
37155 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
37156 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37157 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37158
37159 struct usbctlx_completor {
37160 int (*complete) (struct usbctlx_completor *);
37161 -};
37162 +} __no_const;
37163
37164 static int
37165 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37166 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37167 index 1ca66ea..76f1343 100644
37168 --- a/drivers/staging/zcache/tmem.c
37169 +++ b/drivers/staging/zcache/tmem.c
37170 @@ -39,7 +39,7 @@
37171 * A tmem host implementation must use this function to register callbacks
37172 * for memory allocation.
37173 */
37174 -static struct tmem_hostops tmem_hostops;
37175 +static tmem_hostops_no_const tmem_hostops;
37176
37177 static void tmem_objnode_tree_init(void);
37178
37179 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37180 * A tmem host implementation must use this function to register
37181 * callbacks for a page-accessible memory (PAM) implementation
37182 */
37183 -static struct tmem_pamops tmem_pamops;
37184 +static tmem_pamops_no_const tmem_pamops;
37185
37186 void tmem_register_pamops(struct tmem_pamops *m)
37187 {
37188 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37189 index 0d4aa82..f7832d4 100644
37190 --- a/drivers/staging/zcache/tmem.h
37191 +++ b/drivers/staging/zcache/tmem.h
37192 @@ -180,6 +180,7 @@ struct tmem_pamops {
37193 void (*new_obj)(struct tmem_obj *);
37194 int (*replace_in_obj)(void *, struct tmem_obj *);
37195 };
37196 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37197 extern void tmem_register_pamops(struct tmem_pamops *m);
37198
37199 /* memory allocation methods provided by the host implementation */
37200 @@ -189,6 +190,7 @@ struct tmem_hostops {
37201 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37202 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37203 };
37204 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37205 extern void tmem_register_hostops(struct tmem_hostops *m);
37206
37207 /* core tmem accessor functions */
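[editor's note] The tmem hunks above show the __no_const typedef pattern used throughout this patch: ops structures that genuinely must be written at runtime (here, registration callbacks) get a typedef carrying __no_const, so the constification plugin leaves those particular objects writable while other instances of the struct can still be made const. The snippet below only sketches the shape of the pattern; __no_const is stubbed out as an empty macro so it compiles standalone, and the names are illustrative.

/* Sketch of the opt-out typedef pattern used by tmem_hostops/tmem_pamops. */
#include <stdio.h>

#define __no_const /* real attribute comes from the constify GCC plugin */

struct hostops {
        void *(*obj_alloc)(void);
};
typedef struct hostops __no_const hostops_no_const;

/* Writable at runtime because it uses the opted-out typedef. */
static hostops_no_const registered_ops;

static void *demo_alloc(void) { static int x; return &x; }

static void register_hostops(struct hostops *m)
{
        registered_ops = *m;            /* runtime registration stays legal */
}

int main(void)
{
        struct hostops mine = { .obj_alloc = demo_alloc };

        register_hostops(&mine);
        printf("%p\n", registered_ops.obj_alloc());
        return 0;
}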
37208 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
37209 index f015839..b15dfc4 100644
37210 --- a/drivers/target/target_core_tmr.c
37211 +++ b/drivers/target/target_core_tmr.c
37212 @@ -327,7 +327,7 @@ static void core_tmr_drain_task_list(
37213 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
37214 cmd->t_task_list_num,
37215 atomic_read(&cmd->t_task_cdbs_left),
37216 - atomic_read(&cmd->t_task_cdbs_sent),
37217 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37218 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37219 (cmd->transport_state & CMD_T_STOP) != 0,
37220 (cmd->transport_state & CMD_T_SENT) != 0);
37221 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37222 index 443704f..92d3517 100644
37223 --- a/drivers/target/target_core_transport.c
37224 +++ b/drivers/target/target_core_transport.c
37225 @@ -1355,7 +1355,7 @@ struct se_device *transport_add_device_to_core_hba(
37226 spin_lock_init(&dev->se_port_lock);
37227 spin_lock_init(&dev->se_tmr_lock);
37228 spin_lock_init(&dev->qf_cmd_lock);
37229 - atomic_set(&dev->dev_ordered_id, 0);
37230 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
37231
37232 se_dev_set_default_attribs(dev, dev_limits);
37233
37234 @@ -1542,7 +1542,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37235 * Used to determine when ORDERED commands should go from
37236 * Dormant to Active status.
37237 */
37238 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37239 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37240 smp_mb__after_atomic_inc();
37241 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37242 cmd->se_ordered_id, cmd->sam_task_attr,
37243 @@ -1956,7 +1956,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
37244 " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
37245 cmd->t_task_list_num,
37246 atomic_read(&cmd->t_task_cdbs_left),
37247 - atomic_read(&cmd->t_task_cdbs_sent),
37248 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37249 atomic_read(&cmd->t_task_cdbs_ex_left),
37250 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37251 (cmd->transport_state & CMD_T_STOP) != 0,
37252 @@ -2216,9 +2216,9 @@ check_depth:
37253 cmd = task->task_se_cmd;
37254 spin_lock_irqsave(&cmd->t_state_lock, flags);
37255 task->task_flags |= (TF_ACTIVE | TF_SENT);
37256 - atomic_inc(&cmd->t_task_cdbs_sent);
37257 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
37258
37259 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
37260 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37261 cmd->t_task_list_num)
37262 cmd->transport_state |= CMD_T_SENT;
37263
37264 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37265 index 3436436..772237b 100644
37266 --- a/drivers/tty/hvc/hvcs.c
37267 +++ b/drivers/tty/hvc/hvcs.c
37268 @@ -83,6 +83,7 @@
37269 #include <asm/hvcserver.h>
37270 #include <asm/uaccess.h>
37271 #include <asm/vio.h>
37272 +#include <asm/local.h>
37273
37274 /*
37275 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37276 @@ -270,7 +271,7 @@ struct hvcs_struct {
37277 unsigned int index;
37278
37279 struct tty_struct *tty;
37280 - int open_count;
37281 + local_t open_count;
37282
37283 /*
37284 * Used to tell the driver kernel_thread what operations need to take
37285 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37286
37287 spin_lock_irqsave(&hvcsd->lock, flags);
37288
37289 - if (hvcsd->open_count > 0) {
37290 + if (local_read(&hvcsd->open_count) > 0) {
37291 spin_unlock_irqrestore(&hvcsd->lock, flags);
37292 printk(KERN_INFO "HVCS: vterm state unchanged. "
37293 "The hvcs device node is still in use.\n");
37294 @@ -1138,7 +1139,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37295 if ((retval = hvcs_partner_connect(hvcsd)))
37296 goto error_release;
37297
37298 - hvcsd->open_count = 1;
37299 + local_set(&hvcsd->open_count, 1);
37300 hvcsd->tty = tty;
37301 tty->driver_data = hvcsd;
37302
37303 @@ -1172,7 +1173,7 @@ fast_open:
37304
37305 spin_lock_irqsave(&hvcsd->lock, flags);
37306 kref_get(&hvcsd->kref);
37307 - hvcsd->open_count++;
37308 + local_inc(&hvcsd->open_count);
37309 hvcsd->todo_mask |= HVCS_SCHED_READ;
37310 spin_unlock_irqrestore(&hvcsd->lock, flags);
37311
37312 @@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37313 hvcsd = tty->driver_data;
37314
37315 spin_lock_irqsave(&hvcsd->lock, flags);
37316 - if (--hvcsd->open_count == 0) {
37317 + if (local_dec_and_test(&hvcsd->open_count)) {
37318
37319 vio_disable_interrupts(hvcsd->vdev);
37320
37321 @@ -1242,10 +1243,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37322 free_irq(irq, hvcsd);
37323 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37324 return;
37325 - } else if (hvcsd->open_count < 0) {
37326 + } else if (local_read(&hvcsd->open_count) < 0) {
37327 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37328 " is missmanaged.\n",
37329 - hvcsd->vdev->unit_address, hvcsd->open_count);
37330 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37331 }
37332
37333 spin_unlock_irqrestore(&hvcsd->lock, flags);
37334 @@ -1261,7 +1262,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37335
37336 spin_lock_irqsave(&hvcsd->lock, flags);
37337 /* Preserve this so that we know how many kref refs to put */
37338 - temp_open_count = hvcsd->open_count;
37339 + temp_open_count = local_read(&hvcsd->open_count);
37340
37341 /*
37342 * Don't kref put inside the spinlock because the destruction
37343 @@ -1276,7 +1277,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37344 hvcsd->tty->driver_data = NULL;
37345 hvcsd->tty = NULL;
37346
37347 - hvcsd->open_count = 0;
37348 + local_set(&hvcsd->open_count, 0);
37349
37350 /* This will drop any buffered data on the floor which is OK in a hangup
37351 * scenario. */
37352 @@ -1347,7 +1348,7 @@ static int hvcs_write(struct tty_struct *tty,
37353 * the middle of a write operation? This is a crummy place to do this
37354 * but we want to keep it all in the spinlock.
37355 */
37356 - if (hvcsd->open_count <= 0) {
37357 + if (local_read(&hvcsd->open_count) <= 0) {
37358 spin_unlock_irqrestore(&hvcsd->lock, flags);
37359 return -ENODEV;
37360 }
37361 @@ -1421,7 +1422,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37362 {
37363 struct hvcs_struct *hvcsd = tty->driver_data;
37364
37365 - if (!hvcsd || hvcsd->open_count <= 0)
37366 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37367 return 0;
37368
37369 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
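[editor's note] The hvcs hunks above (and the ipwireless tty hunks below) convert a bare int open_count into a local_t manipulated only through local_set/local_read/local_inc/local_dec_and_test, so every access to the counter goes through one hardened accessor type instead of ad-hoc ++/-- on a plain int. The sketch below models those accessors in userspace; the local_t defined here is a stand-in, not the asm/local.h implementation.

/* Userspace model of the open_count conversion. */
#include <stdio.h>

typedef struct { long v; } local_t;             /* stand-in for asm/local.h */

static void local_set(local_t *l, long i)       { l->v = i; }
static long local_read(const local_t *l)        { return l->v; }
static void local_inc(local_t *l)               { l->v++; }
static int  local_dec_and_test(local_t *l)      { return --l->v == 0; }

int main(void)
{
        local_t open_count;

        local_set(&open_count, 1);              /* first open */
        local_inc(&open_count);                 /* second open */
        local_dec_and_test(&open_count);        /* one close: not yet zero */
        if (local_dec_and_test(&open_count))    /* last close */
                printf("released, open_count=%ld\n", local_read(&open_count));
        return 0;
}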
37370 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37371 index 4daf962..b4a2281 100644
37372 --- a/drivers/tty/ipwireless/tty.c
37373 +++ b/drivers/tty/ipwireless/tty.c
37374 @@ -29,6 +29,7 @@
37375 #include <linux/tty_driver.h>
37376 #include <linux/tty_flip.h>
37377 #include <linux/uaccess.h>
37378 +#include <asm/local.h>
37379
37380 #include "tty.h"
37381 #include "network.h"
37382 @@ -51,7 +52,7 @@ struct ipw_tty {
37383 int tty_type;
37384 struct ipw_network *network;
37385 struct tty_struct *linux_tty;
37386 - int open_count;
37387 + local_t open_count;
37388 unsigned int control_lines;
37389 struct mutex ipw_tty_mutex;
37390 int tx_bytes_queued;
37391 @@ -117,10 +118,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37392 mutex_unlock(&tty->ipw_tty_mutex);
37393 return -ENODEV;
37394 }
37395 - if (tty->open_count == 0)
37396 + if (local_read(&tty->open_count) == 0)
37397 tty->tx_bytes_queued = 0;
37398
37399 - tty->open_count++;
37400 + local_inc(&tty->open_count);
37401
37402 tty->linux_tty = linux_tty;
37403 linux_tty->driver_data = tty;
37404 @@ -136,9 +137,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37405
37406 static void do_ipw_close(struct ipw_tty *tty)
37407 {
37408 - tty->open_count--;
37409 -
37410 - if (tty->open_count == 0) {
37411 + if (local_dec_return(&tty->open_count) == 0) {
37412 struct tty_struct *linux_tty = tty->linux_tty;
37413
37414 if (linux_tty != NULL) {
37415 @@ -159,7 +158,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37416 return;
37417
37418 mutex_lock(&tty->ipw_tty_mutex);
37419 - if (tty->open_count == 0) {
37420 + if (local_read(&tty->open_count) == 0) {
37421 mutex_unlock(&tty->ipw_tty_mutex);
37422 return;
37423 }
37424 @@ -188,7 +187,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37425 return;
37426 }
37427
37428 - if (!tty->open_count) {
37429 + if (!local_read(&tty->open_count)) {
37430 mutex_unlock(&tty->ipw_tty_mutex);
37431 return;
37432 }
37433 @@ -230,7 +229,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37434 return -ENODEV;
37435
37436 mutex_lock(&tty->ipw_tty_mutex);
37437 - if (!tty->open_count) {
37438 + if (!local_read(&tty->open_count)) {
37439 mutex_unlock(&tty->ipw_tty_mutex);
37440 return -EINVAL;
37441 }
37442 @@ -270,7 +269,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37443 if (!tty)
37444 return -ENODEV;
37445
37446 - if (!tty->open_count)
37447 + if (!local_read(&tty->open_count))
37448 return -EINVAL;
37449
37450 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37451 @@ -312,7 +311,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37452 if (!tty)
37453 return 0;
37454
37455 - if (!tty->open_count)
37456 + if (!local_read(&tty->open_count))
37457 return 0;
37458
37459 return tty->tx_bytes_queued;
37460 @@ -393,7 +392,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37461 if (!tty)
37462 return -ENODEV;
37463
37464 - if (!tty->open_count)
37465 + if (!local_read(&tty->open_count))
37466 return -EINVAL;
37467
37468 return get_control_lines(tty);
37469 @@ -409,7 +408,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37470 if (!tty)
37471 return -ENODEV;
37472
37473 - if (!tty->open_count)
37474 + if (!local_read(&tty->open_count))
37475 return -EINVAL;
37476
37477 return set_control_lines(tty, set, clear);
37478 @@ -423,7 +422,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37479 if (!tty)
37480 return -ENODEV;
37481
37482 - if (!tty->open_count)
37483 + if (!local_read(&tty->open_count))
37484 return -EINVAL;
37485
37486 /* FIXME: Exactly how is the tty object locked here .. */
37487 @@ -572,7 +571,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37488 against a parallel ioctl etc */
37489 mutex_lock(&ttyj->ipw_tty_mutex);
37490 }
37491 - while (ttyj->open_count)
37492 + while (local_read(&ttyj->open_count))
37493 do_ipw_close(ttyj);
37494 ipwireless_disassociate_network_ttys(network,
37495 ttyj->channel_idx);
37496 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37497 index c43b683..0a88f1c 100644
37498 --- a/drivers/tty/n_gsm.c
37499 +++ b/drivers/tty/n_gsm.c
37500 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37501 kref_init(&dlci->ref);
37502 mutex_init(&dlci->mutex);
37503 dlci->fifo = &dlci->_fifo;
37504 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37505 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37506 kfree(dlci);
37507 return NULL;
37508 }
37509 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37510 index 94b6eda..15f7cec 100644
37511 --- a/drivers/tty/n_tty.c
37512 +++ b/drivers/tty/n_tty.c
37513 @@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37514 {
37515 *ops = tty_ldisc_N_TTY;
37516 ops->owner = NULL;
37517 - ops->refcount = ops->flags = 0;
37518 + atomic_set(&ops->refcount, 0);
37519 + ops->flags = 0;
37520 }
37521 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37522 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37523 index eeae7fa..177a743 100644
37524 --- a/drivers/tty/pty.c
37525 +++ b/drivers/tty/pty.c
37526 @@ -707,8 +707,10 @@ static void __init unix98_pty_init(void)
37527 panic("Couldn't register Unix98 pts driver");
37528
37529 /* Now create the /dev/ptmx special device */
37530 + pax_open_kernel();
37531 tty_default_fops(&ptmx_fops);
37532 - ptmx_fops.open = ptmx_open;
37533 + *(void **)&ptmx_fops.open = ptmx_open;
37534 + pax_close_kernel();
37535
37536 cdev_init(&ptmx_cdev, &ptmx_fops);
37537 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
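[editor's note] With constification, ptmx_fops ends up in read-only memory, so the single boot-time write of its .open member is bracketed by pax_open_kernel()/pax_close_kernel(), which (as provided elsewhere in this patch) temporarily lift the write protection, and the *(void **) cast sidesteps the now-const member type. The sketch below merely models that "open a write window, patch one pointer, close it" idea in userspace with mprotect(); it is not the kernel mechanism.

/* Userspace model of the write-window pattern (Linux/POSIX only). */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct file_ops { int (*open)(void); };

static int ptmx_open_stub(void) { return 1; }

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        /* A read-only page standing in for the const-protected fops. */
        struct file_ops *fops = mmap(NULL, page, PROT_READ,
                                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (fops == MAP_FAILED)
                return 1;

        mprotect(fops, page, PROT_READ | PROT_WRITE);   /* pax_open_kernel()  */
        fops->open = ptmx_open_stub;                    /* the one runtime write */
        mprotect(fops, page, PROT_READ);                /* pax_close_kernel() */

        printf("%d\n", fops->open());
        return 0;
}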
37538 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37539 index 2b42a01..32a2ed3 100644
37540 --- a/drivers/tty/serial/kgdboc.c
37541 +++ b/drivers/tty/serial/kgdboc.c
37542 @@ -24,8 +24,9 @@
37543 #define MAX_CONFIG_LEN 40
37544
37545 static struct kgdb_io kgdboc_io_ops;
37546 +static struct kgdb_io kgdboc_io_ops_console;
37547
37548 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37549 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37550 static int configured = -1;
37551
37552 static char config[MAX_CONFIG_LEN];
37553 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
37554 kgdboc_unregister_kbd();
37555 if (configured == 1)
37556 kgdb_unregister_io_module(&kgdboc_io_ops);
37557 + else if (configured == 2)
37558 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
37559 }
37560
37561 static int configure_kgdboc(void)
37562 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
37563 int err;
37564 char *cptr = config;
37565 struct console *cons;
37566 + int is_console = 0;
37567
37568 err = kgdboc_option_setup(config);
37569 if (err || !strlen(config) || isspace(config[0]))
37570 goto noconfig;
37571
37572 err = -ENODEV;
37573 - kgdboc_io_ops.is_console = 0;
37574 kgdb_tty_driver = NULL;
37575
37576 kgdboc_use_kms = 0;
37577 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
37578 int idx;
37579 if (cons->device && cons->device(cons, &idx) == p &&
37580 idx == tty_line) {
37581 - kgdboc_io_ops.is_console = 1;
37582 + is_console = 1;
37583 break;
37584 }
37585 cons = cons->next;
37586 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
37587 kgdb_tty_line = tty_line;
37588
37589 do_register:
37590 - err = kgdb_register_io_module(&kgdboc_io_ops);
37591 + if (is_console) {
37592 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
37593 + configured = 2;
37594 + } else {
37595 + err = kgdb_register_io_module(&kgdboc_io_ops);
37596 + configured = 1;
37597 + }
37598 if (err)
37599 goto noconfig;
37600
37601 - configured = 1;
37602 -
37603 return 0;
37604
37605 noconfig:
37606 @@ -213,7 +220,7 @@ noconfig:
37607 static int __init init_kgdboc(void)
37608 {
37609 /* Already configured? */
37610 - if (configured == 1)
37611 + if (configured >= 1)
37612 return 0;
37613
37614 return configure_kgdboc();
37615 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
37616 if (config[len - 1] == '\n')
37617 config[len - 1] = '\0';
37618
37619 - if (configured == 1)
37620 + if (configured >= 1)
37621 cleanup_kgdboc();
37622
37623 /* Go and configure with the new params. */
37624 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
37625 .post_exception = kgdboc_post_exp_handler,
37626 };
37627
37628 +static struct kgdb_io kgdboc_io_ops_console = {
37629 + .name = "kgdboc",
37630 + .read_char = kgdboc_get_char,
37631 + .write_char = kgdboc_put_char,
37632 + .pre_exception = kgdboc_pre_exp_handler,
37633 + .post_exception = kgdboc_post_exp_handler,
37634 + .is_console = 1
37635 +};
37636 +
37637 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
37638 /* This is only available if kgdboc is a built in for early debugging */
37639 static int __init kgdboc_early_init(char *opt)
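[editor's note] Rather than flipping kgdboc_io_ops.is_console at runtime - impossible once the struct can be const - the kgdboc hunk above keeps two static instances and records in "configured" (1 or 2) which one was registered, so cleanup unregisters the right one. The ehci-dbgp hunk later in this patch does the same. Below is a small sketch of that "two const instances instead of one runtime flag" approach, with illustrative names.

/* Sketch: pick one of two const ops objects instead of patching a flag. */
#include <stdio.h>

struct kgdb_io_model { const char *name; int is_console; };

static const struct kgdb_io_model io_ops         = { "kgdboc", 0 };
static const struct kgdb_io_model io_ops_console = { "kgdboc", 1 };

static int configured;         /* 0 = off, 1 = plain, 2 = console-backed */

static void configure(int is_console)
{
        const struct kgdb_io_model *ops =
                is_console ? &io_ops_console : &io_ops;

        configured = is_console ? 2 : 1;   /* remember which one to unregister */
        printf("registered %s (is_console=%d, configured=%d)\n",
               ops->name, ops->is_console, configured);
}

int main(void)
{
        configure(1);
        configure(0);
        return 0;
}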
37640 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
37641 index 05728894..b9d44c6 100644
37642 --- a/drivers/tty/sysrq.c
37643 +++ b/drivers/tty/sysrq.c
37644 @@ -865,7 +865,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
37645 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
37646 size_t count, loff_t *ppos)
37647 {
37648 - if (count) {
37649 + if (count && capable(CAP_SYS_ADMIN)) {
37650 char c;
37651
37652 if (get_user(c, buf))
37653 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
37654 index d939bd7..33d92cd 100644
37655 --- a/drivers/tty/tty_io.c
37656 +++ b/drivers/tty/tty_io.c
37657 @@ -3278,7 +3278,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
37658
37659 void tty_default_fops(struct file_operations *fops)
37660 {
37661 - *fops = tty_fops;
37662 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
37663 }
37664
37665 /*
37666 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
37667 index 24b95db..9c078d0 100644
37668 --- a/drivers/tty/tty_ldisc.c
37669 +++ b/drivers/tty/tty_ldisc.c
37670 @@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
37671 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
37672 struct tty_ldisc_ops *ldo = ld->ops;
37673
37674 - ldo->refcount--;
37675 + atomic_dec(&ldo->refcount);
37676 module_put(ldo->owner);
37677 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37678
37679 @@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
37680 spin_lock_irqsave(&tty_ldisc_lock, flags);
37681 tty_ldiscs[disc] = new_ldisc;
37682 new_ldisc->num = disc;
37683 - new_ldisc->refcount = 0;
37684 + atomic_set(&new_ldisc->refcount, 0);
37685 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37686
37687 return ret;
37688 @@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
37689 return -EINVAL;
37690
37691 spin_lock_irqsave(&tty_ldisc_lock, flags);
37692 - if (tty_ldiscs[disc]->refcount)
37693 + if (atomic_read(&tty_ldiscs[disc]->refcount))
37694 ret = -EBUSY;
37695 else
37696 tty_ldiscs[disc] = NULL;
37697 @@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
37698 if (ldops) {
37699 ret = ERR_PTR(-EAGAIN);
37700 if (try_module_get(ldops->owner)) {
37701 - ldops->refcount++;
37702 + atomic_inc(&ldops->refcount);
37703 ret = ldops;
37704 }
37705 }
37706 @@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
37707 unsigned long flags;
37708
37709 spin_lock_irqsave(&tty_ldisc_lock, flags);
37710 - ldops->refcount--;
37711 + atomic_dec(&ldops->refcount);
37712 module_put(ldops->owner);
37713 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37714 }
37715 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
37716 index 3b0c4e3..f98a992 100644
37717 --- a/drivers/tty/vt/keyboard.c
37718 +++ b/drivers/tty/vt/keyboard.c
37719 @@ -663,6 +663,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
37720 kbd->kbdmode == VC_OFF) &&
37721 value != KVAL(K_SAK))
37722 return; /* SAK is allowed even in raw mode */
37723 +
37724 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
37725 + {
37726 + void *func = fn_handler[value];
37727 + if (func == fn_show_state || func == fn_show_ptregs ||
37728 + func == fn_show_mem)
37729 + return;
37730 + }
37731 +#endif
37732 +
37733 fn_handler[value](vc);
37734 }
37735
37736 @@ -1812,9 +1822,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
37737 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
37738 return -EFAULT;
37739
37740 - if (!capable(CAP_SYS_TTY_CONFIG))
37741 - perm = 0;
37742 -
37743 switch (cmd) {
37744 case KDGKBENT:
37745 /* Ensure another thread doesn't free it under us */
37746 @@ -1829,6 +1836,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
37747 spin_unlock_irqrestore(&kbd_event_lock, flags);
37748 return put_user(val, &user_kbe->kb_value);
37749 case KDSKBENT:
37750 + if (!capable(CAP_SYS_TTY_CONFIG))
37751 + perm = 0;
37752 +
37753 if (!perm)
37754 return -EPERM;
37755 if (!i && v == K_NOSUCHMAP) {
37756 @@ -1919,9 +1929,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37757 int i, j, k;
37758 int ret;
37759
37760 - if (!capable(CAP_SYS_TTY_CONFIG))
37761 - perm = 0;
37762 -
37763 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
37764 if (!kbs) {
37765 ret = -ENOMEM;
37766 @@ -1955,6 +1962,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37767 kfree(kbs);
37768 return ((p && *p) ? -EOVERFLOW : 0);
37769 case KDSKBSENT:
37770 + if (!capable(CAP_SYS_TTY_CONFIG))
37771 + perm = 0;
37772 +
37773 if (!perm) {
37774 ret = -EPERM;
37775 goto reterr;
37776 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
37777 index a783d53..cb30d94 100644
37778 --- a/drivers/uio/uio.c
37779 +++ b/drivers/uio/uio.c
37780 @@ -25,6 +25,7 @@
37781 #include <linux/kobject.h>
37782 #include <linux/cdev.h>
37783 #include <linux/uio_driver.h>
37784 +#include <asm/local.h>
37785
37786 #define UIO_MAX_DEVICES (1U << MINORBITS)
37787
37788 @@ -32,10 +33,10 @@ struct uio_device {
37789 struct module *owner;
37790 struct device *dev;
37791 int minor;
37792 - atomic_t event;
37793 + atomic_unchecked_t event;
37794 struct fasync_struct *async_queue;
37795 wait_queue_head_t wait;
37796 - int vma_count;
37797 + local_t vma_count;
37798 struct uio_info *info;
37799 struct kobject *map_dir;
37800 struct kobject *portio_dir;
37801 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
37802 struct device_attribute *attr, char *buf)
37803 {
37804 struct uio_device *idev = dev_get_drvdata(dev);
37805 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
37806 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
37807 }
37808
37809 static struct device_attribute uio_class_attributes[] = {
37810 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
37811 {
37812 struct uio_device *idev = info->uio_dev;
37813
37814 - atomic_inc(&idev->event);
37815 + atomic_inc_unchecked(&idev->event);
37816 wake_up_interruptible(&idev->wait);
37817 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
37818 }
37819 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
37820 }
37821
37822 listener->dev = idev;
37823 - listener->event_count = atomic_read(&idev->event);
37824 + listener->event_count = atomic_read_unchecked(&idev->event);
37825 filep->private_data = listener;
37826
37827 if (idev->info->open) {
37828 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
37829 return -EIO;
37830
37831 poll_wait(filep, &idev->wait, wait);
37832 - if (listener->event_count != atomic_read(&idev->event))
37833 + if (listener->event_count != atomic_read_unchecked(&idev->event))
37834 return POLLIN | POLLRDNORM;
37835 return 0;
37836 }
37837 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
37838 do {
37839 set_current_state(TASK_INTERRUPTIBLE);
37840
37841 - event_count = atomic_read(&idev->event);
37842 + event_count = atomic_read_unchecked(&idev->event);
37843 if (event_count != listener->event_count) {
37844 if (copy_to_user(buf, &event_count, count))
37845 retval = -EFAULT;
37846 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
37847 static void uio_vma_open(struct vm_area_struct *vma)
37848 {
37849 struct uio_device *idev = vma->vm_private_data;
37850 - idev->vma_count++;
37851 + local_inc(&idev->vma_count);
37852 }
37853
37854 static void uio_vma_close(struct vm_area_struct *vma)
37855 {
37856 struct uio_device *idev = vma->vm_private_data;
37857 - idev->vma_count--;
37858 + local_dec(&idev->vma_count);
37859 }
37860
37861 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
37862 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
37863 idev->owner = owner;
37864 idev->info = info;
37865 init_waitqueue_head(&idev->wait);
37866 - atomic_set(&idev->event, 0);
37867 + atomic_set_unchecked(&idev->event, 0);
37868
37869 ret = uio_get_minor(idev);
37870 if (ret)
37871 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
37872 index 98b89fe..aff824e 100644
37873 --- a/drivers/usb/atm/cxacru.c
37874 +++ b/drivers/usb/atm/cxacru.c
37875 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
37876 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
37877 if (ret < 2)
37878 return -EINVAL;
37879 - if (index < 0 || index > 0x7f)
37880 + if (index > 0x7f)
37881 return -EINVAL;
37882 pos += tmp;
37883
37884 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
37885 index d3448ca..d2864ca 100644
37886 --- a/drivers/usb/atm/usbatm.c
37887 +++ b/drivers/usb/atm/usbatm.c
37888 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37889 if (printk_ratelimit())
37890 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
37891 __func__, vpi, vci);
37892 - atomic_inc(&vcc->stats->rx_err);
37893 + atomic_inc_unchecked(&vcc->stats->rx_err);
37894 return;
37895 }
37896
37897 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37898 if (length > ATM_MAX_AAL5_PDU) {
37899 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
37900 __func__, length, vcc);
37901 - atomic_inc(&vcc->stats->rx_err);
37902 + atomic_inc_unchecked(&vcc->stats->rx_err);
37903 goto out;
37904 }
37905
37906 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37907 if (sarb->len < pdu_length) {
37908 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
37909 __func__, pdu_length, sarb->len, vcc);
37910 - atomic_inc(&vcc->stats->rx_err);
37911 + atomic_inc_unchecked(&vcc->stats->rx_err);
37912 goto out;
37913 }
37914
37915 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
37916 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
37917 __func__, vcc);
37918 - atomic_inc(&vcc->stats->rx_err);
37919 + atomic_inc_unchecked(&vcc->stats->rx_err);
37920 goto out;
37921 }
37922
37923 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37924 if (printk_ratelimit())
37925 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
37926 __func__, length);
37927 - atomic_inc(&vcc->stats->rx_drop);
37928 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37929 goto out;
37930 }
37931
37932 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37933
37934 vcc->push(vcc, skb);
37935
37936 - atomic_inc(&vcc->stats->rx);
37937 + atomic_inc_unchecked(&vcc->stats->rx);
37938 out:
37939 skb_trim(sarb, 0);
37940 }
37941 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
37942 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
37943
37944 usbatm_pop(vcc, skb);
37945 - atomic_inc(&vcc->stats->tx);
37946 + atomic_inc_unchecked(&vcc->stats->tx);
37947
37948 skb = skb_dequeue(&instance->sndqueue);
37949 }
37950 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
37951 if (!left--)
37952 return sprintf(page,
37953 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
37954 - atomic_read(&atm_dev->stats.aal5.tx),
37955 - atomic_read(&atm_dev->stats.aal5.tx_err),
37956 - atomic_read(&atm_dev->stats.aal5.rx),
37957 - atomic_read(&atm_dev->stats.aal5.rx_err),
37958 - atomic_read(&atm_dev->stats.aal5.rx_drop));
37959 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
37960 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
37961 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
37962 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
37963 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
37964
37965 if (!left--) {
37966 if (instance->disconnected)
37967 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
37968 index d956965..4179a77 100644
37969 --- a/drivers/usb/core/devices.c
37970 +++ b/drivers/usb/core/devices.c
37971 @@ -126,7 +126,7 @@ static const char format_endpt[] =
37972 * time it gets called.
37973 */
37974 static struct device_connect_event {
37975 - atomic_t count;
37976 + atomic_unchecked_t count;
37977 wait_queue_head_t wait;
37978 } device_event = {
37979 .count = ATOMIC_INIT(1),
37980 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
37981
37982 void usbfs_conn_disc_event(void)
37983 {
37984 - atomic_add(2, &device_event.count);
37985 + atomic_add_unchecked(2, &device_event.count);
37986 wake_up(&device_event.wait);
37987 }
37988
37989 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
37990
37991 poll_wait(file, &device_event.wait, wait);
37992
37993 - event_count = atomic_read(&device_event.count);
37994 + event_count = atomic_read_unchecked(&device_event.count);
37995 if (file->f_version != event_count) {
37996 file->f_version = event_count;
37997 return POLLIN | POLLRDNORM;
37998 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
37999 index 1fc8f12..20647c1 100644
38000 --- a/drivers/usb/early/ehci-dbgp.c
38001 +++ b/drivers/usb/early/ehci-dbgp.c
38002 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
38003
38004 #ifdef CONFIG_KGDB
38005 static struct kgdb_io kgdbdbgp_io_ops;
38006 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38007 +static struct kgdb_io kgdbdbgp_io_ops_console;
38008 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
38009 #else
38010 #define dbgp_kgdb_mode (0)
38011 #endif
38012 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
38013 .write_char = kgdbdbgp_write_char,
38014 };
38015
38016 +static struct kgdb_io kgdbdbgp_io_ops_console = {
38017 + .name = "kgdbdbgp",
38018 + .read_char = kgdbdbgp_read_char,
38019 + .write_char = kgdbdbgp_write_char,
38020 + .is_console = 1
38021 +};
38022 +
38023 static int kgdbdbgp_wait_time;
38024
38025 static int __init kgdbdbgp_parse_config(char *str)
38026 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
38027 ptr++;
38028 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38029 }
38030 - kgdb_register_io_module(&kgdbdbgp_io_ops);
38031 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38032 + if (early_dbgp_console.index != -1)
38033 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38034 + else
38035 + kgdb_register_io_module(&kgdbdbgp_io_ops);
38036
38037 return 0;
38038 }
38039 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
38040 index d6bea3e..60b250e 100644
38041 --- a/drivers/usb/wusbcore/wa-hc.h
38042 +++ b/drivers/usb/wusbcore/wa-hc.h
38043 @@ -192,7 +192,7 @@ struct wahc {
38044 struct list_head xfer_delayed_list;
38045 spinlock_t xfer_list_lock;
38046 struct work_struct xfer_work;
38047 - atomic_t xfer_id_count;
38048 + atomic_unchecked_t xfer_id_count;
38049 };
38050
38051
38052 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
38053 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38054 spin_lock_init(&wa->xfer_list_lock);
38055 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38056 - atomic_set(&wa->xfer_id_count, 1);
38057 + atomic_set_unchecked(&wa->xfer_id_count, 1);
38058 }
38059
38060 /**
38061 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
38062 index 57c01ab..8a05959 100644
38063 --- a/drivers/usb/wusbcore/wa-xfer.c
38064 +++ b/drivers/usb/wusbcore/wa-xfer.c
38065 @@ -296,7 +296,7 @@ out:
38066 */
38067 static void wa_xfer_id_init(struct wa_xfer *xfer)
38068 {
38069 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38070 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38071 }
38072
38073 /*
38074 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
38075 index 51e4c1e..9d87e2a 100644
38076 --- a/drivers/vhost/vhost.c
38077 +++ b/drivers/vhost/vhost.c
38078 @@ -632,7 +632,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
38079 return 0;
38080 }
38081
38082 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
38083 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
38084 {
38085 struct file *eventfp, *filep = NULL,
38086 *pollstart = NULL, *pollstop = NULL;
38087 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
38088 index b0b2ac3..89a4399 100644
38089 --- a/drivers/video/aty/aty128fb.c
38090 +++ b/drivers/video/aty/aty128fb.c
38091 @@ -148,7 +148,7 @@ enum {
38092 };
38093
38094 /* Must match above enum */
38095 -static const char *r128_family[] __devinitdata = {
38096 +static const char *r128_family[] __devinitconst = {
38097 "AGP",
38098 "PCI",
38099 "PRO AGP",
38100 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
38101 index 5c3960d..15cf8fc 100644
38102 --- a/drivers/video/fbcmap.c
38103 +++ b/drivers/video/fbcmap.c
38104 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
38105 rc = -ENODEV;
38106 goto out;
38107 }
38108 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38109 - !info->fbops->fb_setcmap)) {
38110 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38111 rc = -EINVAL;
38112 goto out1;
38113 }
38114 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
38115 index c6ce416..3b9b642 100644
38116 --- a/drivers/video/fbmem.c
38117 +++ b/drivers/video/fbmem.c
38118 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38119 image->dx += image->width + 8;
38120 }
38121 } else if (rotate == FB_ROTATE_UD) {
38122 - for (x = 0; x < num && image->dx >= 0; x++) {
38123 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38124 info->fbops->fb_imageblit(info, image);
38125 image->dx -= image->width + 8;
38126 }
38127 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38128 image->dy += image->height + 8;
38129 }
38130 } else if (rotate == FB_ROTATE_CCW) {
38131 - for (x = 0; x < num && image->dy >= 0; x++) {
38132 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38133 info->fbops->fb_imageblit(info, image);
38134 image->dy -= image->height + 8;
38135 }
38136 @@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38137 return -EFAULT;
38138 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38139 return -EINVAL;
38140 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38141 + if (con2fb.framebuffer >= FB_MAX)
38142 return -EINVAL;
38143 if (!registered_fb[con2fb.framebuffer])
38144 request_module("fb%d", con2fb.framebuffer);
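[editor's note] The fbcmap and fbmem hunks above deal with comparisons on unsigned fields: "framebuffer < 0" and "cmap->start < 0" are always false and are dropped, while image->dx/dy are deliberately allowed to go "negative" by the rotation loops, so the test is kept but done through a signed cast. The userspace snippet below just demonstrates the difference; the values and variable names are illustrative (and the intentional unsigned comparison will draw a compiler warning, which is the point).

/* Why "unsigned < 0" is useless and the (__s32) cast is needed. */
#include <stdio.h>

int main(void)
{
        unsigned int dx = 0;

        dx -= 8;        /* wraps to a huge unsigned value */
        printf("dx < 0       : %d\n", dx < 0);        /* always 0 */
        printf("(int)dx >= 0 : %d\n", (int)dx >= 0);  /* 0 on two's complement: wrap caught */
        return 0;
}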
38145 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
38146 index 5a5d092..265c5ed 100644
38147 --- a/drivers/video/geode/gx1fb_core.c
38148 +++ b/drivers/video/geode/gx1fb_core.c
38149 @@ -29,7 +29,7 @@ static int crt_option = 1;
38150 static char panel_option[32] = "";
38151
38152 /* Modes relevant to the GX1 (taken from modedb.c) */
38153 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
38154 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
38155 /* 640x480-60 VESA */
38156 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
38157 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
38158 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
38159 index 0fad23f..0e9afa4 100644
38160 --- a/drivers/video/gxt4500.c
38161 +++ b/drivers/video/gxt4500.c
38162 @@ -156,7 +156,7 @@ struct gxt4500_par {
38163 static char *mode_option;
38164
38165 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
38166 -static const struct fb_videomode defaultmode __devinitdata = {
38167 +static const struct fb_videomode defaultmode __devinitconst = {
38168 .refresh = 60,
38169 .xres = 1280,
38170 .yres = 1024,
38171 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
38172 return 0;
38173 }
38174
38175 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
38176 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
38177 .id = "IBM GXT4500P",
38178 .type = FB_TYPE_PACKED_PIXELS,
38179 .visual = FB_VISUAL_PSEUDOCOLOR,
38180 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
38181 index 7672d2e..b56437f 100644
38182 --- a/drivers/video/i810/i810_accel.c
38183 +++ b/drivers/video/i810/i810_accel.c
38184 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
38185 }
38186 }
38187 printk("ringbuffer lockup!!!\n");
38188 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38189 i810_report_error(mmio);
38190 par->dev_flags |= LOCKUP;
38191 info->pixmap.scan_align = 1;
38192 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
38193 index b83f361..2b05a91 100644
38194 --- a/drivers/video/i810/i810_main.c
38195 +++ b/drivers/video/i810/i810_main.c
38196 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
38197 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38198
38199 /* PCI */
38200 -static const char *i810_pci_list[] __devinitdata = {
38201 +static const char *i810_pci_list[] __devinitconst = {
38202 "Intel(R) 810 Framebuffer Device" ,
38203 "Intel(R) 810-DC100 Framebuffer Device" ,
38204 "Intel(R) 810E Framebuffer Device" ,
38205 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38206 index de36693..3c63fc2 100644
38207 --- a/drivers/video/jz4740_fb.c
38208 +++ b/drivers/video/jz4740_fb.c
38209 @@ -136,7 +136,7 @@ struct jzfb {
38210 uint32_t pseudo_palette[16];
38211 };
38212
38213 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38214 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38215 .id = "JZ4740 FB",
38216 .type = FB_TYPE_PACKED_PIXELS,
38217 .visual = FB_VISUAL_TRUECOLOR,
38218 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38219 index 3c14e43..eafa544 100644
38220 --- a/drivers/video/logo/logo_linux_clut224.ppm
38221 +++ b/drivers/video/logo/logo_linux_clut224.ppm
38222 @@ -1,1604 +1,1123 @@
38223 P3
38224 -# Standard 224-color Linux logo
38225 80 80
38226 255
[ raw 80x80 RGB pixel rows of the old standard 224-color Linux logo, all removed ("-") by this hunk, omitted here - purely numeric image data with no other content ]
38589 - 0 0 0 0 0 0 0 0 0 0 0 0
38590 - 0 0 0 0 0 0 0 0 0 0 0 0
38591 - 0 0 0 0 0 0 0 0 0 0 0 0
38592 - 0 0 0 0 0 0 0 0 0 0 0 0
38593 - 0 0 0 0 0 0 0 0 0 14 14 14
38594 - 42 42 42 86 86 86 6 6 6 116 116 116
38595 -106 106 106 6 6 6 70 70 70 149 149 149
38596 -128 128 128 18 18 18 38 38 38 54 54 54
38597 -221 221 221 106 106 106 2 2 6 14 14 14
38598 - 46 46 46 190 190 190 198 198 198 2 2 6
38599 - 2 2 6 2 2 6 2 2 6 2 2 6
38600 - 74 74 74 62 62 62 22 22 22 6 6 6
38601 - 0 0 0 0 0 0 0 0 0 0 0 0
38602 - 0 0 0 0 0 0 0 0 0 0 0 0
38603 - 0 0 0 0 0 0 0 0 0 0 0 0
38604 - 0 0 0 0 0 0 0 0 0 0 0 0
38605 - 0 0 0 0 0 0 0 0 0 0 0 0
38606 - 0 0 0 0 0 0 0 0 0 0 0 0
38607 - 0 0 0 0 0 0 0 0 1 0 0 0
38608 - 0 0 1 0 0 0 0 0 1 0 0 0
38609 - 0 0 0 0 0 0 0 0 0 0 0 0
38610 - 0 0 0 0 0 0 0 0 0 0 0 0
38611 - 0 0 0 0 0 0 0 0 0 0 0 0
38612 - 0 0 0 0 0 0 0 0 0 0 0 0
38613 - 0 0 0 0 0 0 0 0 0 14 14 14
38614 - 42 42 42 94 94 94 14 14 14 101 101 101
38615 -128 128 128 2 2 6 18 18 18 116 116 116
38616 -118 98 46 121 92 8 121 92 8 98 78 10
38617 -162 162 162 106 106 106 2 2 6 2 2 6
38618 - 2 2 6 195 195 195 195 195 195 6 6 6
38619 - 2 2 6 2 2 6 2 2 6 2 2 6
38620 - 74 74 74 62 62 62 22 22 22 6 6 6
38621 - 0 0 0 0 0 0 0 0 0 0 0 0
38622 - 0 0 0 0 0 0 0 0 0 0 0 0
38623 - 0 0 0 0 0 0 0 0 0 0 0 0
38624 - 0 0 0 0 0 0 0 0 0 0 0 0
38625 - 0 0 0 0 0 0 0 0 0 0 0 0
38626 - 0 0 0 0 0 0 0 0 0 0 0 0
38627 - 0 0 0 0 0 0 0 0 1 0 0 1
38628 - 0 0 1 0 0 0 0 0 1 0 0 0
38629 - 0 0 0 0 0 0 0 0 0 0 0 0
38630 - 0 0 0 0 0 0 0 0 0 0 0 0
38631 - 0 0 0 0 0 0 0 0 0 0 0 0
38632 - 0 0 0 0 0 0 0 0 0 0 0 0
38633 - 0 0 0 0 0 0 0 0 0 10 10 10
38634 - 38 38 38 90 90 90 14 14 14 58 58 58
38635 -210 210 210 26 26 26 54 38 6 154 114 10
38636 -226 170 11 236 186 11 225 175 15 184 144 12
38637 -215 174 15 175 146 61 37 26 9 2 2 6
38638 - 70 70 70 246 246 246 138 138 138 2 2 6
38639 - 2 2 6 2 2 6 2 2 6 2 2 6
38640 - 70 70 70 66 66 66 26 26 26 6 6 6
38641 - 0 0 0 0 0 0 0 0 0 0 0 0
38642 - 0 0 0 0 0 0 0 0 0 0 0 0
38643 - 0 0 0 0 0 0 0 0 0 0 0 0
38644 - 0 0 0 0 0 0 0 0 0 0 0 0
38645 - 0 0 0 0 0 0 0 0 0 0 0 0
38646 - 0 0 0 0 0 0 0 0 0 0 0 0
38647 - 0 0 0 0 0 0 0 0 0 0 0 0
38648 - 0 0 0 0 0 0 0 0 0 0 0 0
38649 - 0 0 0 0 0 0 0 0 0 0 0 0
38650 - 0 0 0 0 0 0 0 0 0 0 0 0
38651 - 0 0 0 0 0 0 0 0 0 0 0 0
38652 - 0 0 0 0 0 0 0 0 0 0 0 0
38653 - 0 0 0 0 0 0 0 0 0 10 10 10
38654 - 38 38 38 86 86 86 14 14 14 10 10 10
38655 -195 195 195 188 164 115 192 133 9 225 175 15
38656 -239 182 13 234 190 10 232 195 16 232 200 30
38657 -245 207 45 241 208 19 232 195 16 184 144 12
38658 -218 194 134 211 206 186 42 42 42 2 2 6
38659 - 2 2 6 2 2 6 2 2 6 2 2 6
38660 - 50 50 50 74 74 74 30 30 30 6 6 6
38661 - 0 0 0 0 0 0 0 0 0 0 0 0
38662 - 0 0 0 0 0 0 0 0 0 0 0 0
38663 - 0 0 0 0 0 0 0 0 0 0 0 0
38664 - 0 0 0 0 0 0 0 0 0 0 0 0
38665 - 0 0 0 0 0 0 0 0 0 0 0 0
38666 - 0 0 0 0 0 0 0 0 0 0 0 0
38667 - 0 0 0 0 0 0 0 0 0 0 0 0
38668 - 0 0 0 0 0 0 0 0 0 0 0 0
38669 - 0 0 0 0 0 0 0 0 0 0 0 0
38670 - 0 0 0 0 0 0 0 0 0 0 0 0
38671 - 0 0 0 0 0 0 0 0 0 0 0 0
38672 - 0 0 0 0 0 0 0 0 0 0 0 0
38673 - 0 0 0 0 0 0 0 0 0 10 10 10
38674 - 34 34 34 86 86 86 14 14 14 2 2 6
38675 -121 87 25 192 133 9 219 162 10 239 182 13
38676 -236 186 11 232 195 16 241 208 19 244 214 54
38677 -246 218 60 246 218 38 246 215 20 241 208 19
38678 -241 208 19 226 184 13 121 87 25 2 2 6
38679 - 2 2 6 2 2 6 2 2 6 2 2 6
38680 - 50 50 50 82 82 82 34 34 34 10 10 10
38681 - 0 0 0 0 0 0 0 0 0 0 0 0
38682 - 0 0 0 0 0 0 0 0 0 0 0 0
38683 - 0 0 0 0 0 0 0 0 0 0 0 0
38684 - 0 0 0 0 0 0 0 0 0 0 0 0
38685 - 0 0 0 0 0 0 0 0 0 0 0 0
38686 - 0 0 0 0 0 0 0 0 0 0 0 0
38687 - 0 0 0 0 0 0 0 0 0 0 0 0
38688 - 0 0 0 0 0 0 0 0 0 0 0 0
38689 - 0 0 0 0 0 0 0 0 0 0 0 0
38690 - 0 0 0 0 0 0 0 0 0 0 0 0
38691 - 0 0 0 0 0 0 0 0 0 0 0 0
38692 - 0 0 0 0 0 0 0 0 0 0 0 0
38693 - 0 0 0 0 0 0 0 0 0 10 10 10
38694 - 34 34 34 82 82 82 30 30 30 61 42 6
38695 -180 123 7 206 145 10 230 174 11 239 182 13
38696 -234 190 10 238 202 15 241 208 19 246 218 74
38697 -246 218 38 246 215 20 246 215 20 246 215 20
38698 -226 184 13 215 174 15 184 144 12 6 6 6
38699 - 2 2 6 2 2 6 2 2 6 2 2 6
38700 - 26 26 26 94 94 94 42 42 42 14 14 14
38701 - 0 0 0 0 0 0 0 0 0 0 0 0
38702 - 0 0 0 0 0 0 0 0 0 0 0 0
38703 - 0 0 0 0 0 0 0 0 0 0 0 0
38704 - 0 0 0 0 0 0 0 0 0 0 0 0
38705 - 0 0 0 0 0 0 0 0 0 0 0 0
38706 - 0 0 0 0 0 0 0 0 0 0 0 0
38707 - 0 0 0 0 0 0 0 0 0 0 0 0
38708 - 0 0 0 0 0 0 0 0 0 0 0 0
38709 - 0 0 0 0 0 0 0 0 0 0 0 0
38710 - 0 0 0 0 0 0 0 0 0 0 0 0
38711 - 0 0 0 0 0 0 0 0 0 0 0 0
38712 - 0 0 0 0 0 0 0 0 0 0 0 0
38713 - 0 0 0 0 0 0 0 0 0 10 10 10
38714 - 30 30 30 78 78 78 50 50 50 104 69 6
38715 -192 133 9 216 158 10 236 178 12 236 186 11
38716 -232 195 16 241 208 19 244 214 54 245 215 43
38717 -246 215 20 246 215 20 241 208 19 198 155 10
38718 -200 144 11 216 158 10 156 118 10 2 2 6
38719 - 2 2 6 2 2 6 2 2 6 2 2 6
38720 - 6 6 6 90 90 90 54 54 54 18 18 18
38721 - 6 6 6 0 0 0 0 0 0 0 0 0
38722 - 0 0 0 0 0 0 0 0 0 0 0 0
38723 - 0 0 0 0 0 0 0 0 0 0 0 0
38724 - 0 0 0 0 0 0 0 0 0 0 0 0
38725 - 0 0 0 0 0 0 0 0 0 0 0 0
38726 - 0 0 0 0 0 0 0 0 0 0 0 0
38727 - 0 0 0 0 0 0 0 0 0 0 0 0
38728 - 0 0 0 0 0 0 0 0 0 0 0 0
38729 - 0 0 0 0 0 0 0 0 0 0 0 0
38730 - 0 0 0 0 0 0 0 0 0 0 0 0
38731 - 0 0 0 0 0 0 0 0 0 0 0 0
38732 - 0 0 0 0 0 0 0 0 0 0 0 0
38733 - 0 0 0 0 0 0 0 0 0 10 10 10
38734 - 30 30 30 78 78 78 46 46 46 22 22 22
38735 -137 92 6 210 162 10 239 182 13 238 190 10
38736 -238 202 15 241 208 19 246 215 20 246 215 20
38737 -241 208 19 203 166 17 185 133 11 210 150 10
38738 -216 158 10 210 150 10 102 78 10 2 2 6
38739 - 6 6 6 54 54 54 14 14 14 2 2 6
38740 - 2 2 6 62 62 62 74 74 74 30 30 30
38741 - 10 10 10 0 0 0 0 0 0 0 0 0
38742 - 0 0 0 0 0 0 0 0 0 0 0 0
38743 - 0 0 0 0 0 0 0 0 0 0 0 0
38744 - 0 0 0 0 0 0 0 0 0 0 0 0
38745 - 0 0 0 0 0 0 0 0 0 0 0 0
38746 - 0 0 0 0 0 0 0 0 0 0 0 0
38747 - 0 0 0 0 0 0 0 0 0 0 0 0
38748 - 0 0 0 0 0 0 0 0 0 0 0 0
38749 - 0 0 0 0 0 0 0 0 0 0 0 0
38750 - 0 0 0 0 0 0 0 0 0 0 0 0
38751 - 0 0 0 0 0 0 0 0 0 0 0 0
38752 - 0 0 0 0 0 0 0 0 0 0 0 0
38753 - 0 0 0 0 0 0 0 0 0 10 10 10
38754 - 34 34 34 78 78 78 50 50 50 6 6 6
38755 - 94 70 30 139 102 15 190 146 13 226 184 13
38756 -232 200 30 232 195 16 215 174 15 190 146 13
38757 -168 122 10 192 133 9 210 150 10 213 154 11
38758 -202 150 34 182 157 106 101 98 89 2 2 6
38759 - 2 2 6 78 78 78 116 116 116 58 58 58
38760 - 2 2 6 22 22 22 90 90 90 46 46 46
38761 - 18 18 18 6 6 6 0 0 0 0 0 0
38762 - 0 0 0 0 0 0 0 0 0 0 0 0
38763 - 0 0 0 0 0 0 0 0 0 0 0 0
38764 - 0 0 0 0 0 0 0 0 0 0 0 0
38765 - 0 0 0 0 0 0 0 0 0 0 0 0
38766 - 0 0 0 0 0 0 0 0 0 0 0 0
38767 - 0 0 0 0 0 0 0 0 0 0 0 0
38768 - 0 0 0 0 0 0 0 0 0 0 0 0
38769 - 0 0 0 0 0 0 0 0 0 0 0 0
38770 - 0 0 0 0 0 0 0 0 0 0 0 0
38771 - 0 0 0 0 0 0 0 0 0 0 0 0
38772 - 0 0 0 0 0 0 0 0 0 0 0 0
38773 - 0 0 0 0 0 0 0 0 0 10 10 10
38774 - 38 38 38 86 86 86 50 50 50 6 6 6
38775 -128 128 128 174 154 114 156 107 11 168 122 10
38776 -198 155 10 184 144 12 197 138 11 200 144 11
38777 -206 145 10 206 145 10 197 138 11 188 164 115
38778 -195 195 195 198 198 198 174 174 174 14 14 14
38779 - 2 2 6 22 22 22 116 116 116 116 116 116
38780 - 22 22 22 2 2 6 74 74 74 70 70 70
38781 - 30 30 30 10 10 10 0 0 0 0 0 0
38782 - 0 0 0 0 0 0 0 0 0 0 0 0
38783 - 0 0 0 0 0 0 0 0 0 0 0 0
38784 - 0 0 0 0 0 0 0 0 0 0 0 0
38785 - 0 0 0 0 0 0 0 0 0 0 0 0
38786 - 0 0 0 0 0 0 0 0 0 0 0 0
38787 - 0 0 0 0 0 0 0 0 0 0 0 0
38788 - 0 0 0 0 0 0 0 0 0 0 0 0
38789 - 0 0 0 0 0 0 0 0 0 0 0 0
38790 - 0 0 0 0 0 0 0 0 0 0 0 0
38791 - 0 0 0 0 0 0 0 0 0 0 0 0
38792 - 0 0 0 0 0 0 0 0 0 0 0 0
38793 - 0 0 0 0 0 0 6 6 6 18 18 18
38794 - 50 50 50 101 101 101 26 26 26 10 10 10
38795 -138 138 138 190 190 190 174 154 114 156 107 11
38796 -197 138 11 200 144 11 197 138 11 192 133 9
38797 -180 123 7 190 142 34 190 178 144 187 187 187
38798 -202 202 202 221 221 221 214 214 214 66 66 66
38799 - 2 2 6 2 2 6 50 50 50 62 62 62
38800 - 6 6 6 2 2 6 10 10 10 90 90 90
38801 - 50 50 50 18 18 18 6 6 6 0 0 0
38802 - 0 0 0 0 0 0 0 0 0 0 0 0
38803 - 0 0 0 0 0 0 0 0 0 0 0 0
38804 - 0 0 0 0 0 0 0 0 0 0 0 0
38805 - 0 0 0 0 0 0 0 0 0 0 0 0
38806 - 0 0 0 0 0 0 0 0 0 0 0 0
38807 - 0 0 0 0 0 0 0 0 0 0 0 0
38808 - 0 0 0 0 0 0 0 0 0 0 0 0
38809 - 0 0 0 0 0 0 0 0 0 0 0 0
38810 - 0 0 0 0 0 0 0 0 0 0 0 0
38811 - 0 0 0 0 0 0 0 0 0 0 0 0
38812 - 0 0 0 0 0 0 0 0 0 0 0 0
38813 - 0 0 0 0 0 0 10 10 10 34 34 34
38814 - 74 74 74 74 74 74 2 2 6 6 6 6
38815 -144 144 144 198 198 198 190 190 190 178 166 146
38816 -154 121 60 156 107 11 156 107 11 168 124 44
38817 -174 154 114 187 187 187 190 190 190 210 210 210
38818 -246 246 246 253 253 253 253 253 253 182 182 182
38819 - 6 6 6 2 2 6 2 2 6 2 2 6
38820 - 2 2 6 2 2 6 2 2 6 62 62 62
38821 - 74 74 74 34 34 34 14 14 14 0 0 0
38822 - 0 0 0 0 0 0 0 0 0 0 0 0
38823 - 0 0 0 0 0 0 0 0 0 0 0 0
38824 - 0 0 0 0 0 0 0 0 0 0 0 0
38825 - 0 0 0 0 0 0 0 0 0 0 0 0
38826 - 0 0 0 0 0 0 0 0 0 0 0 0
38827 - 0 0 0 0 0 0 0 0 0 0 0 0
38828 - 0 0 0 0 0 0 0 0 0 0 0 0
38829 - 0 0 0 0 0 0 0 0 0 0 0 0
38830 - 0 0 0 0 0 0 0 0 0 0 0 0
38831 - 0 0 0 0 0 0 0 0 0 0 0 0
38832 - 0 0 0 0 0 0 0 0 0 0 0 0
38833 - 0 0 0 10 10 10 22 22 22 54 54 54
38834 - 94 94 94 18 18 18 2 2 6 46 46 46
38835 -234 234 234 221 221 221 190 190 190 190 190 190
38836 -190 190 190 187 187 187 187 187 187 190 190 190
38837 -190 190 190 195 195 195 214 214 214 242 242 242
38838 -253 253 253 253 253 253 253 253 253 253 253 253
38839 - 82 82 82 2 2 6 2 2 6 2 2 6
38840 - 2 2 6 2 2 6 2 2 6 14 14 14
38841 - 86 86 86 54 54 54 22 22 22 6 6 6
38842 - 0 0 0 0 0 0 0 0 0 0 0 0
38843 - 0 0 0 0 0 0 0 0 0 0 0 0
38844 - 0 0 0 0 0 0 0 0 0 0 0 0
38845 - 0 0 0 0 0 0 0 0 0 0 0 0
38846 - 0 0 0 0 0 0 0 0 0 0 0 0
38847 - 0 0 0 0 0 0 0 0 0 0 0 0
38848 - 0 0 0 0 0 0 0 0 0 0 0 0
38849 - 0 0 0 0 0 0 0 0 0 0 0 0
38850 - 0 0 0 0 0 0 0 0 0 0 0 0
38851 - 0 0 0 0 0 0 0 0 0 0 0 0
38852 - 0 0 0 0 0 0 0 0 0 0 0 0
38853 - 6 6 6 18 18 18 46 46 46 90 90 90
38854 - 46 46 46 18 18 18 6 6 6 182 182 182
38855 -253 253 253 246 246 246 206 206 206 190 190 190
38856 -190 190 190 190 190 190 190 190 190 190 190 190
38857 -206 206 206 231 231 231 250 250 250 253 253 253
38858 -253 253 253 253 253 253 253 253 253 253 253 253
38859 -202 202 202 14 14 14 2 2 6 2 2 6
38860 - 2 2 6 2 2 6 2 2 6 2 2 6
38861 - 42 42 42 86 86 86 42 42 42 18 18 18
38862 - 6 6 6 0 0 0 0 0 0 0 0 0
38863 - 0 0 0 0 0 0 0 0 0 0 0 0
38864 - 0 0 0 0 0 0 0 0 0 0 0 0
38865 - 0 0 0 0 0 0 0 0 0 0 0 0
38866 - 0 0 0 0 0 0 0 0 0 0 0 0
38867 - 0 0 0 0 0 0 0 0 0 0 0 0
38868 - 0 0 0 0 0 0 0 0 0 0 0 0
38869 - 0 0 0 0 0 0 0 0 0 0 0 0
38870 - 0 0 0 0 0 0 0 0 0 0 0 0
38871 - 0 0 0 0 0 0 0 0 0 0 0 0
38872 - 0 0 0 0 0 0 0 0 0 6 6 6
38873 - 14 14 14 38 38 38 74 74 74 66 66 66
38874 - 2 2 6 6 6 6 90 90 90 250 250 250
38875 -253 253 253 253 253 253 238 238 238 198 198 198
38876 -190 190 190 190 190 190 195 195 195 221 221 221
38877 -246 246 246 253 253 253 253 253 253 253 253 253
38878 -253 253 253 253 253 253 253 253 253 253 253 253
38879 -253 253 253 82 82 82 2 2 6 2 2 6
38880 - 2 2 6 2 2 6 2 2 6 2 2 6
38881 - 2 2 6 78 78 78 70 70 70 34 34 34
38882 - 14 14 14 6 6 6 0 0 0 0 0 0
38883 - 0 0 0 0 0 0 0 0 0 0 0 0
38884 - 0 0 0 0 0 0 0 0 0 0 0 0
38885 - 0 0 0 0 0 0 0 0 0 0 0 0
38886 - 0 0 0 0 0 0 0 0 0 0 0 0
38887 - 0 0 0 0 0 0 0 0 0 0 0 0
38888 - 0 0 0 0 0 0 0 0 0 0 0 0
38889 - 0 0 0 0 0 0 0 0 0 0 0 0
38890 - 0 0 0 0 0 0 0 0 0 0 0 0
38891 - 0 0 0 0 0 0 0 0 0 0 0 0
38892 - 0 0 0 0 0 0 0 0 0 14 14 14
38893 - 34 34 34 66 66 66 78 78 78 6 6 6
38894 - 2 2 6 18 18 18 218 218 218 253 253 253
38895 -253 253 253 253 253 253 253 253 253 246 246 246
38896 -226 226 226 231 231 231 246 246 246 253 253 253
38897 -253 253 253 253 253 253 253 253 253 253 253 253
38898 -253 253 253 253 253 253 253 253 253 253 253 253
38899 -253 253 253 178 178 178 2 2 6 2 2 6
38900 - 2 2 6 2 2 6 2 2 6 2 2 6
38901 - 2 2 6 18 18 18 90 90 90 62 62 62
38902 - 30 30 30 10 10 10 0 0 0 0 0 0
38903 - 0 0 0 0 0 0 0 0 0 0 0 0
38904 - 0 0 0 0 0 0 0 0 0 0 0 0
38905 - 0 0 0 0 0 0 0 0 0 0 0 0
38906 - 0 0 0 0 0 0 0 0 0 0 0 0
38907 - 0 0 0 0 0 0 0 0 0 0 0 0
38908 - 0 0 0 0 0 0 0 0 0 0 0 0
38909 - 0 0 0 0 0 0 0 0 0 0 0 0
38910 - 0 0 0 0 0 0 0 0 0 0 0 0
38911 - 0 0 0 0 0 0 0 0 0 0 0 0
38912 - 0 0 0 0 0 0 10 10 10 26 26 26
38913 - 58 58 58 90 90 90 18 18 18 2 2 6
38914 - 2 2 6 110 110 110 253 253 253 253 253 253
38915 -253 253 253 253 253 253 253 253 253 253 253 253
38916 -250 250 250 253 253 253 253 253 253 253 253 253
38917 -253 253 253 253 253 253 253 253 253 253 253 253
38918 -253 253 253 253 253 253 253 253 253 253 253 253
38919 -253 253 253 231 231 231 18 18 18 2 2 6
38920 - 2 2 6 2 2 6 2 2 6 2 2 6
38921 - 2 2 6 2 2 6 18 18 18 94 94 94
38922 - 54 54 54 26 26 26 10 10 10 0 0 0
38923 - 0 0 0 0 0 0 0 0 0 0 0 0
38924 - 0 0 0 0 0 0 0 0 0 0 0 0
38925 - 0 0 0 0 0 0 0 0 0 0 0 0
38926 - 0 0 0 0 0 0 0 0 0 0 0 0
38927 - 0 0 0 0 0 0 0 0 0 0 0 0
38928 - 0 0 0 0 0 0 0 0 0 0 0 0
38929 - 0 0 0 0 0 0 0 0 0 0 0 0
38930 - 0 0 0 0 0 0 0 0 0 0 0 0
38931 - 0 0 0 0 0 0 0 0 0 0 0 0
38932 - 0 0 0 6 6 6 22 22 22 50 50 50
38933 - 90 90 90 26 26 26 2 2 6 2 2 6
38934 - 14 14 14 195 195 195 250 250 250 253 253 253
38935 -253 253 253 253 253 253 253 253 253 253 253 253
38936 -253 253 253 253 253 253 253 253 253 253 253 253
38937 -253 253 253 253 253 253 253 253 253 253 253 253
38938 -253 253 253 253 253 253 253 253 253 253 253 253
38939 -250 250 250 242 242 242 54 54 54 2 2 6
38940 - 2 2 6 2 2 6 2 2 6 2 2 6
38941 - 2 2 6 2 2 6 2 2 6 38 38 38
38942 - 86 86 86 50 50 50 22 22 22 6 6 6
38943 - 0 0 0 0 0 0 0 0 0 0 0 0
38944 - 0 0 0 0 0 0 0 0 0 0 0 0
38945 - 0 0 0 0 0 0 0 0 0 0 0 0
38946 - 0 0 0 0 0 0 0 0 0 0 0 0
38947 - 0 0 0 0 0 0 0 0 0 0 0 0
38948 - 0 0 0 0 0 0 0 0 0 0 0 0
38949 - 0 0 0 0 0 0 0 0 0 0 0 0
38950 - 0 0 0 0 0 0 0 0 0 0 0 0
38951 - 0 0 0 0 0 0 0 0 0 0 0 0
38952 - 6 6 6 14 14 14 38 38 38 82 82 82
38953 - 34 34 34 2 2 6 2 2 6 2 2 6
38954 - 42 42 42 195 195 195 246 246 246 253 253 253
38955 -253 253 253 253 253 253 253 253 253 250 250 250
38956 -242 242 242 242 242 242 250 250 250 253 253 253
38957 -253 253 253 253 253 253 253 253 253 253 253 253
38958 -253 253 253 250 250 250 246 246 246 238 238 238
38959 -226 226 226 231 231 231 101 101 101 6 6 6
38960 - 2 2 6 2 2 6 2 2 6 2 2 6
38961 - 2 2 6 2 2 6 2 2 6 2 2 6
38962 - 38 38 38 82 82 82 42 42 42 14 14 14
38963 - 6 6 6 0 0 0 0 0 0 0 0 0
38964 - 0 0 0 0 0 0 0 0 0 0 0 0
38965 - 0 0 0 0 0 0 0 0 0 0 0 0
38966 - 0 0 0 0 0 0 0 0 0 0 0 0
38967 - 0 0 0 0 0 0 0 0 0 0 0 0
38968 - 0 0 0 0 0 0 0 0 0 0 0 0
38969 - 0 0 0 0 0 0 0 0 0 0 0 0
38970 - 0 0 0 0 0 0 0 0 0 0 0 0
38971 - 0 0 0 0 0 0 0 0 0 0 0 0
38972 - 10 10 10 26 26 26 62 62 62 66 66 66
38973 - 2 2 6 2 2 6 2 2 6 6 6 6
38974 - 70 70 70 170 170 170 206 206 206 234 234 234
38975 -246 246 246 250 250 250 250 250 250 238 238 238
38976 -226 226 226 231 231 231 238 238 238 250 250 250
38977 -250 250 250 250 250 250 246 246 246 231 231 231
38978 -214 214 214 206 206 206 202 202 202 202 202 202
38979 -198 198 198 202 202 202 182 182 182 18 18 18
38980 - 2 2 6 2 2 6 2 2 6 2 2 6
38981 - 2 2 6 2 2 6 2 2 6 2 2 6
38982 - 2 2 6 62 62 62 66 66 66 30 30 30
38983 - 10 10 10 0 0 0 0 0 0 0 0 0
38984 - 0 0 0 0 0 0 0 0 0 0 0 0
38985 - 0 0 0 0 0 0 0 0 0 0 0 0
38986 - 0 0 0 0 0 0 0 0 0 0 0 0
38987 - 0 0 0 0 0 0 0 0 0 0 0 0
38988 - 0 0 0 0 0 0 0 0 0 0 0 0
38989 - 0 0 0 0 0 0 0 0 0 0 0 0
38990 - 0 0 0 0 0 0 0 0 0 0 0 0
38991 - 0 0 0 0 0 0 0 0 0 0 0 0
38992 - 14 14 14 42 42 42 82 82 82 18 18 18
38993 - 2 2 6 2 2 6 2 2 6 10 10 10
38994 - 94 94 94 182 182 182 218 218 218 242 242 242
38995 -250 250 250 253 253 253 253 253 253 250 250 250
38996 -234 234 234 253 253 253 253 253 253 253 253 253
38997 -253 253 253 253 253 253 253 253 253 246 246 246
38998 -238 238 238 226 226 226 210 210 210 202 202 202
38999 -195 195 195 195 195 195 210 210 210 158 158 158
39000 - 6 6 6 14 14 14 50 50 50 14 14 14
39001 - 2 2 6 2 2 6 2 2 6 2 2 6
39002 - 2 2 6 6 6 6 86 86 86 46 46 46
39003 - 18 18 18 6 6 6 0 0 0 0 0 0
39004 - 0 0 0 0 0 0 0 0 0 0 0 0
39005 - 0 0 0 0 0 0 0 0 0 0 0 0
39006 - 0 0 0 0 0 0 0 0 0 0 0 0
39007 - 0 0 0 0 0 0 0 0 0 0 0 0
39008 - 0 0 0 0 0 0 0 0 0 0 0 0
39009 - 0 0 0 0 0 0 0 0 0 0 0 0
39010 - 0 0 0 0 0 0 0 0 0 0 0 0
39011 - 0 0 0 0 0 0 0 0 0 6 6 6
39012 - 22 22 22 54 54 54 70 70 70 2 2 6
39013 - 2 2 6 10 10 10 2 2 6 22 22 22
39014 -166 166 166 231 231 231 250 250 250 253 253 253
39015 -253 253 253 253 253 253 253 253 253 250 250 250
39016 -242 242 242 253 253 253 253 253 253 253 253 253
39017 -253 253 253 253 253 253 253 253 253 253 253 253
39018 -253 253 253 253 253 253 253 253 253 246 246 246
39019 -231 231 231 206 206 206 198 198 198 226 226 226
39020 - 94 94 94 2 2 6 6 6 6 38 38 38
39021 - 30 30 30 2 2 6 2 2 6 2 2 6
39022 - 2 2 6 2 2 6 62 62 62 66 66 66
39023 - 26 26 26 10 10 10 0 0 0 0 0 0
39024 - 0 0 0 0 0 0 0 0 0 0 0 0
39025 - 0 0 0 0 0 0 0 0 0 0 0 0
39026 - 0 0 0 0 0 0 0 0 0 0 0 0
39027 - 0 0 0 0 0 0 0 0 0 0 0 0
39028 - 0 0 0 0 0 0 0 0 0 0 0 0
39029 - 0 0 0 0 0 0 0 0 0 0 0 0
39030 - 0 0 0 0 0 0 0 0 0 0 0 0
39031 - 0 0 0 0 0 0 0 0 0 10 10 10
39032 - 30 30 30 74 74 74 50 50 50 2 2 6
39033 - 26 26 26 26 26 26 2 2 6 106 106 106
39034 -238 238 238 253 253 253 253 253 253 253 253 253
39035 -253 253 253 253 253 253 253 253 253 253 253 253
39036 -253 253 253 253 253 253 253 253 253 253 253 253
39037 -253 253 253 253 253 253 253 253 253 253 253 253
39038 -253 253 253 253 253 253 253 253 253 253 253 253
39039 -253 253 253 246 246 246 218 218 218 202 202 202
39040 -210 210 210 14 14 14 2 2 6 2 2 6
39041 - 30 30 30 22 22 22 2 2 6 2 2 6
39042 - 2 2 6 2 2 6 18 18 18 86 86 86
39043 - 42 42 42 14 14 14 0 0 0 0 0 0
39044 - 0 0 0 0 0 0 0 0 0 0 0 0
39045 - 0 0 0 0 0 0 0 0 0 0 0 0
39046 - 0 0 0 0 0 0 0 0 0 0 0 0
39047 - 0 0 0 0 0 0 0 0 0 0 0 0
39048 - 0 0 0 0 0 0 0 0 0 0 0 0
39049 - 0 0 0 0 0 0 0 0 0 0 0 0
39050 - 0 0 0 0 0 0 0 0 0 0 0 0
39051 - 0 0 0 0 0 0 0 0 0 14 14 14
39052 - 42 42 42 90 90 90 22 22 22 2 2 6
39053 - 42 42 42 2 2 6 18 18 18 218 218 218
39054 -253 253 253 253 253 253 253 253 253 253 253 253
39055 -253 253 253 253 253 253 253 253 253 253 253 253
39056 -253 253 253 253 253 253 253 253 253 253 253 253
39057 -253 253 253 253 253 253 253 253 253 253 253 253
39058 -253 253 253 253 253 253 253 253 253 253 253 253
39059 -253 253 253 253 253 253 250 250 250 221 221 221
39060 -218 218 218 101 101 101 2 2 6 14 14 14
39061 - 18 18 18 38 38 38 10 10 10 2 2 6
39062 - 2 2 6 2 2 6 2 2 6 78 78 78
39063 - 58 58 58 22 22 22 6 6 6 0 0 0
39064 - 0 0 0 0 0 0 0 0 0 0 0 0
39065 - 0 0 0 0 0 0 0 0 0 0 0 0
39066 - 0 0 0 0 0 0 0 0 0 0 0 0
39067 - 0 0 0 0 0 0 0 0 0 0 0 0
39068 - 0 0 0 0 0 0 0 0 0 0 0 0
39069 - 0 0 0 0 0 0 0 0 0 0 0 0
39070 - 0 0 0 0 0 0 0 0 0 0 0 0
39071 - 0 0 0 0 0 0 6 6 6 18 18 18
39072 - 54 54 54 82 82 82 2 2 6 26 26 26
39073 - 22 22 22 2 2 6 123 123 123 253 253 253
39074 -253 253 253 253 253 253 253 253 253 253 253 253
39075 -253 253 253 253 253 253 253 253 253 253 253 253
39076 -253 253 253 253 253 253 253 253 253 253 253 253
39077 -253 253 253 253 253 253 253 253 253 253 253 253
39078 -253 253 253 253 253 253 253 253 253 253 253 253
39079 -253 253 253 253 253 253 253 253 253 250 250 250
39080 -238 238 238 198 198 198 6 6 6 38 38 38
39081 - 58 58 58 26 26 26 38 38 38 2 2 6
39082 - 2 2 6 2 2 6 2 2 6 46 46 46
39083 - 78 78 78 30 30 30 10 10 10 0 0 0
39084 - 0 0 0 0 0 0 0 0 0 0 0 0
39085 - 0 0 0 0 0 0 0 0 0 0 0 0
39086 - 0 0 0 0 0 0 0 0 0 0 0 0
39087 - 0 0 0 0 0 0 0 0 0 0 0 0
39088 - 0 0 0 0 0 0 0 0 0 0 0 0
39089 - 0 0 0 0 0 0 0 0 0 0 0 0
39090 - 0 0 0 0 0 0 0 0 0 0 0 0
39091 - 0 0 0 0 0 0 10 10 10 30 30 30
39092 - 74 74 74 58 58 58 2 2 6 42 42 42
39093 - 2 2 6 22 22 22 231 231 231 253 253 253
39094 -253 253 253 253 253 253 253 253 253 253 253 253
39095 -253 253 253 253 253 253 253 253 253 250 250 250
39096 -253 253 253 253 253 253 253 253 253 253 253 253
39097 -253 253 253 253 253 253 253 253 253 253 253 253
39098 -253 253 253 253 253 253 253 253 253 253 253 253
39099 -253 253 253 253 253 253 253 253 253 253 253 253
39100 -253 253 253 246 246 246 46 46 46 38 38 38
39101 - 42 42 42 14 14 14 38 38 38 14 14 14
39102 - 2 2 6 2 2 6 2 2 6 6 6 6
39103 - 86 86 86 46 46 46 14 14 14 0 0 0
39104 - 0 0 0 0 0 0 0 0 0 0 0 0
39105 - 0 0 0 0 0 0 0 0 0 0 0 0
39106 - 0 0 0 0 0 0 0 0 0 0 0 0
39107 - 0 0 0 0 0 0 0 0 0 0 0 0
39108 - 0 0 0 0 0 0 0 0 0 0 0 0
39109 - 0 0 0 0 0 0 0 0 0 0 0 0
39110 - 0 0 0 0 0 0 0 0 0 0 0 0
39111 - 0 0 0 6 6 6 14 14 14 42 42 42
39112 - 90 90 90 18 18 18 18 18 18 26 26 26
39113 - 2 2 6 116 116 116 253 253 253 253 253 253
39114 -253 253 253 253 253 253 253 253 253 253 253 253
39115 -253 253 253 253 253 253 250 250 250 238 238 238
39116 -253 253 253 253 253 253 253 253 253 253 253 253
39117 -253 253 253 253 253 253 253 253 253 253 253 253
39118 -253 253 253 253 253 253 253 253 253 253 253 253
39119 -253 253 253 253 253 253 253 253 253 253 253 253
39120 -253 253 253 253 253 253 94 94 94 6 6 6
39121 - 2 2 6 2 2 6 10 10 10 34 34 34
39122 - 2 2 6 2 2 6 2 2 6 2 2 6
39123 - 74 74 74 58 58 58 22 22 22 6 6 6
39124 - 0 0 0 0 0 0 0 0 0 0 0 0
39125 - 0 0 0 0 0 0 0 0 0 0 0 0
39126 - 0 0 0 0 0 0 0 0 0 0 0 0
39127 - 0 0 0 0 0 0 0 0 0 0 0 0
39128 - 0 0 0 0 0 0 0 0 0 0 0 0
39129 - 0 0 0 0 0 0 0 0 0 0 0 0
39130 - 0 0 0 0 0 0 0 0 0 0 0 0
39131 - 0 0 0 10 10 10 26 26 26 66 66 66
39132 - 82 82 82 2 2 6 38 38 38 6 6 6
39133 - 14 14 14 210 210 210 253 253 253 253 253 253
39134 -253 253 253 253 253 253 253 253 253 253 253 253
39135 -253 253 253 253 253 253 246 246 246 242 242 242
39136 -253 253 253 253 253 253 253 253 253 253 253 253
39137 -253 253 253 253 253 253 253 253 253 253 253 253
39138 -253 253 253 253 253 253 253 253 253 253 253 253
39139 -253 253 253 253 253 253 253 253 253 253 253 253
39140 -253 253 253 253 253 253 144 144 144 2 2 6
39141 - 2 2 6 2 2 6 2 2 6 46 46 46
39142 - 2 2 6 2 2 6 2 2 6 2 2 6
39143 - 42 42 42 74 74 74 30 30 30 10 10 10
39144 - 0 0 0 0 0 0 0 0 0 0 0 0
39145 - 0 0 0 0 0 0 0 0 0 0 0 0
39146 - 0 0 0 0 0 0 0 0 0 0 0 0
39147 - 0 0 0 0 0 0 0 0 0 0 0 0
39148 - 0 0 0 0 0 0 0 0 0 0 0 0
39149 - 0 0 0 0 0 0 0 0 0 0 0 0
39150 - 0 0 0 0 0 0 0 0 0 0 0 0
39151 - 6 6 6 14 14 14 42 42 42 90 90 90
39152 - 26 26 26 6 6 6 42 42 42 2 2 6
39153 - 74 74 74 250 250 250 253 253 253 253 253 253
39154 -253 253 253 253 253 253 253 253 253 253 253 253
39155 -253 253 253 253 253 253 242 242 242 242 242 242
39156 -253 253 253 253 253 253 253 253 253 253 253 253
39157 -253 253 253 253 253 253 253 253 253 253 253 253
39158 -253 253 253 253 253 253 253 253 253 253 253 253
39159 -253 253 253 253 253 253 253 253 253 253 253 253
39160 -253 253 253 253 253 253 182 182 182 2 2 6
39161 - 2 2 6 2 2 6 2 2 6 46 46 46
39162 - 2 2 6 2 2 6 2 2 6 2 2 6
39163 - 10 10 10 86 86 86 38 38 38 10 10 10
39164 - 0 0 0 0 0 0 0 0 0 0 0 0
39165 - 0 0 0 0 0 0 0 0 0 0 0 0
39166 - 0 0 0 0 0 0 0 0 0 0 0 0
39167 - 0 0 0 0 0 0 0 0 0 0 0 0
39168 - 0 0 0 0 0 0 0 0 0 0 0 0
39169 - 0 0 0 0 0 0 0 0 0 0 0 0
39170 - 0 0 0 0 0 0 0 0 0 0 0 0
39171 - 10 10 10 26 26 26 66 66 66 82 82 82
39172 - 2 2 6 22 22 22 18 18 18 2 2 6
39173 -149 149 149 253 253 253 253 253 253 253 253 253
39174 -253 253 253 253 253 253 253 253 253 253 253 253
39175 -253 253 253 253 253 253 234 234 234 242 242 242
39176 -253 253 253 253 253 253 253 253 253 253 253 253
39177 -253 253 253 253 253 253 253 253 253 253 253 253
39178 -253 253 253 253 253 253 253 253 253 253 253 253
39179 -253 253 253 253 253 253 253 253 253 253 253 253
39180 -253 253 253 253 253 253 206 206 206 2 2 6
39181 - 2 2 6 2 2 6 2 2 6 38 38 38
39182 - 2 2 6 2 2 6 2 2 6 2 2 6
39183 - 6 6 6 86 86 86 46 46 46 14 14 14
39184 - 0 0 0 0 0 0 0 0 0 0 0 0
39185 - 0 0 0 0 0 0 0 0 0 0 0 0
39186 - 0 0 0 0 0 0 0 0 0 0 0 0
39187 - 0 0 0 0 0 0 0 0 0 0 0 0
39188 - 0 0 0 0 0 0 0 0 0 0 0 0
39189 - 0 0 0 0 0 0 0 0 0 0 0 0
39190 - 0 0 0 0 0 0 0 0 0 6 6 6
39191 - 18 18 18 46 46 46 86 86 86 18 18 18
39192 - 2 2 6 34 34 34 10 10 10 6 6 6
39193 -210 210 210 253 253 253 253 253 253 253 253 253
39194 -253 253 253 253 253 253 253 253 253 253 253 253
39195 -253 253 253 253 253 253 234 234 234 242 242 242
39196 -253 253 253 253 253 253 253 253 253 253 253 253
39197 -253 253 253 253 253 253 253 253 253 253 253 253
39198 -253 253 253 253 253 253 253 253 253 253 253 253
39199 -253 253 253 253 253 253 253 253 253 253 253 253
39200 -253 253 253 253 253 253 221 221 221 6 6 6
39201 - 2 2 6 2 2 6 6 6 6 30 30 30
39202 - 2 2 6 2 2 6 2 2 6 2 2 6
39203 - 2 2 6 82 82 82 54 54 54 18 18 18
39204 - 6 6 6 0 0 0 0 0 0 0 0 0
39205 - 0 0 0 0 0 0 0 0 0 0 0 0
39206 - 0 0 0 0 0 0 0 0 0 0 0 0
39207 - 0 0 0 0 0 0 0 0 0 0 0 0
39208 - 0 0 0 0 0 0 0 0 0 0 0 0
39209 - 0 0 0 0 0 0 0 0 0 0 0 0
39210 - 0 0 0 0 0 0 0 0 0 10 10 10
39211 - 26 26 26 66 66 66 62 62 62 2 2 6
39212 - 2 2 6 38 38 38 10 10 10 26 26 26
39213 -238 238 238 253 253 253 253 253 253 253 253 253
39214 -253 253 253 253 253 253 253 253 253 253 253 253
39215 -253 253 253 253 253 253 231 231 231 238 238 238
39216 -253 253 253 253 253 253 253 253 253 253 253 253
39217 -253 253 253 253 253 253 253 253 253 253 253 253
39218 -253 253 253 253 253 253 253 253 253 253 253 253
39219 -253 253 253 253 253 253 253 253 253 253 253 253
39220 -253 253 253 253 253 253 231 231 231 6 6 6
39221 - 2 2 6 2 2 6 10 10 10 30 30 30
39222 - 2 2 6 2 2 6 2 2 6 2 2 6
39223 - 2 2 6 66 66 66 58 58 58 22 22 22
39224 - 6 6 6 0 0 0 0 0 0 0 0 0
39225 - 0 0 0 0 0 0 0 0 0 0 0 0
39226 - 0 0 0 0 0 0 0 0 0 0 0 0
39227 - 0 0 0 0 0 0 0 0 0 0 0 0
39228 - 0 0 0 0 0 0 0 0 0 0 0 0
39229 - 0 0 0 0 0 0 0 0 0 0 0 0
39230 - 0 0 0 0 0 0 0 0 0 10 10 10
39231 - 38 38 38 78 78 78 6 6 6 2 2 6
39232 - 2 2 6 46 46 46 14 14 14 42 42 42
39233 -246 246 246 253 253 253 253 253 253 253 253 253
39234 -253 253 253 253 253 253 253 253 253 253 253 253
39235 -253 253 253 253 253 253 231 231 231 242 242 242
39236 -253 253 253 253 253 253 253 253 253 253 253 253
39237 -253 253 253 253 253 253 253 253 253 253 253 253
39238 -253 253 253 253 253 253 253 253 253 253 253 253
39239 -253 253 253 253 253 253 253 253 253 253 253 253
39240 -253 253 253 253 253 253 234 234 234 10 10 10
39241 - 2 2 6 2 2 6 22 22 22 14 14 14
39242 - 2 2 6 2 2 6 2 2 6 2 2 6
39243 - 2 2 6 66 66 66 62 62 62 22 22 22
39244 - 6 6 6 0 0 0 0 0 0 0 0 0
39245 - 0 0 0 0 0 0 0 0 0 0 0 0
39246 - 0 0 0 0 0 0 0 0 0 0 0 0
39247 - 0 0 0 0 0 0 0 0 0 0 0 0
39248 - 0 0 0 0 0 0 0 0 0 0 0 0
39249 - 0 0 0 0 0 0 0 0 0 0 0 0
39250 - 0 0 0 0 0 0 6 6 6 18 18 18
39251 - 50 50 50 74 74 74 2 2 6 2 2 6
39252 - 14 14 14 70 70 70 34 34 34 62 62 62
39253 -250 250 250 253 253 253 253 253 253 253 253 253
39254 -253 253 253 253 253 253 253 253 253 253 253 253
39255 -253 253 253 253 253 253 231 231 231 246 246 246
39256 -253 253 253 253 253 253 253 253 253 253 253 253
39257 -253 253 253 253 253 253 253 253 253 253 253 253
39258 -253 253 253 253 253 253 253 253 253 253 253 253
39259 -253 253 253 253 253 253 253 253 253 253 253 253
39260 -253 253 253 253 253 253 234 234 234 14 14 14
39261 - 2 2 6 2 2 6 30 30 30 2 2 6
39262 - 2 2 6 2 2 6 2 2 6 2 2 6
39263 - 2 2 6 66 66 66 62 62 62 22 22 22
39264 - 6 6 6 0 0 0 0 0 0 0 0 0
39265 - 0 0 0 0 0 0 0 0 0 0 0 0
39266 - 0 0 0 0 0 0 0 0 0 0 0 0
39267 - 0 0 0 0 0 0 0 0 0 0 0 0
39268 - 0 0 0 0 0 0 0 0 0 0 0 0
39269 - 0 0 0 0 0 0 0 0 0 0 0 0
39270 - 0 0 0 0 0 0 6 6 6 18 18 18
39271 - 54 54 54 62 62 62 2 2 6 2 2 6
39272 - 2 2 6 30 30 30 46 46 46 70 70 70
39273 -250 250 250 253 253 253 253 253 253 253 253 253
39274 -253 253 253 253 253 253 253 253 253 253 253 253
39275 -253 253 253 253 253 253 231 231 231 246 246 246
39276 -253 253 253 253 253 253 253 253 253 253 253 253
39277 -253 253 253 253 253 253 253 253 253 253 253 253
39278 -253 253 253 253 253 253 253 253 253 253 253 253
39279 -253 253 253 253 253 253 253 253 253 253 253 253
39280 -253 253 253 253 253 253 226 226 226 10 10 10
39281 - 2 2 6 6 6 6 30 30 30 2 2 6
39282 - 2 2 6 2 2 6 2 2 6 2 2 6
39283 - 2 2 6 66 66 66 58 58 58 22 22 22
39284 - 6 6 6 0 0 0 0 0 0 0 0 0
39285 - 0 0 0 0 0 0 0 0 0 0 0 0
39286 - 0 0 0 0 0 0 0 0 0 0 0 0
39287 - 0 0 0 0 0 0 0 0 0 0 0 0
39288 - 0 0 0 0 0 0 0 0 0 0 0 0
39289 - 0 0 0 0 0 0 0 0 0 0 0 0
39290 - 0 0 0 0 0 0 6 6 6 22 22 22
39291 - 58 58 58 62 62 62 2 2 6 2 2 6
39292 - 2 2 6 2 2 6 30 30 30 78 78 78
39293 -250 250 250 253 253 253 253 253 253 253 253 253
39294 -253 253 253 253 253 253 253 253 253 253 253 253
39295 -253 253 253 253 253 253 231 231 231 246 246 246
39296 -253 253 253 253 253 253 253 253 253 253 253 253
39297 -253 253 253 253 253 253 253 253 253 253 253 253
39298 -253 253 253 253 253 253 253 253 253 253 253 253
39299 -253 253 253 253 253 253 253 253 253 253 253 253
39300 -253 253 253 253 253 253 206 206 206 2 2 6
39301 - 22 22 22 34 34 34 18 14 6 22 22 22
39302 - 26 26 26 18 18 18 6 6 6 2 2 6
39303 - 2 2 6 82 82 82 54 54 54 18 18 18
39304 - 6 6 6 0 0 0 0 0 0 0 0 0
39305 - 0 0 0 0 0 0 0 0 0 0 0 0
39306 - 0 0 0 0 0 0 0 0 0 0 0 0
39307 - 0 0 0 0 0 0 0 0 0 0 0 0
39308 - 0 0 0 0 0 0 0 0 0 0 0 0
39309 - 0 0 0 0 0 0 0 0 0 0 0 0
39310 - 0 0 0 0 0 0 6 6 6 26 26 26
39311 - 62 62 62 106 106 106 74 54 14 185 133 11
39312 -210 162 10 121 92 8 6 6 6 62 62 62
39313 -238 238 238 253 253 253 253 253 253 253 253 253
39314 -253 253 253 253 253 253 253 253 253 253 253 253
39315 -253 253 253 253 253 253 231 231 231 246 246 246
39316 -253 253 253 253 253 253 253 253 253 253 253 253
39317 -253 253 253 253 253 253 253 253 253 253 253 253
39318 -253 253 253 253 253 253 253 253 253 253 253 253
39319 -253 253 253 253 253 253 253 253 253 253 253 253
39320 -253 253 253 253 253 253 158 158 158 18 18 18
39321 - 14 14 14 2 2 6 2 2 6 2 2 6
39322 - 6 6 6 18 18 18 66 66 66 38 38 38
39323 - 6 6 6 94 94 94 50 50 50 18 18 18
39324 - 6 6 6 0 0 0 0 0 0 0 0 0
39325 - 0 0 0 0 0 0 0 0 0 0 0 0
39326 - 0 0 0 0 0 0 0 0 0 0 0 0
39327 - 0 0 0 0 0 0 0 0 0 0 0 0
39328 - 0 0 0 0 0 0 0 0 0 0 0 0
39329 - 0 0 0 0 0 0 0 0 0 6 6 6
39330 - 10 10 10 10 10 10 18 18 18 38 38 38
39331 - 78 78 78 142 134 106 216 158 10 242 186 14
39332 -246 190 14 246 190 14 156 118 10 10 10 10
39333 - 90 90 90 238 238 238 253 253 253 253 253 253
39334 -253 253 253 253 253 253 253 253 253 253 253 253
39335 -253 253 253 253 253 253 231 231 231 250 250 250
39336 -253 253 253 253 253 253 253 253 253 253 253 253
39337 -253 253 253 253 253 253 253 253 253 253 253 253
39338 -253 253 253 253 253 253 253 253 253 253 253 253
39339 -253 253 253 253 253 253 253 253 253 246 230 190
39340 -238 204 91 238 204 91 181 142 44 37 26 9
39341 - 2 2 6 2 2 6 2 2 6 2 2 6
39342 - 2 2 6 2 2 6 38 38 38 46 46 46
39343 - 26 26 26 106 106 106 54 54 54 18 18 18
39344 - 6 6 6 0 0 0 0 0 0 0 0 0
39345 - 0 0 0 0 0 0 0 0 0 0 0 0
39346 - 0 0 0 0 0 0 0 0 0 0 0 0
39347 - 0 0 0 0 0 0 0 0 0 0 0 0
39348 - 0 0 0 0 0 0 0 0 0 0 0 0
39349 - 0 0 0 6 6 6 14 14 14 22 22 22
39350 - 30 30 30 38 38 38 50 50 50 70 70 70
39351 -106 106 106 190 142 34 226 170 11 242 186 14
39352 -246 190 14 246 190 14 246 190 14 154 114 10
39353 - 6 6 6 74 74 74 226 226 226 253 253 253
39354 -253 253 253 253 253 253 253 253 253 253 253 253
39355 -253 253 253 253 253 253 231 231 231 250 250 250
39356 -253 253 253 253 253 253 253 253 253 253 253 253
39357 -253 253 253 253 253 253 253 253 253 253 253 253
39358 -253 253 253 253 253 253 253 253 253 253 253 253
39359 -253 253 253 253 253 253 253 253 253 228 184 62
39360 -241 196 14 241 208 19 232 195 16 38 30 10
39361 - 2 2 6 2 2 6 2 2 6 2 2 6
39362 - 2 2 6 6 6 6 30 30 30 26 26 26
39363 -203 166 17 154 142 90 66 66 66 26 26 26
39364 - 6 6 6 0 0 0 0 0 0 0 0 0
39365 - 0 0 0 0 0 0 0 0 0 0 0 0
39366 - 0 0 0 0 0 0 0 0 0 0 0 0
39367 - 0 0 0 0 0 0 0 0 0 0 0 0
39368 - 0 0 0 0 0 0 0 0 0 0 0 0
39369 - 6 6 6 18 18 18 38 38 38 58 58 58
39370 - 78 78 78 86 86 86 101 101 101 123 123 123
39371 -175 146 61 210 150 10 234 174 13 246 186 14
39372 -246 190 14 246 190 14 246 190 14 238 190 10
39373 -102 78 10 2 2 6 46 46 46 198 198 198
39374 -253 253 253 253 253 253 253 253 253 253 253 253
39375 -253 253 253 253 253 253 234 234 234 242 242 242
39376 -253 253 253 253 253 253 253 253 253 253 253 253
39377 -253 253 253 253 253 253 253 253 253 253 253 253
39378 -253 253 253 253 253 253 253 253 253 253 253 253
39379 -253 253 253 253 253 253 253 253 253 224 178 62
39380 -242 186 14 241 196 14 210 166 10 22 18 6
39381 - 2 2 6 2 2 6 2 2 6 2 2 6
39382 - 2 2 6 2 2 6 6 6 6 121 92 8
39383 -238 202 15 232 195 16 82 82 82 34 34 34
39384 - 10 10 10 0 0 0 0 0 0 0 0 0
39385 - 0 0 0 0 0 0 0 0 0 0 0 0
39386 - 0 0 0 0 0 0 0 0 0 0 0 0
39387 - 0 0 0 0 0 0 0 0 0 0 0 0
39388 - 0 0 0 0 0 0 0 0 0 0 0 0
39389 - 14 14 14 38 38 38 70 70 70 154 122 46
39390 -190 142 34 200 144 11 197 138 11 197 138 11
39391 -213 154 11 226 170 11 242 186 14 246 190 14
39392 -246 190 14 246 190 14 246 190 14 246 190 14
39393 -225 175 15 46 32 6 2 2 6 22 22 22
39394 -158 158 158 250 250 250 253 253 253 253 253 253
39395 -253 253 253 253 253 253 253 253 253 253 253 253
39396 -253 253 253 253 253 253 253 253 253 253 253 253
39397 -253 253 253 253 253 253 253 253 253 253 253 253
39398 -253 253 253 253 253 253 253 253 253 253 253 253
39399 -253 253 253 250 250 250 242 242 242 224 178 62
39400 -239 182 13 236 186 11 213 154 11 46 32 6
39401 - 2 2 6 2 2 6 2 2 6 2 2 6
39402 - 2 2 6 2 2 6 61 42 6 225 175 15
39403 -238 190 10 236 186 11 112 100 78 42 42 42
39404 - 14 14 14 0 0 0 0 0 0 0 0 0
39405 - 0 0 0 0 0 0 0 0 0 0 0 0
39406 - 0 0 0 0 0 0 0 0 0 0 0 0
39407 - 0 0 0 0 0 0 0 0 0 0 0 0
39408 - 0 0 0 0 0 0 0 0 0 6 6 6
39409 - 22 22 22 54 54 54 154 122 46 213 154 11
39410 -226 170 11 230 174 11 226 170 11 226 170 11
39411 -236 178 12 242 186 14 246 190 14 246 190 14
39412 -246 190 14 246 190 14 246 190 14 246 190 14
39413 -241 196 14 184 144 12 10 10 10 2 2 6
39414 - 6 6 6 116 116 116 242 242 242 253 253 253
39415 -253 253 253 253 253 253 253 253 253 253 253 253
39416 -253 253 253 253 253 253 253 253 253 253 253 253
39417 -253 253 253 253 253 253 253 253 253 253 253 253
39418 -253 253 253 253 253 253 253 253 253 253 253 253
39419 -253 253 253 231 231 231 198 198 198 214 170 54
39420 -236 178 12 236 178 12 210 150 10 137 92 6
39421 - 18 14 6 2 2 6 2 2 6 2 2 6
39422 - 6 6 6 70 47 6 200 144 11 236 178 12
39423 -239 182 13 239 182 13 124 112 88 58 58 58
39424 - 22 22 22 6 6 6 0 0 0 0 0 0
39425 - 0 0 0 0 0 0 0 0 0 0 0 0
39426 - 0 0 0 0 0 0 0 0 0 0 0 0
39427 - 0 0 0 0 0 0 0 0 0 0 0 0
39428 - 0 0 0 0 0 0 0 0 0 10 10 10
39429 - 30 30 30 70 70 70 180 133 36 226 170 11
39430 -239 182 13 242 186 14 242 186 14 246 186 14
39431 -246 190 14 246 190 14 246 190 14 246 190 14
39432 -246 190 14 246 190 14 246 190 14 246 190 14
39433 -246 190 14 232 195 16 98 70 6 2 2 6
39434 - 2 2 6 2 2 6 66 66 66 221 221 221
39435 -253 253 253 253 253 253 253 253 253 253 253 253
39436 -253 253 253 253 253 253 253 253 253 253 253 253
39437 -253 253 253 253 253 253 253 253 253 253 253 253
39438 -253 253 253 253 253 253 253 253 253 253 253 253
39439 -253 253 253 206 206 206 198 198 198 214 166 58
39440 -230 174 11 230 174 11 216 158 10 192 133 9
39441 -163 110 8 116 81 8 102 78 10 116 81 8
39442 -167 114 7 197 138 11 226 170 11 239 182 13
39443 -242 186 14 242 186 14 162 146 94 78 78 78
39444 - 34 34 34 14 14 14 6 6 6 0 0 0
39445 - 0 0 0 0 0 0 0 0 0 0 0 0
39446 - 0 0 0 0 0 0 0 0 0 0 0 0
39447 - 0 0 0 0 0 0 0 0 0 0 0 0
39448 - 0 0 0 0 0 0 0 0 0 6 6 6
39449 - 30 30 30 78 78 78 190 142 34 226 170 11
39450 -239 182 13 246 190 14 246 190 14 246 190 14
39451 -246 190 14 246 190 14 246 190 14 246 190 14
39452 -246 190 14 246 190 14 246 190 14 246 190 14
39453 -246 190 14 241 196 14 203 166 17 22 18 6
39454 - 2 2 6 2 2 6 2 2 6 38 38 38
39455 -218 218 218 253 253 253 253 253 253 253 253 253
39456 -253 253 253 253 253 253 253 253 253 253 253 253
39457 -253 253 253 253 253 253 253 253 253 253 253 253
39458 -253 253 253 253 253 253 253 253 253 253 253 253
39459 -250 250 250 206 206 206 198 198 198 202 162 69
39460 -226 170 11 236 178 12 224 166 10 210 150 10
39461 -200 144 11 197 138 11 192 133 9 197 138 11
39462 -210 150 10 226 170 11 242 186 14 246 190 14
39463 -246 190 14 246 186 14 225 175 15 124 112 88
39464 - 62 62 62 30 30 30 14 14 14 6 6 6
39465 - 0 0 0 0 0 0 0 0 0 0 0 0
39466 - 0 0 0 0 0 0 0 0 0 0 0 0
39467 - 0 0 0 0 0 0 0 0 0 0 0 0
39468 - 0 0 0 0 0 0 0 0 0 10 10 10
39469 - 30 30 30 78 78 78 174 135 50 224 166 10
39470 -239 182 13 246 190 14 246 190 14 246 190 14
39471 -246 190 14 246 190 14 246 190 14 246 190 14
39472 -246 190 14 246 190 14 246 190 14 246 190 14
39473 -246 190 14 246 190 14 241 196 14 139 102 15
39474 - 2 2 6 2 2 6 2 2 6 2 2 6
39475 - 78 78 78 250 250 250 253 253 253 253 253 253
39476 -253 253 253 253 253 253 253 253 253 253 253 253
39477 -253 253 253 253 253 253 253 253 253 253 253 253
39478 -253 253 253 253 253 253 253 253 253 253 253 253
39479 -250 250 250 214 214 214 198 198 198 190 150 46
39480 -219 162 10 236 178 12 234 174 13 224 166 10
39481 -216 158 10 213 154 11 213 154 11 216 158 10
39482 -226 170 11 239 182 13 246 190 14 246 190 14
39483 -246 190 14 246 190 14 242 186 14 206 162 42
39484 -101 101 101 58 58 58 30 30 30 14 14 14
39485 - 6 6 6 0 0 0 0 0 0 0 0 0
39486 - 0 0 0 0 0 0 0 0 0 0 0 0
39487 - 0 0 0 0 0 0 0 0 0 0 0 0
39488 - 0 0 0 0 0 0 0 0 0 10 10 10
39489 - 30 30 30 74 74 74 174 135 50 216 158 10
39490 -236 178 12 246 190 14 246 190 14 246 190 14
39491 -246 190 14 246 190 14 246 190 14 246 190 14
39492 -246 190 14 246 190 14 246 190 14 246 190 14
39493 -246 190 14 246 190 14 241 196 14 226 184 13
39494 - 61 42 6 2 2 6 2 2 6 2 2 6
39495 - 22 22 22 238 238 238 253 253 253 253 253 253
39496 -253 253 253 253 253 253 253 253 253 253 253 253
39497 -253 253 253 253 253 253 253 253 253 253 253 253
39498 -253 253 253 253 253 253 253 253 253 253 253 253
39499 -253 253 253 226 226 226 187 187 187 180 133 36
39500 -216 158 10 236 178 12 239 182 13 236 178 12
39501 -230 174 11 226 170 11 226 170 11 230 174 11
39502 -236 178 12 242 186 14 246 190 14 246 190 14
39503 -246 190 14 246 190 14 246 186 14 239 182 13
39504 -206 162 42 106 106 106 66 66 66 34 34 34
39505 - 14 14 14 6 6 6 0 0 0 0 0 0
39506 - 0 0 0 0 0 0 0 0 0 0 0 0
39507 - 0 0 0 0 0 0 0 0 0 0 0 0
39508 - 0 0 0 0 0 0 0 0 0 6 6 6
39509 - 26 26 26 70 70 70 163 133 67 213 154 11
39510 -236 178 12 246 190 14 246 190 14 246 190 14
39511 -246 190 14 246 190 14 246 190 14 246 190 14
39512 -246 190 14 246 190 14 246 190 14 246 190 14
39513 -246 190 14 246 190 14 246 190 14 241 196 14
39514 -190 146 13 18 14 6 2 2 6 2 2 6
39515 - 46 46 46 246 246 246 253 253 253 253 253 253
39516 -253 253 253 253 253 253 253 253 253 253 253 253
39517 -253 253 253 253 253 253 253 253 253 253 253 253
39518 -253 253 253 253 253 253 253 253 253 253 253 253
39519 -253 253 253 221 221 221 86 86 86 156 107 11
39520 -216 158 10 236 178 12 242 186 14 246 186 14
39521 -242 186 14 239 182 13 239 182 13 242 186 14
39522 -242 186 14 246 186 14 246 190 14 246 190 14
39523 -246 190 14 246 190 14 246 190 14 246 190 14
39524 -242 186 14 225 175 15 142 122 72 66 66 66
39525 - 30 30 30 10 10 10 0 0 0 0 0 0
39526 - 0 0 0 0 0 0 0 0 0 0 0 0
39527 - 0 0 0 0 0 0 0 0 0 0 0 0
39528 - 0 0 0 0 0 0 0 0 0 6 6 6
39529 - 26 26 26 70 70 70 163 133 67 210 150 10
39530 -236 178 12 246 190 14 246 190 14 246 190 14
39531 -246 190 14 246 190 14 246 190 14 246 190 14
39532 -246 190 14 246 190 14 246 190 14 246 190 14
39533 -246 190 14 246 190 14 246 190 14 246 190 14
39534 -232 195 16 121 92 8 34 34 34 106 106 106
39535 -221 221 221 253 253 253 253 253 253 253 253 253
39536 -253 253 253 253 253 253 253 253 253 253 253 253
39537 -253 253 253 253 253 253 253 253 253 253 253 253
39538 -253 253 253 253 253 253 253 253 253 253 253 253
39539 -242 242 242 82 82 82 18 14 6 163 110 8
39540 -216 158 10 236 178 12 242 186 14 246 190 14
39541 -246 190 14 246 190 14 246 190 14 246 190 14
39542 -246 190 14 246 190 14 246 190 14 246 190 14
39543 -246 190 14 246 190 14 246 190 14 246 190 14
39544 -246 190 14 246 190 14 242 186 14 163 133 67
39545 - 46 46 46 18 18 18 6 6 6 0 0 0
39546 - 0 0 0 0 0 0 0 0 0 0 0 0
39547 - 0 0 0 0 0 0 0 0 0 0 0 0
39548 - 0 0 0 0 0 0 0 0 0 10 10 10
39549 - 30 30 30 78 78 78 163 133 67 210 150 10
39550 -236 178 12 246 186 14 246 190 14 246 190 14
39551 -246 190 14 246 190 14 246 190 14 246 190 14
39552 -246 190 14 246 190 14 246 190 14 246 190 14
39553 -246 190 14 246 190 14 246 190 14 246 190 14
39554 -241 196 14 215 174 15 190 178 144 253 253 253
39555 -253 253 253 253 253 253 253 253 253 253 253 253
39556 -253 253 253 253 253 253 253 253 253 253 253 253
39557 -253 253 253 253 253 253 253 253 253 253 253 253
39558 -253 253 253 253 253 253 253 253 253 218 218 218
39559 - 58 58 58 2 2 6 22 18 6 167 114 7
39560 -216 158 10 236 178 12 246 186 14 246 190 14
39561 -246 190 14 246 190 14 246 190 14 246 190 14
39562 -246 190 14 246 190 14 246 190 14 246 190 14
39563 -246 190 14 246 190 14 246 190 14 246 190 14
39564 -246 190 14 246 186 14 242 186 14 190 150 46
39565 - 54 54 54 22 22 22 6 6 6 0 0 0
39566 - 0 0 0 0 0 0 0 0 0 0 0 0
39567 - 0 0 0 0 0 0 0 0 0 0 0 0
39568 - 0 0 0 0 0 0 0 0 0 14 14 14
39569 - 38 38 38 86 86 86 180 133 36 213 154 11
39570 -236 178 12 246 186 14 246 190 14 246 190 14
39571 -246 190 14 246 190 14 246 190 14 246 190 14
39572 -246 190 14 246 190 14 246 190 14 246 190 14
39573 -246 190 14 246 190 14 246 190 14 246 190 14
39574 -246 190 14 232 195 16 190 146 13 214 214 214
39575 -253 253 253 253 253 253 253 253 253 253 253 253
39576 -253 253 253 253 253 253 253 253 253 253 253 253
39577 -253 253 253 253 253 253 253 253 253 253 253 253
39578 -253 253 253 250 250 250 170 170 170 26 26 26
39579 - 2 2 6 2 2 6 37 26 9 163 110 8
39580 -219 162 10 239 182 13 246 186 14 246 190 14
39581 -246 190 14 246 190 14 246 190 14 246 190 14
39582 -246 190 14 246 190 14 246 190 14 246 190 14
39583 -246 190 14 246 190 14 246 190 14 246 190 14
39584 -246 186 14 236 178 12 224 166 10 142 122 72
39585 - 46 46 46 18 18 18 6 6 6 0 0 0
39586 - 0 0 0 0 0 0 0 0 0 0 0 0
39587 - 0 0 0 0 0 0 0 0 0 0 0 0
39588 - 0 0 0 0 0 0 6 6 6 18 18 18
39589 - 50 50 50 109 106 95 192 133 9 224 166 10
39590 -242 186 14 246 190 14 246 190 14 246 190 14
39591 -246 190 14 246 190 14 246 190 14 246 190 14
39592 -246 190 14 246 190 14 246 190 14 246 190 14
39593 -246 190 14 246 190 14 246 190 14 246 190 14
39594 -242 186 14 226 184 13 210 162 10 142 110 46
39595 -226 226 226 253 253 253 253 253 253 253 253 253
39596 -253 253 253 253 253 253 253 253 253 253 253 253
39597 -253 253 253 253 253 253 253 253 253 253 253 253
39598 -198 198 198 66 66 66 2 2 6 2 2 6
39599 - 2 2 6 2 2 6 50 34 6 156 107 11
39600 -219 162 10 239 182 13 246 186 14 246 190 14
39601 -246 190 14 246 190 14 246 190 14 246 190 14
39602 -246 190 14 246 190 14 246 190 14 246 190 14
39603 -246 190 14 246 190 14 246 190 14 242 186 14
39604 -234 174 13 213 154 11 154 122 46 66 66 66
39605 - 30 30 30 10 10 10 0 0 0 0 0 0
39606 - 0 0 0 0 0 0 0 0 0 0 0 0
39607 - 0 0 0 0 0 0 0 0 0 0 0 0
39608 - 0 0 0 0 0 0 6 6 6 22 22 22
39609 - 58 58 58 154 121 60 206 145 10 234 174 13
39610 -242 186 14 246 186 14 246 190 14 246 190 14
39611 -246 190 14 246 190 14 246 190 14 246 190 14
39612 -246 190 14 246 190 14 246 190 14 246 190 14
39613 -246 190 14 246 190 14 246 190 14 246 190 14
39614 -246 186 14 236 178 12 210 162 10 163 110 8
39615 - 61 42 6 138 138 138 218 218 218 250 250 250
39616 -253 253 253 253 253 253 253 253 253 250 250 250
39617 -242 242 242 210 210 210 144 144 144 66 66 66
39618 - 6 6 6 2 2 6 2 2 6 2 2 6
39619 - 2 2 6 2 2 6 61 42 6 163 110 8
39620 -216 158 10 236 178 12 246 190 14 246 190 14
39621 -246 190 14 246 190 14 246 190 14 246 190 14
39622 -246 190 14 246 190 14 246 190 14 246 190 14
39623 -246 190 14 239 182 13 230 174 11 216 158 10
39624 -190 142 34 124 112 88 70 70 70 38 38 38
39625 - 18 18 18 6 6 6 0 0 0 0 0 0
39626 - 0 0 0 0 0 0 0 0 0 0 0 0
39627 - 0 0 0 0 0 0 0 0 0 0 0 0
39628 - 0 0 0 0 0 0 6 6 6 22 22 22
39629 - 62 62 62 168 124 44 206 145 10 224 166 10
39630 -236 178 12 239 182 13 242 186 14 242 186 14
39631 -246 186 14 246 190 14 246 190 14 246 190 14
39632 -246 190 14 246 190 14 246 190 14 246 190 14
39633 -246 190 14 246 190 14 246 190 14 246 190 14
39634 -246 190 14 236 178 12 216 158 10 175 118 6
39635 - 80 54 7 2 2 6 6 6 6 30 30 30
39636 - 54 54 54 62 62 62 50 50 50 38 38 38
39637 - 14 14 14 2 2 6 2 2 6 2 2 6
39638 - 2 2 6 2 2 6 2 2 6 2 2 6
39639 - 2 2 6 6 6 6 80 54 7 167 114 7
39640 -213 154 11 236 178 12 246 190 14 246 190 14
39641 -246 190 14 246 190 14 246 190 14 246 190 14
39642 -246 190 14 242 186 14 239 182 13 239 182 13
39643 -230 174 11 210 150 10 174 135 50 124 112 88
39644 - 82 82 82 54 54 54 34 34 34 18 18 18
39645 - 6 6 6 0 0 0 0 0 0 0 0 0
39646 - 0 0 0 0 0 0 0 0 0 0 0 0
39647 - 0 0 0 0 0 0 0 0 0 0 0 0
39648 - 0 0 0 0 0 0 6 6 6 18 18 18
39649 - 50 50 50 158 118 36 192 133 9 200 144 11
39650 -216 158 10 219 162 10 224 166 10 226 170 11
39651 -230 174 11 236 178 12 239 182 13 239 182 13
39652 -242 186 14 246 186 14 246 190 14 246 190 14
39653 -246 190 14 246 190 14 246 190 14 246 190 14
39654 -246 186 14 230 174 11 210 150 10 163 110 8
39655 -104 69 6 10 10 10 2 2 6 2 2 6
39656 - 2 2 6 2 2 6 2 2 6 2 2 6
39657 - 2 2 6 2 2 6 2 2 6 2 2 6
39658 - 2 2 6 2 2 6 2 2 6 2 2 6
39659 - 2 2 6 6 6 6 91 60 6 167 114 7
39660 -206 145 10 230 174 11 242 186 14 246 190 14
39661 -246 190 14 246 190 14 246 186 14 242 186 14
39662 -239 182 13 230 174 11 224 166 10 213 154 11
39663 -180 133 36 124 112 88 86 86 86 58 58 58
39664 - 38 38 38 22 22 22 10 10 10 6 6 6
39665 - 0 0 0 0 0 0 0 0 0 0 0 0
39666 - 0 0 0 0 0 0 0 0 0 0 0 0
39667 - 0 0 0 0 0 0 0 0 0 0 0 0
39668 - 0 0 0 0 0 0 0 0 0 14 14 14
39669 - 34 34 34 70 70 70 138 110 50 158 118 36
39670 -167 114 7 180 123 7 192 133 9 197 138 11
39671 -200 144 11 206 145 10 213 154 11 219 162 10
39672 -224 166 10 230 174 11 239 182 13 242 186 14
39673 -246 186 14 246 186 14 246 186 14 246 186 14
39674 -239 182 13 216 158 10 185 133 11 152 99 6
39675 -104 69 6 18 14 6 2 2 6 2 2 6
39676 - 2 2 6 2 2 6 2 2 6 2 2 6
39677 - 2 2 6 2 2 6 2 2 6 2 2 6
39678 - 2 2 6 2 2 6 2 2 6 2 2 6
39679 - 2 2 6 6 6 6 80 54 7 152 99 6
39680 -192 133 9 219 162 10 236 178 12 239 182 13
39681 -246 186 14 242 186 14 239 182 13 236 178 12
39682 -224 166 10 206 145 10 192 133 9 154 121 60
39683 - 94 94 94 62 62 62 42 42 42 22 22 22
39684 - 14 14 14 6 6 6 0 0 0 0 0 0
39685 - 0 0 0 0 0 0 0 0 0 0 0 0
39686 - 0 0 0 0 0 0 0 0 0 0 0 0
39687 - 0 0 0 0 0 0 0 0 0 0 0 0
39688 - 0 0 0 0 0 0 0 0 0 6 6 6
39689 - 18 18 18 34 34 34 58 58 58 78 78 78
39690 -101 98 89 124 112 88 142 110 46 156 107 11
39691 -163 110 8 167 114 7 175 118 6 180 123 7
39692 -185 133 11 197 138 11 210 150 10 219 162 10
39693 -226 170 11 236 178 12 236 178 12 234 174 13
39694 -219 162 10 197 138 11 163 110 8 130 83 6
39695 - 91 60 6 10 10 10 2 2 6 2 2 6
39696 - 18 18 18 38 38 38 38 38 38 38 38 38
39697 - 38 38 38 38 38 38 38 38 38 38 38 38
39698 - 38 38 38 38 38 38 26 26 26 2 2 6
39699 - 2 2 6 6 6 6 70 47 6 137 92 6
39700 -175 118 6 200 144 11 219 162 10 230 174 11
39701 -234 174 13 230 174 11 219 162 10 210 150 10
39702 -192 133 9 163 110 8 124 112 88 82 82 82
39703 - 50 50 50 30 30 30 14 14 14 6 6 6
39704 - 0 0 0 0 0 0 0 0 0 0 0 0
39705 - 0 0 0 0 0 0 0 0 0 0 0 0
39706 - 0 0 0 0 0 0 0 0 0 0 0 0
39707 - 0 0 0 0 0 0 0 0 0 0 0 0
39708 - 0 0 0 0 0 0 0 0 0 0 0 0
39709 - 6 6 6 14 14 14 22 22 22 34 34 34
39710 - 42 42 42 58 58 58 74 74 74 86 86 86
39711 -101 98 89 122 102 70 130 98 46 121 87 25
39712 -137 92 6 152 99 6 163 110 8 180 123 7
39713 -185 133 11 197 138 11 206 145 10 200 144 11
39714 -180 123 7 156 107 11 130 83 6 104 69 6
39715 - 50 34 6 54 54 54 110 110 110 101 98 89
39716 - 86 86 86 82 82 82 78 78 78 78 78 78
39717 - 78 78 78 78 78 78 78 78 78 78 78 78
39718 - 78 78 78 82 82 82 86 86 86 94 94 94
39719 -106 106 106 101 101 101 86 66 34 124 80 6
39720 -156 107 11 180 123 7 192 133 9 200 144 11
39721 -206 145 10 200 144 11 192 133 9 175 118 6
39722 -139 102 15 109 106 95 70 70 70 42 42 42
39723 - 22 22 22 10 10 10 0 0 0 0 0 0
39724 - 0 0 0 0 0 0 0 0 0 0 0 0
39725 - 0 0 0 0 0 0 0 0 0 0 0 0
39726 - 0 0 0 0 0 0 0 0 0 0 0 0
39727 - 0 0 0 0 0 0 0 0 0 0 0 0
39728 - 0 0 0 0 0 0 0 0 0 0 0 0
39729 - 0 0 0 0 0 0 6 6 6 10 10 10
39730 - 14 14 14 22 22 22 30 30 30 38 38 38
39731 - 50 50 50 62 62 62 74 74 74 90 90 90
39732 -101 98 89 112 100 78 121 87 25 124 80 6
39733 -137 92 6 152 99 6 152 99 6 152 99 6
39734 -138 86 6 124 80 6 98 70 6 86 66 30
39735 -101 98 89 82 82 82 58 58 58 46 46 46
39736 - 38 38 38 34 34 34 34 34 34 34 34 34
39737 - 34 34 34 34 34 34 34 34 34 34 34 34
39738 - 34 34 34 34 34 34 38 38 38 42 42 42
39739 - 54 54 54 82 82 82 94 86 76 91 60 6
39740 -134 86 6 156 107 11 167 114 7 175 118 6
39741 -175 118 6 167 114 7 152 99 6 121 87 25
39742 -101 98 89 62 62 62 34 34 34 18 18 18
39743 - 6 6 6 0 0 0 0 0 0 0 0 0
39744 - 0 0 0 0 0 0 0 0 0 0 0 0
39745 - 0 0 0 0 0 0 0 0 0 0 0 0
39746 - 0 0 0 0 0 0 0 0 0 0 0 0
39747 - 0 0 0 0 0 0 0 0 0 0 0 0
39748 - 0 0 0 0 0 0 0 0 0 0 0 0
39749 - 0 0 0 0 0 0 0 0 0 0 0 0
39750 - 0 0 0 6 6 6 6 6 6 10 10 10
39751 - 18 18 18 22 22 22 30 30 30 42 42 42
39752 - 50 50 50 66 66 66 86 86 86 101 98 89
39753 -106 86 58 98 70 6 104 69 6 104 69 6
39754 -104 69 6 91 60 6 82 62 34 90 90 90
39755 - 62 62 62 38 38 38 22 22 22 14 14 14
39756 - 10 10 10 10 10 10 10 10 10 10 10 10
39757 - 10 10 10 10 10 10 6 6 6 10 10 10
39758 - 10 10 10 10 10 10 10 10 10 14 14 14
39759 - 22 22 22 42 42 42 70 70 70 89 81 66
39760 - 80 54 7 104 69 6 124 80 6 137 92 6
39761 -134 86 6 116 81 8 100 82 52 86 86 86
39762 - 58 58 58 30 30 30 14 14 14 6 6 6
39763 - 0 0 0 0 0 0 0 0 0 0 0 0
39764 - 0 0 0 0 0 0 0 0 0 0 0 0
39765 - 0 0 0 0 0 0 0 0 0 0 0 0
39766 - 0 0 0 0 0 0 0 0 0 0 0 0
39767 - 0 0 0 0 0 0 0 0 0 0 0 0
39768 - 0 0 0 0 0 0 0 0 0 0 0 0
39769 - 0 0 0 0 0 0 0 0 0 0 0 0
39770 - 0 0 0 0 0 0 0 0 0 0 0 0
39771 - 0 0 0 6 6 6 10 10 10 14 14 14
39772 - 18 18 18 26 26 26 38 38 38 54 54 54
39773 - 70 70 70 86 86 86 94 86 76 89 81 66
39774 - 89 81 66 86 86 86 74 74 74 50 50 50
39775 - 30 30 30 14 14 14 6 6 6 0 0 0
39776 - 0 0 0 0 0 0 0 0 0 0 0 0
39777 - 0 0 0 0 0 0 0 0 0 0 0 0
39778 - 0 0 0 0 0 0 0 0 0 0 0 0
39779 - 6 6 6 18 18 18 34 34 34 58 58 58
39780 - 82 82 82 89 81 66 89 81 66 89 81 66
39781 - 94 86 66 94 86 76 74 74 74 50 50 50
39782 - 26 26 26 14 14 14 6 6 6 0 0 0
39783 - 0 0 0 0 0 0 0 0 0 0 0 0
39784 - 0 0 0 0 0 0 0 0 0 0 0 0
39785 - 0 0 0 0 0 0 0 0 0 0 0 0
39786 - 0 0 0 0 0 0 0 0 0 0 0 0
39787 - 0 0 0 0 0 0 0 0 0 0 0 0
39788 - 0 0 0 0 0 0 0 0 0 0 0 0
39789 - 0 0 0 0 0 0 0 0 0 0 0 0
39790 - 0 0 0 0 0 0 0 0 0 0 0 0
39791 - 0 0 0 0 0 0 0 0 0 0 0 0
39792 - 6 6 6 6 6 6 14 14 14 18 18 18
39793 - 30 30 30 38 38 38 46 46 46 54 54 54
39794 - 50 50 50 42 42 42 30 30 30 18 18 18
39795 - 10 10 10 0 0 0 0 0 0 0 0 0
39796 - 0 0 0 0 0 0 0 0 0 0 0 0
39797 - 0 0 0 0 0 0 0 0 0 0 0 0
39798 - 0 0 0 0 0 0 0 0 0 0 0 0
39799 - 0 0 0 6 6 6 14 14 14 26 26 26
39800 - 38 38 38 50 50 50 58 58 58 58 58 58
39801 - 54 54 54 42 42 42 30 30 30 18 18 18
39802 - 10 10 10 0 0 0 0 0 0 0 0 0
39803 - 0 0 0 0 0 0 0 0 0 0 0 0
39804 - 0 0 0 0 0 0 0 0 0 0 0 0
39805 - 0 0 0 0 0 0 0 0 0 0 0 0
39806 - 0 0 0 0 0 0 0 0 0 0 0 0
39807 - 0 0 0 0 0 0 0 0 0 0 0 0
39808 - 0 0 0 0 0 0 0 0 0 0 0 0
39809 - 0 0 0 0 0 0 0 0 0 0 0 0
39810 - 0 0 0 0 0 0 0 0 0 0 0 0
39811 - 0 0 0 0 0 0 0 0 0 0 0 0
39812 - 0 0 0 0 0 0 0 0 0 6 6 6
39813 - 6 6 6 10 10 10 14 14 14 18 18 18
39814 - 18 18 18 14 14 14 10 10 10 6 6 6
39815 - 0 0 0 0 0 0 0 0 0 0 0 0
39816 - 0 0 0 0 0 0 0 0 0 0 0 0
39817 - 0 0 0 0 0 0 0 0 0 0 0 0
39818 - 0 0 0 0 0 0 0 0 0 0 0 0
39819 - 0 0 0 0 0 0 0 0 0 6 6 6
39820 - 14 14 14 18 18 18 22 22 22 22 22 22
39821 - 18 18 18 14 14 14 10 10 10 6 6 6
39822 - 0 0 0 0 0 0 0 0 0 0 0 0
39823 - 0 0 0 0 0 0 0 0 0 0 0 0
39824 - 0 0 0 0 0 0 0 0 0 0 0 0
39825 - 0 0 0 0 0 0 0 0 0 0 0 0
39826 - 0 0 0 0 0 0 0 0 0 0 0 0
39827 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39828 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39829 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39830 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39831 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39832 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39833 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39834 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39835 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39836 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39837 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39838 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39839 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39840 +4 4 4 4 4 4
39841 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39842 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39843 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39844 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39845 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39846 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39847 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39848 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39849 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39850 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39851 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39852 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39853 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39854 +4 4 4 4 4 4
39855 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39856 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39857 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39858 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39859 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39860 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39861 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39862 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39863 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39864 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39865 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39866 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39867 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39868 +4 4 4 4 4 4
39869 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39870 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39871 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39872 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39873 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39874 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39875 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39876 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39877 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39878 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39879 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39880 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39881 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39882 +4 4 4 4 4 4
39883 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39884 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39885 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39886 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39887 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39888 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39890 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39891 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39892 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39893 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39894 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39895 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39896 +4 4 4 4 4 4
39897 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39898 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39900 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39901 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39902 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39904 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39906 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39907 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39908 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39909 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39910 +4 4 4 4 4 4
39911 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39912 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39914 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39915 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
39916 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
39917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39918 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39919 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39920 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
39921 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39922 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
39923 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39924 +4 4 4 4 4 4
39925 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39926 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39928 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39929 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
39930 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
39931 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39932 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39933 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39934 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
39935 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
39936 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
39937 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39938 +4 4 4 4 4 4
39939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39940 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39942 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39943 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
39944 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
39945 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39946 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39948 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
39949 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
39950 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
39951 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
39952 +4 4 4 4 4 4
39953 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39954 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39955 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39956 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
39957 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
39958 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
39959 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
39960 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39961 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
39962 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
39963 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
39964 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
39965 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
39966 +4 4 4 4 4 4
39967 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39968 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39969 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39970 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
39971 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
39972 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
39973 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
39974 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39975 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
39976 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
39977 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
39978 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
39979 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
39980 +4 4 4 4 4 4
39981 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39982 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39983 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39984 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
39985 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
39986 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
39987 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
39988 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39989 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
39990 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
39991 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
39992 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
39993 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
39994 +4 4 4 4 4 4
39995 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39996 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39997 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
39998 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
39999 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40000 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40001 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
40002 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
40003 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
40004 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
40005 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
40006 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
40007 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
40008 +4 4 4 4 4 4
40009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40011 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
40012 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
40013 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
40014 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
40015 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
40016 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
40017 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
40018 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
40019 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
40020 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
40021 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
40022 +4 4 4 4 4 4
40023 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40025 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
40026 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
40027 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
40028 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
40029 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
40030 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
40031 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
40032 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
40033 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
40034 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
40035 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40036 +4 4 4 4 4 4
40037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40039 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
40040 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
40041 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
40042 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
40043 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
40044 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
40045 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
40046 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
40047 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
40048 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
40049 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
40050 +4 4 4 4 4 4
40051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40052 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
40053 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
40054 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
40055 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
40056 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
40057 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
40058 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
40059 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
40060 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
40061 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
40062 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
40063 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
40064 +4 4 4 4 4 4
40065 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40066 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
40067 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
40068 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
40069 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40070 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
40071 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
40072 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
40073 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
40074 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
40075 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
40076 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
40077 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
40078 +0 0 0 4 4 4
40079 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40080 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
40081 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
40082 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
40083 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
40084 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
40085 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
40086 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
40087 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
40088 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
40089 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
40090 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
40091 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
40092 +2 0 0 0 0 0
40093 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
40094 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
40095 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
40096 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
40097 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
40098 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
40099 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
40100 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
40101 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
40102 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
40103 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
40104 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
40105 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
40106 +37 38 37 0 0 0
40107 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40108 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
40109 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
40110 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
40111 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
40112 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
40113 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
40114 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
40115 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
40116 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
40117 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
40118 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
40119 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
40120 +85 115 134 4 0 0
40121 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
40122 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
40123 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
40124 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
40125 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
40126 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
40127 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
40128 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
40129 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
40130 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
40131 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
40132 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
40133 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
40134 +60 73 81 4 0 0
40135 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
40136 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
40137 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
40138 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
40139 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
40140 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
40141 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
40142 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
40143 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
40144 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
40145 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
40146 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
40147 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
40148 +16 19 21 4 0 0
40149 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
40150 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
40151 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
40152 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
40153 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
40154 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
40155 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
40156 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
40157 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
40158 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
40159 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
40160 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
40161 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
40162 +4 0 0 4 3 3
40163 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
40164 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
40165 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
40166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
40167 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
40168 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
40169 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
40170 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
40171 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
40172 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
40173 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
40174 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
40175 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
40176 +3 2 2 4 4 4
40177 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
40178 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
40179 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
40180 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40181 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
40182 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
40183 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
40184 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
40185 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
40186 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
40187 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
40188 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
40189 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
40190 +4 4 4 4 4 4
40191 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
40192 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
40193 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
40194 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
40195 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
40196 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
40197 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
40198 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
40199 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
40200 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
40201 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
40202 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
40203 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
40204 +4 4 4 4 4 4
40205 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
40206 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
40207 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
40208 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
40209 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
40210 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40211 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
40212 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
40213 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
40214 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
40215 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
40216 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
40217 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
40218 +5 5 5 5 5 5
40219 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
40220 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
40221 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
40222 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
40223 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
40224 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40225 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
40226 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
40227 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
40228 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
40229 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
40230 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
40231 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40232 +5 5 5 4 4 4
40233 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
40234 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
40235 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
40236 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
40237 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40238 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
40239 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
40240 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
40241 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
40242 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
40243 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
40244 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40245 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40246 +4 4 4 4 4 4
40247 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
40248 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
40249 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
40250 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
40251 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
40252 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40253 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40254 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
40255 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
40256 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
40257 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
40258 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
40259 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40260 +4 4 4 4 4 4
40261 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
40262 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
40263 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
40264 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
40265 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40266 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
40267 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
40268 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
40269 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
40270 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
40271 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
40272 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40273 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40274 +4 4 4 4 4 4
40275 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
40276 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
40277 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
40278 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
40279 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40280 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40281 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40282 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
40283 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
40284 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
40285 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
40286 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40287 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40288 +4 4 4 4 4 4
40289 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
40290 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
40291 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
40292 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
40293 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40294 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
40295 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40296 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
40297 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
40298 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
40299 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40300 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40301 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40302 +4 4 4 4 4 4
40303 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
40304 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
40305 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
40306 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
40307 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40308 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
40309 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
40310 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
40311 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
40312 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
40313 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
40314 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40315 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40316 +4 4 4 4 4 4
40317 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
40318 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
40319 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
40320 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
40321 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40322 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
40323 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
40324 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
40325 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
40326 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
40327 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
40328 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40329 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40330 +4 4 4 4 4 4
40331 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
40332 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
40333 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
40334 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40335 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
40336 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
40337 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
40338 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
40339 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
40340 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
40341 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40342 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40343 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40344 +4 4 4 4 4 4
40345 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
40346 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
40347 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
40348 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40349 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40350 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
40351 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
40352 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
40353 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
40354 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
40355 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40356 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40357 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40358 +4 4 4 4 4 4
40359 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
40360 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
40361 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40362 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40363 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40364 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
40365 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
40366 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
40367 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
40368 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
40369 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40370 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40371 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40372 +4 4 4 4 4 4
40373 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
40374 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
40375 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40376 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40377 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40378 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
40379 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
40380 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
40381 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40382 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40383 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40384 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40385 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40386 +4 4 4 4 4 4
40387 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40388 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
40389 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40390 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
40391 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
40392 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
40393 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
40394 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
40395 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40396 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40397 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40398 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40399 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40400 +4 4 4 4 4 4
40401 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40402 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40403 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40404 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40405 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40406 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40407 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40408 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40409 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40410 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40411 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40412 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40413 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40414 +4 4 4 4 4 4
40415 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
40416 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
40417 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40418 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
40419 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
40420 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
40421 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
40422 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
40423 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40424 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40425 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40426 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40427 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40428 +4 4 4 4 4 4
40429 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40430 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40431 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40432 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40433 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40434 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40435 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40436 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40437 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40438 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40439 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40440 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40441 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40442 +4 4 4 4 4 4
40443 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40444 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40445 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40446 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40447 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40448 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40449 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40450 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40451 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40452 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40453 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40454 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40455 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40456 +4 4 4 4 4 4
40457 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
40458 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
40459 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40460 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
40461 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
40462 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
40463 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
40464 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
40465 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
40466 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40467 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40468 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40469 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40470 +4 4 4 4 4 4
40471 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40472 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
40473 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
40474 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
40475 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
40476 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
40477 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
40478 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
40479 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40480 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40481 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40482 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40483 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40484 +4 4 4 4 4 4
40485 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40486 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
40487 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40488 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
40489 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
40490 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
40491 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
40492 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
40493 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40494 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40495 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40496 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40497 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40498 +4 4 4 4 4 4
40499 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40500 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
40501 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
40502 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
40503 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
40504 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
40505 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40506 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
40507 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40508 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40509 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40510 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40511 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40512 +4 4 4 4 4 4
40513 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40514 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
40515 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
40516 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40517 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
40518 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
40519 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40520 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
40521 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40522 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40523 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40524 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40525 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40526 +4 4 4 4 4 4
40527 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40528 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
40529 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
40530 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
40531 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
40532 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
40533 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
40534 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
40535 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
40536 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40537 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40538 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40539 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40540 +4 4 4 4 4 4
40541 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40542 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
40543 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
40544 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
40545 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
40546 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
40547 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
40548 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
40549 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
40550 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40551 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40552 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40553 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40554 +4 4 4 4 4 4
40555 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
40556 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
40557 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
40558 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
40559 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40560 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
40561 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
40562 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
40563 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
40564 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40565 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40566 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40567 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40568 +4 4 4 4 4 4
40569 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40570 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
40571 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
40572 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
40573 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
40574 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
40575 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
40576 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
40577 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
40578 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40579 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40580 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40581 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40582 +4 4 4 4 4 4
40583 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
40584 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
40585 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
40586 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
40587 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
40588 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
40589 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
40590 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
40591 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
40592 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
40593 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40594 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40595 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40596 +4 4 4 4 4 4
40597 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
40598 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40599 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
40600 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
40601 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
40602 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
40603 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
40604 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
40605 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
40606 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
40607 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40608 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40609 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40610 +4 4 4 4 4 4
40611 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
40612 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40613 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
40614 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
40615 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
40616 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
40617 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40618 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
40619 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
40620 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
40621 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40622 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40623 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40624 +4 4 4 4 4 4
40625 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
40626 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
40627 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
40628 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
40629 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
40630 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
40631 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
40632 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
40633 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
40634 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
40635 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40636 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40637 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40638 +4 4 4 4 4 4
40639 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
40640 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
40641 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40642 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
40643 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
40644 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
40645 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
40646 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
40647 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
40648 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
40649 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40650 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40651 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40652 +4 4 4 4 4 4
40653 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40654 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
40655 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
40656 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
40657 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
40658 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
40659 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
40660 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
40661 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
40662 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40663 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40664 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40665 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40666 +4 4 4 4 4 4
40667 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
40668 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
40669 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
40670 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
40671 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
40672 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
40673 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
40674 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
40675 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
40676 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40677 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40678 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40679 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40680 +4 4 4 4 4 4
40681 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
40682 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
40683 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
40684 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
40685 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
40686 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
40687 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
40688 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
40689 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40690 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40691 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40692 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40693 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40694 +4 4 4 4 4 4
40695 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
40696 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40697 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
40698 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40699 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
40700 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
40701 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
40702 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
40703 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
40704 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40705 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40706 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40707 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40708 +4 4 4 4 4 4
40709 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
40710 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
40711 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
40712 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
40713 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
40714 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
40715 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
40716 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
40717 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
40718 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40719 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40720 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40721 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40722 +4 4 4 4 4 4
40723 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40724 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
40725 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
40726 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
40727 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
40728 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
40729 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
40730 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
40731 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40732 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40733 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40734 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40735 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40736 +4 4 4 4 4 4
40737 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
40738 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
40739 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40740 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
40741 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
40742 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
40743 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
40744 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
40745 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40746 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40747 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40748 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40749 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40750 +4 4 4 4 4 4
40751 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40752 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
40753 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
40754 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
40755 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
40756 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
40757 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
40758 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40759 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40760 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40761 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40762 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40763 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40764 +4 4 4 4 4 4
40765 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40766 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
40767 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40768 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
40769 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
40770 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
40771 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
40772 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
40773 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40774 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40775 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40776 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40777 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40778 +4 4 4 4 4 4
40779 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40780 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
40781 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
40782 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
40783 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
40784 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
40785 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
40786 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
40787 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40788 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40789 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40790 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40791 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40792 +4 4 4 4 4 4
40793 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40794 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40795 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
40796 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40797 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
40798 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
40799 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
40800 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40801 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40802 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40803 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40804 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40805 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40806 +4 4 4 4 4 4
40807 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40808 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40809 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40810 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
40811 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
40812 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
40813 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
40814 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40815 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40816 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40817 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40818 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40819 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40820 +4 4 4 4 4 4
40821 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40822 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40823 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40824 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40825 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
40826 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
40827 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
40828 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40829 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40830 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40831 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40832 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40833 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40834 +4 4 4 4 4 4
40835 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40836 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40837 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40838 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40839 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40840 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
40841 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
40842 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40843 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40844 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40845 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40846 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40847 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40848 +4 4 4 4 4 4
40849 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40850 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40851 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40852 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40853 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40854 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40855 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
40856 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40857 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40858 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40859 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40860 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40861 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40862 +4 4 4 4 4 4
40863 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40864 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40865 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40866 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
40867 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
40868 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
40869 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
40870 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40871 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40872 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40873 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40874 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40875 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40876 +4 4 4 4 4 4
40877 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40878 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40879 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40880 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40881 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
40882 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40883 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40884 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40885 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40886 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40887 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40888 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40890 +4 4 4 4 4 4
40891 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40892 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40893 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40894 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40895 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
40896 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
40897 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40898 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40900 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40901 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40902 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40904 +4 4 4 4 4 4
40905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40906 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40907 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40908 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40909 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
40910 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
40911 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40912 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40914 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40915 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40916 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40918 +4 4 4 4 4 4
40919 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40920 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40921 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40922 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40923 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
40924 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
40925 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40926 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40928 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40929 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40930 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40931 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40932 +4 4 4 4 4 4
40933 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40934 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40935 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40936 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40937 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40938 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
40939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40940 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40942 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40943 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40944 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40945 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40946 +4 4 4 4 4 4
40947 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
40948 index a159b63..4ab532d 100644
40949 --- a/drivers/video/udlfb.c
40950 +++ b/drivers/video/udlfb.c
40951 @@ -620,11 +620,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
40952 dlfb_urb_completion(urb);
40953
40954 error:
40955 - atomic_add(bytes_sent, &dev->bytes_sent);
40956 - atomic_add(bytes_identical, &dev->bytes_identical);
40957 - atomic_add(width*height*2, &dev->bytes_rendered);
40958 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
40959 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
40960 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
40961 end_cycles = get_cycles();
40962 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
40963 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
40964 >> 10)), /* Kcycles */
40965 &dev->cpu_kcycles_used);
40966
40967 @@ -745,11 +745,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
40968 dlfb_urb_completion(urb);
40969
40970 error:
40971 - atomic_add(bytes_sent, &dev->bytes_sent);
40972 - atomic_add(bytes_identical, &dev->bytes_identical);
40973 - atomic_add(bytes_rendered, &dev->bytes_rendered);
40974 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
40975 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
40976 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
40977 end_cycles = get_cycles();
40978 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
40979 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
40980 >> 10)), /* Kcycles */
40981 &dev->cpu_kcycles_used);
40982 }
40983 @@ -1373,7 +1373,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
40984 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40985 struct dlfb_data *dev = fb_info->par;
40986 return snprintf(buf, PAGE_SIZE, "%u\n",
40987 - atomic_read(&dev->bytes_rendered));
40988 + atomic_read_unchecked(&dev->bytes_rendered));
40989 }
40990
40991 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
40992 @@ -1381,7 +1381,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
40993 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40994 struct dlfb_data *dev = fb_info->par;
40995 return snprintf(buf, PAGE_SIZE, "%u\n",
40996 - atomic_read(&dev->bytes_identical));
40997 + atomic_read_unchecked(&dev->bytes_identical));
40998 }
40999
41000 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41001 @@ -1389,7 +1389,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41002 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41003 struct dlfb_data *dev = fb_info->par;
41004 return snprintf(buf, PAGE_SIZE, "%u\n",
41005 - atomic_read(&dev->bytes_sent));
41006 + atomic_read_unchecked(&dev->bytes_sent));
41007 }
41008
41009 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41010 @@ -1397,7 +1397,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41011 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41012 struct dlfb_data *dev = fb_info->par;
41013 return snprintf(buf, PAGE_SIZE, "%u\n",
41014 - atomic_read(&dev->cpu_kcycles_used));
41015 + atomic_read_unchecked(&dev->cpu_kcycles_used));
41016 }
41017
41018 static ssize_t edid_show(
41019 @@ -1457,10 +1457,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
41020 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41021 struct dlfb_data *dev = fb_info->par;
41022
41023 - atomic_set(&dev->bytes_rendered, 0);
41024 - atomic_set(&dev->bytes_identical, 0);
41025 - atomic_set(&dev->bytes_sent, 0);
41026 - atomic_set(&dev->cpu_kcycles_used, 0);
41027 + atomic_set_unchecked(&dev->bytes_rendered, 0);
41028 + atomic_set_unchecked(&dev->bytes_identical, 0);
41029 + atomic_set_unchecked(&dev->bytes_sent, 0);
41030 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
41031
41032 return count;
41033 }
41034 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
41035 index b0e2a42..e2df3ad 100644
41036 --- a/drivers/video/uvesafb.c
41037 +++ b/drivers/video/uvesafb.c
41038 @@ -19,6 +19,7 @@
41039 #include <linux/io.h>
41040 #include <linux/mutex.h>
41041 #include <linux/slab.h>
41042 +#include <linux/moduleloader.h>
41043 #include <video/edid.h>
41044 #include <video/uvesafb.h>
41045 #ifdef CONFIG_X86
41046 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
41047 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
41048 par->pmi_setpal = par->ypan = 0;
41049 } else {
41050 +
41051 +#ifdef CONFIG_PAX_KERNEXEC
41052 +#ifdef CONFIG_MODULES
41053 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
41054 +#endif
41055 + if (!par->pmi_code) {
41056 + par->pmi_setpal = par->ypan = 0;
41057 + return 0;
41058 + }
41059 +#endif
41060 +
41061 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
41062 + task->t.regs.edi);
41063 +
41064 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41065 + pax_open_kernel();
41066 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
41067 + pax_close_kernel();
41068 +
41069 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
41070 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
41071 +#else
41072 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
41073 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
41074 +#endif
41075 +
41076 printk(KERN_INFO "uvesafb: protected mode interface info at "
41077 "%04x:%04x\n",
41078 (u16)task->t.regs.es, (u16)task->t.regs.edi);
41079 @@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
41080 par->ypan = ypan;
41081
41082 if (par->pmi_setpal || par->ypan) {
41083 +#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
41084 if (__supported_pte_mask & _PAGE_NX) {
41085 par->pmi_setpal = par->ypan = 0;
41086 printk(KERN_WARNING "uvesafb: NX protection is actively."
41087 "We have better not to use the PMI.\n");
41088 - } else {
41089 + } else
41090 +#endif
41091 uvesafb_vbe_getpmi(task, par);
41092 - }
41093 }
41094 #else
41095 /* The protected mode interface is not available on non-x86. */
41096 @@ -1836,6 +1860,11 @@ out:
41097 if (par->vbe_modes)
41098 kfree(par->vbe_modes);
41099
41100 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41101 + if (par->pmi_code)
41102 + module_free_exec(NULL, par->pmi_code);
41103 +#endif
41104 +
41105 framebuffer_release(info);
41106 return err;
41107 }
41108 @@ -1862,6 +1891,12 @@ static int uvesafb_remove(struct platform_device *dev)
41109 kfree(par->vbe_state_orig);
41110 if (par->vbe_state_saved)
41111 kfree(par->vbe_state_saved);
41112 +
41113 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41114 + if (par->pmi_code)
41115 + module_free_exec(NULL, par->pmi_code);
41116 +#endif
41117 +
41118 }
41119
41120 framebuffer_release(info);
41121 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
41122 index 501b340..86bd4cf 100644
41123 --- a/drivers/video/vesafb.c
41124 +++ b/drivers/video/vesafb.c
41125 @@ -9,6 +9,7 @@
41126 */
41127
41128 #include <linux/module.h>
41129 +#include <linux/moduleloader.h>
41130 #include <linux/kernel.h>
41131 #include <linux/errno.h>
41132 #include <linux/string.h>
41133 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
41134 static int vram_total __initdata; /* Set total amount of memory */
41135 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
41136 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
41137 -static void (*pmi_start)(void) __read_mostly;
41138 -static void (*pmi_pal) (void) __read_mostly;
41139 +static void (*pmi_start)(void) __read_only;
41140 +static void (*pmi_pal) (void) __read_only;
41141 static int depth __read_mostly;
41142 static int vga_compat __read_mostly;
41143 /* --------------------------------------------------------------------- */
41144 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
41145 unsigned int size_vmode;
41146 unsigned int size_remap;
41147 unsigned int size_total;
41148 + void *pmi_code = NULL;
41149
41150 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
41151 return -ENODEV;
41152 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
41153 size_remap = size_total;
41154 vesafb_fix.smem_len = size_remap;
41155
41156 -#ifndef __i386__
41157 - screen_info.vesapm_seg = 0;
41158 -#endif
41159 -
41160 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
41161 printk(KERN_WARNING
41162 "vesafb: cannot reserve video memory at 0x%lx\n",
41163 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
41164 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
41165 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
41166
41167 +#ifdef __i386__
41168 +
41169 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41170 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
41171 + if (!pmi_code)
41172 +#elif !defined(CONFIG_PAX_KERNEXEC)
41173 + if (0)
41174 +#endif
41175 +
41176 +#endif
41177 + screen_info.vesapm_seg = 0;
41178 +
41179 if (screen_info.vesapm_seg) {
41180 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
41181 - screen_info.vesapm_seg,screen_info.vesapm_off);
41182 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
41183 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
41184 }
41185
41186 if (screen_info.vesapm_seg < 0xc000)
41187 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
41188
41189 if (ypan || pmi_setpal) {
41190 unsigned short *pmi_base;
41191 +
41192 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
41193 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
41194 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
41195 +
41196 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41197 + pax_open_kernel();
41198 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
41199 +#else
41200 + pmi_code = pmi_base;
41201 +#endif
41202 +
41203 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
41204 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
41205 +
41206 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41207 + pmi_start = ktva_ktla(pmi_start);
41208 + pmi_pal = ktva_ktla(pmi_pal);
41209 + pax_close_kernel();
41210 +#endif
41211 +
41212 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
41213 if (pmi_base[3]) {
41214 printk(KERN_INFO "vesafb: pmi: ports = ");
41215 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
41216 info->node, info->fix.id);
41217 return 0;
41218 err:
41219 +
41220 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41221 + module_free_exec(NULL, pmi_code);
41222 +#endif
41223 +
41224 if (info->screen_base)
41225 iounmap(info->screen_base);
41226 framebuffer_release(info);
41227 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
41228 index 88714ae..16c2e11 100644
41229 --- a/drivers/video/via/via_clock.h
41230 +++ b/drivers/video/via/via_clock.h
41231 @@ -56,7 +56,7 @@ struct via_clock {
41232
41233 void (*set_engine_pll_state)(u8 state);
41234 void (*set_engine_pll)(struct via_pll_config config);
41235 -};
41236 +} __no_const;
41237
41238
41239 static inline u32 get_pll_internal_frequency(u32 ref_freq,
41240 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
41241 index e56c934..fc22f4b 100644
41242 --- a/drivers/xen/xen-pciback/conf_space.h
41243 +++ b/drivers/xen/xen-pciback/conf_space.h
41244 @@ -44,15 +44,15 @@ struct config_field {
41245 struct {
41246 conf_dword_write write;
41247 conf_dword_read read;
41248 - } dw;
41249 + } __no_const dw;
41250 struct {
41251 conf_word_write write;
41252 conf_word_read read;
41253 - } w;
41254 + } __no_const w;
41255 struct {
41256 conf_byte_write write;
41257 conf_byte_read read;
41258 - } b;
41259 + } __no_const b;
41260 } u;
41261 struct list_head list;
41262 };
41263 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
41264 index 014c8dd..6f3dfe6 100644
41265 --- a/fs/9p/vfs_inode.c
41266 +++ b/fs/9p/vfs_inode.c
41267 @@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41268 void
41269 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41270 {
41271 - char *s = nd_get_link(nd);
41272 + const char *s = nd_get_link(nd);
41273
41274 p9_debug(P9_DEBUG_VFS, " %s %s\n",
41275 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
41276 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
41277 index e95d1b6..3454244 100644
41278 --- a/fs/Kconfig.binfmt
41279 +++ b/fs/Kconfig.binfmt
41280 @@ -89,7 +89,7 @@ config HAVE_AOUT
41281
41282 config BINFMT_AOUT
41283 tristate "Kernel support for a.out and ECOFF binaries"
41284 - depends on HAVE_AOUT
41285 + depends on HAVE_AOUT && BROKEN
41286 ---help---
41287 A.out (Assembler.OUTput) is a set of formats for libraries and
41288 executables used in the earliest versions of UNIX. Linux used
41289 diff --git a/fs/aio.c b/fs/aio.c
41290 index e7f2fad..15ad8a4 100644
41291 --- a/fs/aio.c
41292 +++ b/fs/aio.c
41293 @@ -118,7 +118,7 @@ static int aio_setup_ring(struct kioctx *ctx)
41294 size += sizeof(struct io_event) * nr_events;
41295 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
41296
41297 - if (nr_pages < 0)
41298 + if (nr_pages <= 0)
41299 return -EINVAL;
41300
41301 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
41302 @@ -1440,18 +1440,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
41303 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41304 {
41305 ssize_t ret;
41306 + struct iovec iovstack;
41307
41308 #ifdef CONFIG_COMPAT
41309 if (compat)
41310 ret = compat_rw_copy_check_uvector(type,
41311 (struct compat_iovec __user *)kiocb->ki_buf,
41312 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41313 + kiocb->ki_nbytes, 1, &iovstack,
41314 &kiocb->ki_iovec, 1);
41315 else
41316 #endif
41317 ret = rw_copy_check_uvector(type,
41318 (struct iovec __user *)kiocb->ki_buf,
41319 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41320 + kiocb->ki_nbytes, 1, &iovstack,
41321 &kiocb->ki_iovec, 1);
41322 if (ret < 0)
41323 goto out;
41324 @@ -1460,6 +1461,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41325 if (ret < 0)
41326 goto out;
41327
41328 + if (kiocb->ki_iovec == &iovstack) {
41329 + kiocb->ki_inline_vec = iovstack;
41330 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
41331 + }
41332 kiocb->ki_nr_segs = kiocb->ki_nbytes;
41333 kiocb->ki_cur_seg = 0;
41334 /* ki_nbytes/left now reflect bytes instead of segs */
41335 diff --git a/fs/attr.c b/fs/attr.c
41336 index d94d1b6..f9bccd6 100644
41337 --- a/fs/attr.c
41338 +++ b/fs/attr.c
41339 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
41340 unsigned long limit;
41341
41342 limit = rlimit(RLIMIT_FSIZE);
41343 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
41344 if (limit != RLIM_INFINITY && offset > limit)
41345 goto out_sig;
41346 if (offset > inode->i_sb->s_maxbytes)
41347 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
41348 index da8876d..9f3e6d8 100644
41349 --- a/fs/autofs4/waitq.c
41350 +++ b/fs/autofs4/waitq.c
41351 @@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
41352 {
41353 unsigned long sigpipe, flags;
41354 mm_segment_t fs;
41355 - const char *data = (const char *)addr;
41356 + const char __user *data = (const char __force_user *)addr;
41357 ssize_t wr = 0;
41358
41359 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
41360 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
41361 index e18da23..affc30e 100644
41362 --- a/fs/befs/linuxvfs.c
41363 +++ b/fs/befs/linuxvfs.c
41364 @@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41365 {
41366 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
41367 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
41368 - char *link = nd_get_link(nd);
41369 + const char *link = nd_get_link(nd);
41370 if (!IS_ERR(link))
41371 kfree(link);
41372 }
41373 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
41374 index d146e18..12d1bd1 100644
41375 --- a/fs/binfmt_aout.c
41376 +++ b/fs/binfmt_aout.c
41377 @@ -16,6 +16,7 @@
41378 #include <linux/string.h>
41379 #include <linux/fs.h>
41380 #include <linux/file.h>
41381 +#include <linux/security.h>
41382 #include <linux/stat.h>
41383 #include <linux/fcntl.h>
41384 #include <linux/ptrace.h>
41385 @@ -83,6 +84,8 @@ static int aout_core_dump(struct coredump_params *cprm)
41386 #endif
41387 # define START_STACK(u) ((void __user *)u.start_stack)
41388
41389 + memset(&dump, 0, sizeof(dump));
41390 +
41391 fs = get_fs();
41392 set_fs(KERNEL_DS);
41393 has_dumped = 1;
41394 @@ -94,10 +97,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41395
41396 /* If the size of the dump file exceeds the rlimit, then see what would happen
41397 if we wrote the stack, but not the data area. */
41398 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41399 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41400 dump.u_dsize = 0;
41401
41402 /* Make sure we have enough room to write the stack and data areas. */
41403 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41404 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41405 dump.u_ssize = 0;
41406
41407 @@ -231,6 +236,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41408 rlim = rlimit(RLIMIT_DATA);
41409 if (rlim >= RLIM_INFINITY)
41410 rlim = ~0;
41411 +
41412 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41413 if (ex.a_data + ex.a_bss > rlim)
41414 return -ENOMEM;
41415
41416 @@ -265,6 +272,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41417
41418 install_exec_creds(bprm);
41419
41420 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41421 + current->mm->pax_flags = 0UL;
41422 +#endif
41423 +
41424 +#ifdef CONFIG_PAX_PAGEEXEC
41425 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41426 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41427 +
41428 +#ifdef CONFIG_PAX_EMUTRAMP
41429 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41430 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41431 +#endif
41432 +
41433 +#ifdef CONFIG_PAX_MPROTECT
41434 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41435 + current->mm->pax_flags |= MF_PAX_MPROTECT;
41436 +#endif
41437 +
41438 + }
41439 +#endif
41440 +
41441 if (N_MAGIC(ex) == OMAGIC) {
41442 unsigned long text_addr, map_size;
41443 loff_t pos;
41444 @@ -330,7 +358,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41445 }
41446
41447 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41448 - PROT_READ | PROT_WRITE | PROT_EXEC,
41449 + PROT_READ | PROT_WRITE,
41450 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41451 fd_offset + ex.a_text);
41452 if (error != N_DATADDR(ex)) {
41453 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41454 index 16f7354..185d8dc 100644
41455 --- a/fs/binfmt_elf.c
41456 +++ b/fs/binfmt_elf.c
41457 @@ -32,6 +32,7 @@
41458 #include <linux/elf.h>
41459 #include <linux/utsname.h>
41460 #include <linux/coredump.h>
41461 +#include <linux/xattr.h>
41462 #include <asm/uaccess.h>
41463 #include <asm/param.h>
41464 #include <asm/page.h>
41465 @@ -52,6 +53,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41466 #define elf_core_dump NULL
41467 #endif
41468
41469 +#ifdef CONFIG_PAX_MPROTECT
41470 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41471 +#endif
41472 +
41473 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41474 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41475 #else
41476 @@ -71,6 +76,11 @@ static struct linux_binfmt elf_format = {
41477 .load_binary = load_elf_binary,
41478 .load_shlib = load_elf_library,
41479 .core_dump = elf_core_dump,
41480 +
41481 +#ifdef CONFIG_PAX_MPROTECT
41482 + .handle_mprotect= elf_handle_mprotect,
41483 +#endif
41484 +
41485 .min_coredump = ELF_EXEC_PAGESIZE,
41486 };
41487
41488 @@ -78,6 +88,8 @@ static struct linux_binfmt elf_format = {
41489
41490 static int set_brk(unsigned long start, unsigned long end)
41491 {
41492 + unsigned long e = end;
41493 +
41494 start = ELF_PAGEALIGN(start);
41495 end = ELF_PAGEALIGN(end);
41496 if (end > start) {
41497 @@ -86,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
41498 if (BAD_ADDR(addr))
41499 return addr;
41500 }
41501 - current->mm->start_brk = current->mm->brk = end;
41502 + current->mm->start_brk = current->mm->brk = e;
41503 return 0;
41504 }
41505
41506 @@ -147,12 +159,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41507 elf_addr_t __user *u_rand_bytes;
41508 const char *k_platform = ELF_PLATFORM;
41509 const char *k_base_platform = ELF_BASE_PLATFORM;
41510 - unsigned char k_rand_bytes[16];
41511 + u32 k_rand_bytes[4];
41512 int items;
41513 elf_addr_t *elf_info;
41514 int ei_index = 0;
41515 const struct cred *cred = current_cred();
41516 struct vm_area_struct *vma;
41517 + unsigned long saved_auxv[AT_VECTOR_SIZE];
41518
41519 /*
41520 * In some cases (e.g. Hyper-Threading), we want to avoid L1
41521 @@ -194,8 +207,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41522 * Generate 16 random bytes for userspace PRNG seeding.
41523 */
41524 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41525 - u_rand_bytes = (elf_addr_t __user *)
41526 - STACK_ALLOC(p, sizeof(k_rand_bytes));
41527 + srandom32(k_rand_bytes[0] ^ random32());
41528 + srandom32(k_rand_bytes[1] ^ random32());
41529 + srandom32(k_rand_bytes[2] ^ random32());
41530 + srandom32(k_rand_bytes[3] ^ random32());
41531 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
41532 + u_rand_bytes = (elf_addr_t __user *) p;
41533 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
41534 return -EFAULT;
41535
41536 @@ -307,9 +324,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41537 return -EFAULT;
41538 current->mm->env_end = p;
41539
41540 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
41541 +
41542 /* Put the elf_info on the stack in the right place. */
41543 sp = (elf_addr_t __user *)envp + 1;
41544 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
41545 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
41546 return -EFAULT;
41547 return 0;
41548 }
41549 @@ -380,10 +399,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41550 {
41551 struct elf_phdr *elf_phdata;
41552 struct elf_phdr *eppnt;
41553 - unsigned long load_addr = 0;
41554 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
41555 int load_addr_set = 0;
41556 unsigned long last_bss = 0, elf_bss = 0;
41557 - unsigned long error = ~0UL;
41558 + unsigned long error = -EINVAL;
41559 unsigned long total_size;
41560 int retval, i, size;
41561
41562 @@ -429,6 +448,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41563 goto out_close;
41564 }
41565
41566 +#ifdef CONFIG_PAX_SEGMEXEC
41567 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
41568 + pax_task_size = SEGMEXEC_TASK_SIZE;
41569 +#endif
41570 +
41571 eppnt = elf_phdata;
41572 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
41573 if (eppnt->p_type == PT_LOAD) {
41574 @@ -472,8 +496,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41575 k = load_addr + eppnt->p_vaddr;
41576 if (BAD_ADDR(k) ||
41577 eppnt->p_filesz > eppnt->p_memsz ||
41578 - eppnt->p_memsz > TASK_SIZE ||
41579 - TASK_SIZE - eppnt->p_memsz < k) {
41580 + eppnt->p_memsz > pax_task_size ||
41581 + pax_task_size - eppnt->p_memsz < k) {
41582 error = -ENOMEM;
41583 goto out_close;
41584 }
41585 @@ -525,6 +549,351 @@ out:
41586 return error;
41587 }
41588
41589 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
41590 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
41591 +{
41592 + unsigned long pax_flags = 0UL;
41593 +
41594 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41595 +
41596 +#ifdef CONFIG_PAX_PAGEEXEC
41597 + if (elf_phdata->p_flags & PF_PAGEEXEC)
41598 + pax_flags |= MF_PAX_PAGEEXEC;
41599 +#endif
41600 +
41601 +#ifdef CONFIG_PAX_SEGMEXEC
41602 + if (elf_phdata->p_flags & PF_SEGMEXEC)
41603 + pax_flags |= MF_PAX_SEGMEXEC;
41604 +#endif
41605 +
41606 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41607 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41608 + if ((__supported_pte_mask & _PAGE_NX))
41609 + pax_flags &= ~MF_PAX_SEGMEXEC;
41610 + else
41611 + pax_flags &= ~MF_PAX_PAGEEXEC;
41612 + }
41613 +#endif
41614 +
41615 +#ifdef CONFIG_PAX_EMUTRAMP
41616 + if (elf_phdata->p_flags & PF_EMUTRAMP)
41617 + pax_flags |= MF_PAX_EMUTRAMP;
41618 +#endif
41619 +
41620 +#ifdef CONFIG_PAX_MPROTECT
41621 + if (elf_phdata->p_flags & PF_MPROTECT)
41622 + pax_flags |= MF_PAX_MPROTECT;
41623 +#endif
41624 +
41625 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41626 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
41627 + pax_flags |= MF_PAX_RANDMMAP;
41628 +#endif
41629 +
41630 +#endif
41631 +
41632 + return pax_flags;
41633 +}
41634 +
41635 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
41636 +{
41637 + unsigned long pax_flags = 0UL;
41638 +
41639 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41640 +
41641 +#ifdef CONFIG_PAX_PAGEEXEC
41642 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
41643 + pax_flags |= MF_PAX_PAGEEXEC;
41644 +#endif
41645 +
41646 +#ifdef CONFIG_PAX_SEGMEXEC
41647 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
41648 + pax_flags |= MF_PAX_SEGMEXEC;
41649 +#endif
41650 +
41651 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41652 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41653 + if ((__supported_pte_mask & _PAGE_NX))
41654 + pax_flags &= ~MF_PAX_SEGMEXEC;
41655 + else
41656 + pax_flags &= ~MF_PAX_PAGEEXEC;
41657 + }
41658 +#endif
41659 +
41660 +#ifdef CONFIG_PAX_EMUTRAMP
41661 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
41662 + pax_flags |= MF_PAX_EMUTRAMP;
41663 +#endif
41664 +
41665 +#ifdef CONFIG_PAX_MPROTECT
41666 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
41667 + pax_flags |= MF_PAX_MPROTECT;
41668 +#endif
41669 +
41670 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41671 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
41672 + pax_flags |= MF_PAX_RANDMMAP;
41673 +#endif
41674 +
41675 +#endif
41676 +
41677 + return pax_flags;
41678 +}
41679 +
41680 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
41681 +{
41682 + unsigned long pax_flags = 0UL;
41683 +
41684 +#ifdef CONFIG_PAX_EI_PAX
41685 +
41686 +#ifdef CONFIG_PAX_PAGEEXEC
41687 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
41688 + pax_flags |= MF_PAX_PAGEEXEC;
41689 +#endif
41690 +
41691 +#ifdef CONFIG_PAX_SEGMEXEC
41692 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
41693 + pax_flags |= MF_PAX_SEGMEXEC;
41694 +#endif
41695 +
41696 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41697 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41698 + if ((__supported_pte_mask & _PAGE_NX))
41699 + pax_flags &= ~MF_PAX_SEGMEXEC;
41700 + else
41701 + pax_flags &= ~MF_PAX_PAGEEXEC;
41702 + }
41703 +#endif
41704 +
41705 +#ifdef CONFIG_PAX_EMUTRAMP
41706 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
41707 + pax_flags |= MF_PAX_EMUTRAMP;
41708 +#endif
41709 +
41710 +#ifdef CONFIG_PAX_MPROTECT
41711 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
41712 + pax_flags |= MF_PAX_MPROTECT;
41713 +#endif
41714 +
41715 +#ifdef CONFIG_PAX_ASLR
41716 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
41717 + pax_flags |= MF_PAX_RANDMMAP;
41718 +#endif
41719 +
41720 +#else
41721 +
41722 +#ifdef CONFIG_PAX_PAGEEXEC
41723 + pax_flags |= MF_PAX_PAGEEXEC;
41724 +#endif
41725 +
41726 +#ifdef CONFIG_PAX_MPROTECT
41727 + pax_flags |= MF_PAX_MPROTECT;
41728 +#endif
41729 +
41730 +#ifdef CONFIG_PAX_RANDMMAP
41731 + pax_flags |= MF_PAX_RANDMMAP;
41732 +#endif
41733 +
41734 +#ifdef CONFIG_PAX_SEGMEXEC
41735 + if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
41736 + pax_flags &= ~MF_PAX_PAGEEXEC;
41737 + pax_flags |= MF_PAX_SEGMEXEC;
41738 + }
41739 +#endif
41740 +
41741 +#endif
41742 +
41743 + return pax_flags;
41744 +}
41745 +
41746 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
41747 +{
41748 +
41749 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41750 + unsigned long i;
41751 +
41752 + for (i = 0UL; i < elf_ex->e_phnum; i++)
41753 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
41754 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
41755 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
41756 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
41757 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
41758 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
41759 + return ~0UL;
41760 +
41761 +#ifdef CONFIG_PAX_SOFTMODE
41762 + if (pax_softmode)
41763 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
41764 + else
41765 +#endif
41766 +
41767 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
41768 + break;
41769 + }
41770 +#endif
41771 +
41772 + return ~0UL;
41773 +}
41774 +
41775 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41776 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
41777 +{
41778 + unsigned long pax_flags = 0UL;
41779 +
41780 +#ifdef CONFIG_PAX_PAGEEXEC
41781 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
41782 + pax_flags |= MF_PAX_PAGEEXEC;
41783 +#endif
41784 +
41785 +#ifdef CONFIG_PAX_SEGMEXEC
41786 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
41787 + pax_flags |= MF_PAX_SEGMEXEC;
41788 +#endif
41789 +
41790 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41791 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41792 + if ((__supported_pte_mask & _PAGE_NX))
41793 + pax_flags &= ~MF_PAX_SEGMEXEC;
41794 + else
41795 + pax_flags &= ~MF_PAX_PAGEEXEC;
41796 + }
41797 +#endif
41798 +
41799 +#ifdef CONFIG_PAX_EMUTRAMP
41800 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
41801 + pax_flags |= MF_PAX_EMUTRAMP;
41802 +#endif
41803 +
41804 +#ifdef CONFIG_PAX_MPROTECT
41805 + if (pax_flags_softmode & MF_PAX_MPROTECT)
41806 + pax_flags |= MF_PAX_MPROTECT;
41807 +#endif
41808 +
41809 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41810 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
41811 + pax_flags |= MF_PAX_RANDMMAP;
41812 +#endif
41813 +
41814 + return pax_flags;
41815 +}
41816 +
41817 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
41818 +{
41819 + unsigned long pax_flags = 0UL;
41820 +
41821 +#ifdef CONFIG_PAX_PAGEEXEC
41822 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
41823 + pax_flags |= MF_PAX_PAGEEXEC;
41824 +#endif
41825 +
41826 +#ifdef CONFIG_PAX_SEGMEXEC
41827 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
41828 + pax_flags |= MF_PAX_SEGMEXEC;
41829 +#endif
41830 +
41831 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41832 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41833 + if ((__supported_pte_mask & _PAGE_NX))
41834 + pax_flags &= ~MF_PAX_SEGMEXEC;
41835 + else
41836 + pax_flags &= ~MF_PAX_PAGEEXEC;
41837 + }
41838 +#endif
41839 +
41840 +#ifdef CONFIG_PAX_EMUTRAMP
41841 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
41842 + pax_flags |= MF_PAX_EMUTRAMP;
41843 +#endif
41844 +
41845 +#ifdef CONFIG_PAX_MPROTECT
41846 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
41847 + pax_flags |= MF_PAX_MPROTECT;
41848 +#endif
41849 +
41850 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41851 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
41852 + pax_flags |= MF_PAX_RANDMMAP;
41853 +#endif
41854 +
41855 + return pax_flags;
41856 +}
41857 +#endif
41858 +
41859 +static unsigned long pax_parse_xattr_pax(struct file * const file)
41860 +{
41861 +
41862 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41863 + ssize_t xattr_size, i;
41864 + unsigned char xattr_value[5];
41865 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
41866 +
41867 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
41868 + if (xattr_size <= 0)
41869 + return ~0UL;
41870 +
41871 + for (i = 0; i < xattr_size; i++)
41872 + switch (xattr_value[i]) {
41873 + default:
41874 + return ~0UL;
41875 +
41876 +#define parse_flag(option1, option2, flag) \
41877 + case option1: \
41878 + pax_flags_hardmode |= MF_PAX_##flag; \
41879 + break; \
41880 + case option2: \
41881 + pax_flags_softmode |= MF_PAX_##flag; \
41882 + break;
41883 +
41884 + parse_flag('p', 'P', PAGEEXEC);
41885 + parse_flag('e', 'E', EMUTRAMP);
41886 + parse_flag('m', 'M', MPROTECT);
41887 + parse_flag('r', 'R', RANDMMAP);
41888 + parse_flag('s', 'S', SEGMEXEC);
41889 +
41890 +#undef parse_flag
41891 + }
41892 +
41893 + if (pax_flags_hardmode & pax_flags_softmode)
41894 + return ~0UL;
41895 +
41896 +#ifdef CONFIG_PAX_SOFTMODE
41897 + if (pax_softmode)
41898 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
41899 + else
41900 +#endif
41901 +
41902 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
41903 +#else
41904 + return ~0UL;
41905 +#endif
41906 +
41907 +}
41908 +
41909 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
41910 +{
41911 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
41912 +
41913 + pax_flags = pax_parse_ei_pax(elf_ex);
41914 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
41915 + xattr_pax_flags = pax_parse_xattr_pax(file);
41916 +
41917 + if (pt_pax_flags == ~0UL)
41918 + pt_pax_flags = xattr_pax_flags;
41919 + else if (xattr_pax_flags == ~0UL)
41920 + xattr_pax_flags = pt_pax_flags;
41921 + if (pt_pax_flags != xattr_pax_flags)
41922 + return -EINVAL;
41923 + if (pt_pax_flags != ~0UL)
41924 + pax_flags = pt_pax_flags;
41925 +
41926 + if (0 > pax_check_flags(&pax_flags))
41927 + return -EINVAL;
41928 +
41929 + current->mm->pax_flags = pax_flags;
41930 + return 0;
41931 +}
41932 +#endif
41933 +
41934 /*
41935 * These are the functions used to load ELF style executables and shared
41936 * libraries. There is no binary dependent code anywhere else.
41937 @@ -541,6 +910,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
41938 {
41939 unsigned int random_variable = 0;
41940
41941 +#ifdef CONFIG_PAX_RANDUSTACK
41942 + if (randomize_va_space)
41943 + return stack_top - current->mm->delta_stack;
41944 +#endif
41945 +
41946 if ((current->flags & PF_RANDOMIZE) &&
41947 !(current->personality & ADDR_NO_RANDOMIZE)) {
41948 random_variable = get_random_int() & STACK_RND_MASK;
41949 @@ -559,7 +933,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41950 unsigned long load_addr = 0, load_bias = 0;
41951 int load_addr_set = 0;
41952 char * elf_interpreter = NULL;
41953 - unsigned long error;
41954 + unsigned long error = 0;
41955 struct elf_phdr *elf_ppnt, *elf_phdata;
41956 unsigned long elf_bss, elf_brk;
41957 int retval, i;
41958 @@ -569,11 +943,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41959 unsigned long start_code, end_code, start_data, end_data;
41960 unsigned long reloc_func_desc __maybe_unused = 0;
41961 int executable_stack = EXSTACK_DEFAULT;
41962 - unsigned long def_flags = 0;
41963 struct {
41964 struct elfhdr elf_ex;
41965 struct elfhdr interp_elf_ex;
41966 } *loc;
41967 + unsigned long pax_task_size = TASK_SIZE;
41968
41969 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
41970 if (!loc) {
41971 @@ -709,11 +1083,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41972 goto out_free_dentry;
41973
41974 /* OK, This is the point of no return */
41975 - current->mm->def_flags = def_flags;
41976 +
41977 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41978 + current->mm->pax_flags = 0UL;
41979 +#endif
41980 +
41981 +#ifdef CONFIG_PAX_DLRESOLVE
41982 + current->mm->call_dl_resolve = 0UL;
41983 +#endif
41984 +
41985 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
41986 + current->mm->call_syscall = 0UL;
41987 +#endif
41988 +
41989 +#ifdef CONFIG_PAX_ASLR
41990 + current->mm->delta_mmap = 0UL;
41991 + current->mm->delta_stack = 0UL;
41992 +#endif
41993 +
41994 + current->mm->def_flags = 0;
41995 +
41996 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
41997 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
41998 + send_sig(SIGKILL, current, 0);
41999 + goto out_free_dentry;
42000 + }
42001 +#endif
42002 +
42003 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42004 + pax_set_initial_flags(bprm);
42005 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42006 + if (pax_set_initial_flags_func)
42007 + (pax_set_initial_flags_func)(bprm);
42008 +#endif
42009 +
42010 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42011 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
42012 + current->mm->context.user_cs_limit = PAGE_SIZE;
42013 + current->mm->def_flags |= VM_PAGEEXEC;
42014 + }
42015 +#endif
42016 +
42017 +#ifdef CONFIG_PAX_SEGMEXEC
42018 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42019 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42020 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42021 + pax_task_size = SEGMEXEC_TASK_SIZE;
42022 + current->mm->def_flags |= VM_NOHUGEPAGE;
42023 + }
42024 +#endif
42025 +
42026 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42027 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42028 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42029 + put_cpu();
42030 + }
42031 +#endif
42032
42033 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42034 may depend on the personality. */
42035 SET_PERSONALITY(loc->elf_ex);
42036 +
42037 +#ifdef CONFIG_PAX_ASLR
42038 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42039 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
42040 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
42041 + }
42042 +#endif
42043 +
42044 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42045 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42046 + executable_stack = EXSTACK_DISABLE_X;
42047 + current->personality &= ~READ_IMPLIES_EXEC;
42048 + } else
42049 +#endif
42050 +
42051 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
42052 current->personality |= READ_IMPLIES_EXEC;
42053
42054 @@ -804,6 +1248,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42055 #else
42056 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
42057 #endif
42058 +
42059 +#ifdef CONFIG_PAX_RANDMMAP
42060 + /* PaX: randomize base address at the default exe base if requested */
42061 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
42062 +#ifdef CONFIG_SPARC64
42063 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
42064 +#else
42065 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
42066 +#endif
42067 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
42068 + elf_flags |= MAP_FIXED;
42069 + }
42070 +#endif
42071 +
42072 }
42073
42074 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
42075 @@ -836,9 +1294,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42076 * allowed task size. Note that p_filesz must always be
42077 * <= p_memsz so it is only necessary to check p_memsz.
42078 */
42079 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42080 - elf_ppnt->p_memsz > TASK_SIZE ||
42081 - TASK_SIZE - elf_ppnt->p_memsz < k) {
42082 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42083 + elf_ppnt->p_memsz > pax_task_size ||
42084 + pax_task_size - elf_ppnt->p_memsz < k) {
42085 /* set_brk can never work. Avoid overflows. */
42086 send_sig(SIGKILL, current, 0);
42087 retval = -EINVAL;
42088 @@ -877,11 +1335,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42089 goto out_free_dentry;
42090 }
42091 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
42092 - send_sig(SIGSEGV, current, 0);
42093 - retval = -EFAULT; /* Nobody gets to see this, but.. */
42094 - goto out_free_dentry;
42095 + /*
42096 + * This bss-zeroing can fail if the ELF
42097 + * file specifies odd protections. So
42098 + * we don't check the return value
42099 + */
42100 }
42101
42102 +#ifdef CONFIG_PAX_RANDMMAP
42103 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42104 + unsigned long start, size;
42105 +
42106 + start = ELF_PAGEALIGN(elf_brk);
42107 + size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
42108 + down_write(&current->mm->mmap_sem);
42109 + retval = -ENOMEM;
42110 + if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
42111 + unsigned long prot = PROT_NONE;
42112 +
42113 + current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
42114 +// if (current->personality & ADDR_NO_RANDOMIZE)
42115 +// prot = PROT_READ;
42116 + start = do_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
42117 + retval = IS_ERR_VALUE(start) ? start : 0;
42118 + }
42119 + up_write(&current->mm->mmap_sem);
42120 + if (retval == 0)
42121 + retval = set_brk(start + size, start + size + PAGE_SIZE);
42122 + if (retval < 0) {
42123 + send_sig(SIGKILL, current, 0);
42124 + goto out_free_dentry;
42125 + }
42126 + }
42127 +#endif
42128 +
42129 if (elf_interpreter) {
42130 unsigned long uninitialized_var(interp_map_addr);
42131
42132 @@ -1109,7 +1596,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
42133 * Decide what to dump of a segment, part, all or none.
42134 */
42135 static unsigned long vma_dump_size(struct vm_area_struct *vma,
42136 - unsigned long mm_flags)
42137 + unsigned long mm_flags, long signr)
42138 {
42139 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
42140
42141 @@ -1146,7 +1633,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42142 if (vma->vm_file == NULL)
42143 return 0;
42144
42145 - if (FILTER(MAPPED_PRIVATE))
42146 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
42147 goto whole;
42148
42149 /*
42150 @@ -1368,9 +1855,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
42151 {
42152 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
42153 int i = 0;
42154 - do
42155 + do {
42156 i += 2;
42157 - while (auxv[i - 2] != AT_NULL);
42158 + } while (auxv[i - 2] != AT_NULL);
42159 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
42160 }
42161
42162 @@ -1892,14 +2379,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
42163 }
42164
42165 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
42166 - unsigned long mm_flags)
42167 + struct coredump_params *cprm)
42168 {
42169 struct vm_area_struct *vma;
42170 size_t size = 0;
42171
42172 for (vma = first_vma(current, gate_vma); vma != NULL;
42173 vma = next_vma(vma, gate_vma))
42174 - size += vma_dump_size(vma, mm_flags);
42175 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42176 return size;
42177 }
42178
42179 @@ -1993,7 +2480,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42180
42181 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
42182
42183 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
42184 + offset += elf_core_vma_data_size(gate_vma, cprm);
42185 offset += elf_core_extra_data_size();
42186 e_shoff = offset;
42187
42188 @@ -2007,10 +2494,12 @@ static int elf_core_dump(struct coredump_params *cprm)
42189 offset = dataoff;
42190
42191 size += sizeof(*elf);
42192 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42193 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42194 goto end_coredump;
42195
42196 size += sizeof(*phdr4note);
42197 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42198 if (size > cprm->limit
42199 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42200 goto end_coredump;
42201 @@ -2024,7 +2513,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42202 phdr.p_offset = offset;
42203 phdr.p_vaddr = vma->vm_start;
42204 phdr.p_paddr = 0;
42205 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42206 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42207 phdr.p_memsz = vma->vm_end - vma->vm_start;
42208 offset += phdr.p_filesz;
42209 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
42210 @@ -2035,6 +2524,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42211 phdr.p_align = ELF_EXEC_PAGESIZE;
42212
42213 size += sizeof(phdr);
42214 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42215 if (size > cprm->limit
42216 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42217 goto end_coredump;
42218 @@ -2059,7 +2549,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42219 unsigned long addr;
42220 unsigned long end;
42221
42222 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42223 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42224
42225 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42226 struct page *page;
42227 @@ -2068,6 +2558,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42228 page = get_dump_page(addr);
42229 if (page) {
42230 void *kaddr = kmap(page);
42231 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42232 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42233 !dump_write(cprm->file, kaddr,
42234 PAGE_SIZE);
42235 @@ -2085,6 +2576,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42236
42237 if (e_phnum == PN_XNUM) {
42238 size += sizeof(*shdr4extnum);
42239 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42240 if (size > cprm->limit
42241 || !dump_write(cprm->file, shdr4extnum,
42242 sizeof(*shdr4extnum)))
42243 @@ -2105,6 +2597,97 @@ out:
42244
42245 #endif /* CONFIG_ELF_CORE */
42246
42247 +#ifdef CONFIG_PAX_MPROTECT
42248 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
42249 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42250 + * we'll remove VM_MAYWRITE for good on RELRO segments.
42251 + *
42252 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42253 + * basis because we want to allow the common case and not the special ones.
42254 + */
42255 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42256 +{
42257 + struct elfhdr elf_h;
42258 + struct elf_phdr elf_p;
42259 + unsigned long i;
42260 + unsigned long oldflags;
42261 + bool is_textrel_rw, is_textrel_rx, is_relro;
42262 +
42263 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42264 + return;
42265 +
42266 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42267 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42268 +
42269 +#ifdef CONFIG_PAX_ELFRELOCS
42270 + /* possible TEXTREL */
42271 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42272 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
42273 +#else
42274 + is_textrel_rw = false;
42275 + is_textrel_rx = false;
42276 +#endif
42277 +
42278 + /* possible RELRO */
42279 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42280 +
42281 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42282 + return;
42283 +
42284 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42285 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42286 +
42287 +#ifdef CONFIG_PAX_ETEXECRELOCS
42288 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42289 +#else
42290 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42291 +#endif
42292 +
42293 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42294 + !elf_check_arch(&elf_h) ||
42295 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42296 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42297 + return;
42298 +
42299 + for (i = 0UL; i < elf_h.e_phnum; i++) {
42300 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42301 + return;
42302 + switch (elf_p.p_type) {
42303 + case PT_DYNAMIC:
42304 + if (!is_textrel_rw && !is_textrel_rx)
42305 + continue;
42306 + i = 0UL;
42307 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42308 + elf_dyn dyn;
42309 +
42310 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42311 + return;
42312 + if (dyn.d_tag == DT_NULL)
42313 + return;
42314 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42315 + gr_log_textrel(vma);
42316 + if (is_textrel_rw)
42317 + vma->vm_flags |= VM_MAYWRITE;
42318 + else
42319 + /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
42320 + vma->vm_flags &= ~VM_MAYWRITE;
42321 + return;
42322 + }
42323 + i++;
42324 + }
42325 + return;
42326 +
42327 + case PT_GNU_RELRO:
42328 + if (!is_relro)
42329 + continue;
42330 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42331 + vma->vm_flags &= ~VM_MAYWRITE;
42332 + return;
42333 + }
42334 + }
42335 +}
42336 +#endif
42337 +
42338 static int __init init_elf_binfmt(void)
42339 {
42340 register_binfmt(&elf_format);
42341 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
42342 index 6b2daf9..a70dccb 100644
42343 --- a/fs/binfmt_flat.c
42344 +++ b/fs/binfmt_flat.c
42345 @@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42346 realdatastart = (unsigned long) -ENOMEM;
42347 printk("Unable to allocate RAM for process data, errno %d\n",
42348 (int)-realdatastart);
42349 + down_write(&current->mm->mmap_sem);
42350 do_munmap(current->mm, textpos, text_len);
42351 + up_write(&current->mm->mmap_sem);
42352 ret = realdatastart;
42353 goto err;
42354 }
42355 @@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42356 }
42357 if (IS_ERR_VALUE(result)) {
42358 printk("Unable to read data+bss, errno %d\n", (int)-result);
42359 + down_write(&current->mm->mmap_sem);
42360 do_munmap(current->mm, textpos, text_len);
42361 do_munmap(current->mm, realdatastart, len);
42362 + up_write(&current->mm->mmap_sem);
42363 ret = result;
42364 goto err;
42365 }
42366 @@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42367 }
42368 if (IS_ERR_VALUE(result)) {
42369 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42370 + down_write(&current->mm->mmap_sem);
42371 do_munmap(current->mm, textpos, text_len + data_len + extra +
42372 MAX_SHARED_LIBS * sizeof(unsigned long));
42373 + up_write(&current->mm->mmap_sem);
42374 ret = result;
42375 goto err;
42376 }
42377 diff --git a/fs/bio.c b/fs/bio.c
42378 index 84da885..2149cd9 100644
42379 --- a/fs/bio.c
42380 +++ b/fs/bio.c
42381 @@ -838,7 +838,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
42382 /*
42383 * Overflow, abort
42384 */
42385 - if (end < start)
42386 + if (end < start || end - start > INT_MAX - nr_pages)
42387 return ERR_PTR(-EINVAL);
42388
42389 nr_pages += end - start;
42390 @@ -1234,7 +1234,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42391 const int read = bio_data_dir(bio) == READ;
42392 struct bio_map_data *bmd = bio->bi_private;
42393 int i;
42394 - char *p = bmd->sgvecs[0].iov_base;
42395 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42396
42397 __bio_for_each_segment(bvec, bio, i, 0) {
42398 char *addr = page_address(bvec->bv_page);
42399 diff --git a/fs/block_dev.c b/fs/block_dev.c
42400 index ba11c30..623d736 100644
42401 --- a/fs/block_dev.c
42402 +++ b/fs/block_dev.c
42403 @@ -704,7 +704,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
42404 else if (bdev->bd_contains == bdev)
42405 return true; /* is a whole device which isn't held */
42406
42407 - else if (whole->bd_holder == bd_may_claim)
42408 + else if (whole->bd_holder == (void *)bd_may_claim)
42409 return true; /* is a partition of a device that is being partitioned */
42410 else if (whole->bd_holder != NULL)
42411 return false; /* is a partition of a held device */
42412 diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
42413 index c053e90..e5f1afc 100644
42414 --- a/fs/btrfs/check-integrity.c
42415 +++ b/fs/btrfs/check-integrity.c
42416 @@ -156,7 +156,7 @@ struct btrfsic_block {
42417 union {
42418 bio_end_io_t *bio;
42419 bh_end_io_t *bh;
42420 - } orig_bio_bh_end_io;
42421 + } __no_const orig_bio_bh_end_io;
42422 int submit_bio_bh_rw;
42423 u64 flush_gen; /* only valid if !never_written */
42424 };
42425 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42426 index 4106264..8157ede 100644
42427 --- a/fs/btrfs/ctree.c
42428 +++ b/fs/btrfs/ctree.c
42429 @@ -513,9 +513,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42430 free_extent_buffer(buf);
42431 add_root_to_dirty_list(root);
42432 } else {
42433 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42434 - parent_start = parent->start;
42435 - else
42436 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42437 + if (parent)
42438 + parent_start = parent->start;
42439 + else
42440 + parent_start = 0;
42441 + } else
42442 parent_start = 0;
42443
42444 WARN_ON(trans->transid != btrfs_header_generation(parent));
42445 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42446 index 61b16c6..b492c09 100644
42447 --- a/fs/btrfs/inode.c
42448 +++ b/fs/btrfs/inode.c
42449 @@ -7071,7 +7071,7 @@ fail:
42450 return -ENOMEM;
42451 }
42452
42453 -static int btrfs_getattr(struct vfsmount *mnt,
42454 +int btrfs_getattr(struct vfsmount *mnt,
42455 struct dentry *dentry, struct kstat *stat)
42456 {
42457 struct inode *inode = dentry->d_inode;
42458 @@ -7085,6 +7085,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42459 return 0;
42460 }
42461
42462 +EXPORT_SYMBOL(btrfs_getattr);
42463 +
42464 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
42465 +{
42466 + return BTRFS_I(inode)->root->anon_dev;
42467 +}
42468 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42469 +
42470 /*
42471 * If a file is moved, it will inherit the cow and compression flags of the new
42472 * directory.
42473 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
42474 index 14f8e1f..ab8d81f 100644
42475 --- a/fs/btrfs/ioctl.c
42476 +++ b/fs/btrfs/ioctl.c
42477 @@ -2882,9 +2882,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42478 for (i = 0; i < num_types; i++) {
42479 struct btrfs_space_info *tmp;
42480
42481 + /* Don't copy in more than we allocated */
42482 if (!slot_count)
42483 break;
42484
42485 + slot_count--;
42486 +
42487 info = NULL;
42488 rcu_read_lock();
42489 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
42490 @@ -2906,15 +2909,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42491 memcpy(dest, &space, sizeof(space));
42492 dest++;
42493 space_args.total_spaces++;
42494 - slot_count--;
42495 }
42496 - if (!slot_count)
42497 - break;
42498 }
42499 up_read(&info->groups_sem);
42500 }
42501
42502 - user_dest = (struct btrfs_ioctl_space_info *)
42503 + user_dest = (struct btrfs_ioctl_space_info __user *)
42504 (arg + sizeof(struct btrfs_ioctl_space_args));
42505
42506 if (copy_to_user(user_dest, dest_orig, alloc_size))
42507 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
42508 index 646ee21..f020f87 100644
42509 --- a/fs/btrfs/relocation.c
42510 +++ b/fs/btrfs/relocation.c
42511 @@ -1268,7 +1268,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42512 }
42513 spin_unlock(&rc->reloc_root_tree.lock);
42514
42515 - BUG_ON((struct btrfs_root *)node->data != root);
42516 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
42517
42518 if (!del) {
42519 spin_lock(&rc->reloc_root_tree.lock);
42520 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42521 index 622f469..e8d2d55 100644
42522 --- a/fs/cachefiles/bind.c
42523 +++ b/fs/cachefiles/bind.c
42524 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42525 args);
42526
42527 /* start by checking things over */
42528 - ASSERT(cache->fstop_percent >= 0 &&
42529 - cache->fstop_percent < cache->fcull_percent &&
42530 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
42531 cache->fcull_percent < cache->frun_percent &&
42532 cache->frun_percent < 100);
42533
42534 - ASSERT(cache->bstop_percent >= 0 &&
42535 - cache->bstop_percent < cache->bcull_percent &&
42536 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
42537 cache->bcull_percent < cache->brun_percent &&
42538 cache->brun_percent < 100);
42539
42540 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42541 index 0a1467b..6a53245 100644
42542 --- a/fs/cachefiles/daemon.c
42543 +++ b/fs/cachefiles/daemon.c
42544 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42545 if (n > buflen)
42546 return -EMSGSIZE;
42547
42548 - if (copy_to_user(_buffer, buffer, n) != 0)
42549 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42550 return -EFAULT;
42551
42552 return n;
42553 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42554 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42555 return -EIO;
42556
42557 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
42558 + if (datalen > PAGE_SIZE - 1)
42559 return -EOPNOTSUPP;
42560
42561 /* drag the command string into the kernel so we can parse it */
42562 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42563 if (args[0] != '%' || args[1] != '\0')
42564 return -EINVAL;
42565
42566 - if (fstop < 0 || fstop >= cache->fcull_percent)
42567 + if (fstop >= cache->fcull_percent)
42568 return cachefiles_daemon_range_error(cache, args);
42569
42570 cache->fstop_percent = fstop;
42571 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
42572 if (args[0] != '%' || args[1] != '\0')
42573 return -EINVAL;
42574
42575 - if (bstop < 0 || bstop >= cache->bcull_percent)
42576 + if (bstop >= cache->bcull_percent)
42577 return cachefiles_daemon_range_error(cache, args);
42578
42579 cache->bstop_percent = bstop;
42580 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
42581 index bd6bc1b..b627b53 100644
42582 --- a/fs/cachefiles/internal.h
42583 +++ b/fs/cachefiles/internal.h
42584 @@ -57,7 +57,7 @@ struct cachefiles_cache {
42585 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
42586 struct rb_root active_nodes; /* active nodes (can't be culled) */
42587 rwlock_t active_lock; /* lock for active_nodes */
42588 - atomic_t gravecounter; /* graveyard uniquifier */
42589 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
42590 unsigned frun_percent; /* when to stop culling (% files) */
42591 unsigned fcull_percent; /* when to start culling (% files) */
42592 unsigned fstop_percent; /* when to stop allocating (% files) */
42593 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
42594 * proc.c
42595 */
42596 #ifdef CONFIG_CACHEFILES_HISTOGRAM
42597 -extern atomic_t cachefiles_lookup_histogram[HZ];
42598 -extern atomic_t cachefiles_mkdir_histogram[HZ];
42599 -extern atomic_t cachefiles_create_histogram[HZ];
42600 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42601 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42602 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
42603
42604 extern int __init cachefiles_proc_init(void);
42605 extern void cachefiles_proc_cleanup(void);
42606 static inline
42607 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
42608 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
42609 {
42610 unsigned long jif = jiffies - start_jif;
42611 if (jif >= HZ)
42612 jif = HZ - 1;
42613 - atomic_inc(&histogram[jif]);
42614 + atomic_inc_unchecked(&histogram[jif]);
42615 }
42616
42617 #else
42618 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
42619 index 7f0771d..87d4f36 100644
42620 --- a/fs/cachefiles/namei.c
42621 +++ b/fs/cachefiles/namei.c
42622 @@ -318,7 +318,7 @@ try_again:
42623 /* first step is to make up a grave dentry in the graveyard */
42624 sprintf(nbuffer, "%08x%08x",
42625 (uint32_t) get_seconds(),
42626 - (uint32_t) atomic_inc_return(&cache->gravecounter));
42627 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
42628
42629 /* do the multiway lock magic */
42630 trap = lock_rename(cache->graveyard, dir);
42631 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
42632 index eccd339..4c1d995 100644
42633 --- a/fs/cachefiles/proc.c
42634 +++ b/fs/cachefiles/proc.c
42635 @@ -14,9 +14,9 @@
42636 #include <linux/seq_file.h>
42637 #include "internal.h"
42638
42639 -atomic_t cachefiles_lookup_histogram[HZ];
42640 -atomic_t cachefiles_mkdir_histogram[HZ];
42641 -atomic_t cachefiles_create_histogram[HZ];
42642 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42643 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42644 +atomic_unchecked_t cachefiles_create_histogram[HZ];
42645
42646 /*
42647 * display the latency histogram
42648 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
42649 return 0;
42650 default:
42651 index = (unsigned long) v - 3;
42652 - x = atomic_read(&cachefiles_lookup_histogram[index]);
42653 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
42654 - z = atomic_read(&cachefiles_create_histogram[index]);
42655 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
42656 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
42657 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
42658 if (x == 0 && y == 0 && z == 0)
42659 return 0;
42660
42661 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
42662 index 0e3c092..818480e 100644
42663 --- a/fs/cachefiles/rdwr.c
42664 +++ b/fs/cachefiles/rdwr.c
42665 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
42666 old_fs = get_fs();
42667 set_fs(KERNEL_DS);
42668 ret = file->f_op->write(
42669 - file, (const void __user *) data, len, &pos);
42670 + file, (const void __force_user *) data, len, &pos);
42671 set_fs(old_fs);
42672 kunmap(page);
42673 if (ret != len)
42674 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
42675 index 3e8094b..cb3ff3d 100644
42676 --- a/fs/ceph/dir.c
42677 +++ b/fs/ceph/dir.c
42678 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
42679 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
42680 struct ceph_mds_client *mdsc = fsc->mdsc;
42681 unsigned frag = fpos_frag(filp->f_pos);
42682 - int off = fpos_off(filp->f_pos);
42683 + unsigned int off = fpos_off(filp->f_pos);
42684 int err;
42685 u32 ftype;
42686 struct ceph_mds_reply_info_parsed *rinfo;
42687 @@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
42688 if (nd &&
42689 (nd->flags & LOOKUP_OPEN) &&
42690 !(nd->intent.open.flags & O_CREAT)) {
42691 - int mode = nd->intent.open.create_mode & ~current->fs->umask;
42692 + int mode = nd->intent.open.create_mode & ~current_umask();
42693 return ceph_lookup_open(dir, dentry, nd, mode, 1);
42694 }
42695
42696 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
42697 index 2704646..c581c91 100644
42698 --- a/fs/cifs/cifs_debug.c
42699 +++ b/fs/cifs/cifs_debug.c
42700 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42701
42702 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
42703 #ifdef CONFIG_CIFS_STATS2
42704 - atomic_set(&totBufAllocCount, 0);
42705 - atomic_set(&totSmBufAllocCount, 0);
42706 + atomic_set_unchecked(&totBufAllocCount, 0);
42707 + atomic_set_unchecked(&totSmBufAllocCount, 0);
42708 #endif /* CONFIG_CIFS_STATS2 */
42709 spin_lock(&cifs_tcp_ses_lock);
42710 list_for_each(tmp1, &cifs_tcp_ses_list) {
42711 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42712 tcon = list_entry(tmp3,
42713 struct cifs_tcon,
42714 tcon_list);
42715 - atomic_set(&tcon->num_smbs_sent, 0);
42716 - atomic_set(&tcon->num_writes, 0);
42717 - atomic_set(&tcon->num_reads, 0);
42718 - atomic_set(&tcon->num_oplock_brks, 0);
42719 - atomic_set(&tcon->num_opens, 0);
42720 - atomic_set(&tcon->num_posixopens, 0);
42721 - atomic_set(&tcon->num_posixmkdirs, 0);
42722 - atomic_set(&tcon->num_closes, 0);
42723 - atomic_set(&tcon->num_deletes, 0);
42724 - atomic_set(&tcon->num_mkdirs, 0);
42725 - atomic_set(&tcon->num_rmdirs, 0);
42726 - atomic_set(&tcon->num_renames, 0);
42727 - atomic_set(&tcon->num_t2renames, 0);
42728 - atomic_set(&tcon->num_ffirst, 0);
42729 - atomic_set(&tcon->num_fnext, 0);
42730 - atomic_set(&tcon->num_fclose, 0);
42731 - atomic_set(&tcon->num_hardlinks, 0);
42732 - atomic_set(&tcon->num_symlinks, 0);
42733 - atomic_set(&tcon->num_locks, 0);
42734 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
42735 + atomic_set_unchecked(&tcon->num_writes, 0);
42736 + atomic_set_unchecked(&tcon->num_reads, 0);
42737 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
42738 + atomic_set_unchecked(&tcon->num_opens, 0);
42739 + atomic_set_unchecked(&tcon->num_posixopens, 0);
42740 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
42741 + atomic_set_unchecked(&tcon->num_closes, 0);
42742 + atomic_set_unchecked(&tcon->num_deletes, 0);
42743 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
42744 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
42745 + atomic_set_unchecked(&tcon->num_renames, 0);
42746 + atomic_set_unchecked(&tcon->num_t2renames, 0);
42747 + atomic_set_unchecked(&tcon->num_ffirst, 0);
42748 + atomic_set_unchecked(&tcon->num_fnext, 0);
42749 + atomic_set_unchecked(&tcon->num_fclose, 0);
42750 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
42751 + atomic_set_unchecked(&tcon->num_symlinks, 0);
42752 + atomic_set_unchecked(&tcon->num_locks, 0);
42753 }
42754 }
42755 }
42756 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42757 smBufAllocCount.counter, cifs_min_small);
42758 #ifdef CONFIG_CIFS_STATS2
42759 seq_printf(m, "Total Large %d Small %d Allocations\n",
42760 - atomic_read(&totBufAllocCount),
42761 - atomic_read(&totSmBufAllocCount));
42762 + atomic_read_unchecked(&totBufAllocCount),
42763 + atomic_read_unchecked(&totSmBufAllocCount));
42764 #endif /* CONFIG_CIFS_STATS2 */
42765
42766 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
42767 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42768 if (tcon->need_reconnect)
42769 seq_puts(m, "\tDISCONNECTED ");
42770 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
42771 - atomic_read(&tcon->num_smbs_sent),
42772 - atomic_read(&tcon->num_oplock_brks));
42773 + atomic_read_unchecked(&tcon->num_smbs_sent),
42774 + atomic_read_unchecked(&tcon->num_oplock_brks));
42775 seq_printf(m, "\nReads: %d Bytes: %lld",
42776 - atomic_read(&tcon->num_reads),
42777 + atomic_read_unchecked(&tcon->num_reads),
42778 (long long)(tcon->bytes_read));
42779 seq_printf(m, "\nWrites: %d Bytes: %lld",
42780 - atomic_read(&tcon->num_writes),
42781 + atomic_read_unchecked(&tcon->num_writes),
42782 (long long)(tcon->bytes_written));
42783 seq_printf(m, "\nFlushes: %d",
42784 - atomic_read(&tcon->num_flushes));
42785 + atomic_read_unchecked(&tcon->num_flushes));
42786 seq_printf(m, "\nLocks: %d HardLinks: %d "
42787 "Symlinks: %d",
42788 - atomic_read(&tcon->num_locks),
42789 - atomic_read(&tcon->num_hardlinks),
42790 - atomic_read(&tcon->num_symlinks));
42791 + atomic_read_unchecked(&tcon->num_locks),
42792 + atomic_read_unchecked(&tcon->num_hardlinks),
42793 + atomic_read_unchecked(&tcon->num_symlinks));
42794 seq_printf(m, "\nOpens: %d Closes: %d "
42795 "Deletes: %d",
42796 - atomic_read(&tcon->num_opens),
42797 - atomic_read(&tcon->num_closes),
42798 - atomic_read(&tcon->num_deletes));
42799 + atomic_read_unchecked(&tcon->num_opens),
42800 + atomic_read_unchecked(&tcon->num_closes),
42801 + atomic_read_unchecked(&tcon->num_deletes));
42802 seq_printf(m, "\nPosix Opens: %d "
42803 "Posix Mkdirs: %d",
42804 - atomic_read(&tcon->num_posixopens),
42805 - atomic_read(&tcon->num_posixmkdirs));
42806 + atomic_read_unchecked(&tcon->num_posixopens),
42807 + atomic_read_unchecked(&tcon->num_posixmkdirs));
42808 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
42809 - atomic_read(&tcon->num_mkdirs),
42810 - atomic_read(&tcon->num_rmdirs));
42811 + atomic_read_unchecked(&tcon->num_mkdirs),
42812 + atomic_read_unchecked(&tcon->num_rmdirs));
42813 seq_printf(m, "\nRenames: %d T2 Renames %d",
42814 - atomic_read(&tcon->num_renames),
42815 - atomic_read(&tcon->num_t2renames));
42816 + atomic_read_unchecked(&tcon->num_renames),
42817 + atomic_read_unchecked(&tcon->num_t2renames));
42818 seq_printf(m, "\nFindFirst: %d FNext %d "
42819 "FClose %d",
42820 - atomic_read(&tcon->num_ffirst),
42821 - atomic_read(&tcon->num_fnext),
42822 - atomic_read(&tcon->num_fclose));
42823 + atomic_read_unchecked(&tcon->num_ffirst),
42824 + atomic_read_unchecked(&tcon->num_fnext),
42825 + atomic_read_unchecked(&tcon->num_fclose));
42826 }
42827 }
42828 }
42829 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
42830 index 541ef81..a78deb8 100644
42831 --- a/fs/cifs/cifsfs.c
42832 +++ b/fs/cifs/cifsfs.c
42833 @@ -985,7 +985,7 @@ cifs_init_request_bufs(void)
42834 cifs_req_cachep = kmem_cache_create("cifs_request",
42835 CIFSMaxBufSize +
42836 MAX_CIFS_HDR_SIZE, 0,
42837 - SLAB_HWCACHE_ALIGN, NULL);
42838 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
42839 if (cifs_req_cachep == NULL)
42840 return -ENOMEM;
42841
42842 @@ -1012,7 +1012,7 @@ cifs_init_request_bufs(void)
42843 efficient to alloc 1 per page off the slab compared to 17K (5page)
42844 alloc of large cifs buffers even when page debugging is on */
42845 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
42846 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
42847 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
42848 NULL);
42849 if (cifs_sm_req_cachep == NULL) {
42850 mempool_destroy(cifs_req_poolp);
42851 @@ -1097,8 +1097,8 @@ init_cifs(void)
42852 atomic_set(&bufAllocCount, 0);
42853 atomic_set(&smBufAllocCount, 0);
42854 #ifdef CONFIG_CIFS_STATS2
42855 - atomic_set(&totBufAllocCount, 0);
42856 - atomic_set(&totSmBufAllocCount, 0);
42857 + atomic_set_unchecked(&totBufAllocCount, 0);
42858 + atomic_set_unchecked(&totSmBufAllocCount, 0);
42859 #endif /* CONFIG_CIFS_STATS2 */
42860
42861 atomic_set(&midCount, 0);
42862 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
42863 index 73fea28..b996b84 100644
42864 --- a/fs/cifs/cifsglob.h
42865 +++ b/fs/cifs/cifsglob.h
42866 @@ -439,28 +439,28 @@ struct cifs_tcon {
42867 __u16 Flags; /* optional support bits */
42868 enum statusEnum tidStatus;
42869 #ifdef CONFIG_CIFS_STATS
42870 - atomic_t num_smbs_sent;
42871 - atomic_t num_writes;
42872 - atomic_t num_reads;
42873 - atomic_t num_flushes;
42874 - atomic_t num_oplock_brks;
42875 - atomic_t num_opens;
42876 - atomic_t num_closes;
42877 - atomic_t num_deletes;
42878 - atomic_t num_mkdirs;
42879 - atomic_t num_posixopens;
42880 - atomic_t num_posixmkdirs;
42881 - atomic_t num_rmdirs;
42882 - atomic_t num_renames;
42883 - atomic_t num_t2renames;
42884 - atomic_t num_ffirst;
42885 - atomic_t num_fnext;
42886 - atomic_t num_fclose;
42887 - atomic_t num_hardlinks;
42888 - atomic_t num_symlinks;
42889 - atomic_t num_locks;
42890 - atomic_t num_acl_get;
42891 - atomic_t num_acl_set;
42892 + atomic_unchecked_t num_smbs_sent;
42893 + atomic_unchecked_t num_writes;
42894 + atomic_unchecked_t num_reads;
42895 + atomic_unchecked_t num_flushes;
42896 + atomic_unchecked_t num_oplock_brks;
42897 + atomic_unchecked_t num_opens;
42898 + atomic_unchecked_t num_closes;
42899 + atomic_unchecked_t num_deletes;
42900 + atomic_unchecked_t num_mkdirs;
42901 + atomic_unchecked_t num_posixopens;
42902 + atomic_unchecked_t num_posixmkdirs;
42903 + atomic_unchecked_t num_rmdirs;
42904 + atomic_unchecked_t num_renames;
42905 + atomic_unchecked_t num_t2renames;
42906 + atomic_unchecked_t num_ffirst;
42907 + atomic_unchecked_t num_fnext;
42908 + atomic_unchecked_t num_fclose;
42909 + atomic_unchecked_t num_hardlinks;
42910 + atomic_unchecked_t num_symlinks;
42911 + atomic_unchecked_t num_locks;
42912 + atomic_unchecked_t num_acl_get;
42913 + atomic_unchecked_t num_acl_set;
42914 #ifdef CONFIG_CIFS_STATS2
42915 unsigned long long time_writes;
42916 unsigned long long time_reads;
42917 @@ -677,7 +677,7 @@ convert_delimiter(char *path, char delim)
42918 }
42919
42920 #ifdef CONFIG_CIFS_STATS
42921 -#define cifs_stats_inc atomic_inc
42922 +#define cifs_stats_inc atomic_inc_unchecked
42923
42924 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
42925 unsigned int bytes)
42926 @@ -1036,8 +1036,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
42927 /* Various Debug counters */
42928 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
42929 #ifdef CONFIG_CIFS_STATS2
42930 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
42931 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
42932 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
42933 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
42934 #endif
42935 GLOBAL_EXTERN atomic_t smBufAllocCount;
42936 GLOBAL_EXTERN atomic_t midCount;
42937 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
42938 index 6b0e064..94e6c3c 100644
42939 --- a/fs/cifs/link.c
42940 +++ b/fs/cifs/link.c
42941 @@ -600,7 +600,7 @@ symlink_exit:
42942
42943 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
42944 {
42945 - char *p = nd_get_link(nd);
42946 + const char *p = nd_get_link(nd);
42947 if (!IS_ERR(p))
42948 kfree(p);
42949 }
42950 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
42951 index c29d1aa..58018da 100644
42952 --- a/fs/cifs/misc.c
42953 +++ b/fs/cifs/misc.c
42954 @@ -156,7 +156,7 @@ cifs_buf_get(void)
42955 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
42956 atomic_inc(&bufAllocCount);
42957 #ifdef CONFIG_CIFS_STATS2
42958 - atomic_inc(&totBufAllocCount);
42959 + atomic_inc_unchecked(&totBufAllocCount);
42960 #endif /* CONFIG_CIFS_STATS2 */
42961 }
42962
42963 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
42964 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
42965 atomic_inc(&smBufAllocCount);
42966 #ifdef CONFIG_CIFS_STATS2
42967 - atomic_inc(&totSmBufAllocCount);
42968 + atomic_inc_unchecked(&totSmBufAllocCount);
42969 #endif /* CONFIG_CIFS_STATS2 */
42970
42971 }
42972 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
42973 index 6901578..d402eb5 100644
42974 --- a/fs/coda/cache.c
42975 +++ b/fs/coda/cache.c
42976 @@ -24,7 +24,7 @@
42977 #include "coda_linux.h"
42978 #include "coda_cache.h"
42979
42980 -static atomic_t permission_epoch = ATOMIC_INIT(0);
42981 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
42982
42983 /* replace or extend an acl cache hit */
42984 void coda_cache_enter(struct inode *inode, int mask)
42985 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
42986 struct coda_inode_info *cii = ITOC(inode);
42987
42988 spin_lock(&cii->c_lock);
42989 - cii->c_cached_epoch = atomic_read(&permission_epoch);
42990 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
42991 if (cii->c_uid != current_fsuid()) {
42992 cii->c_uid = current_fsuid();
42993 cii->c_cached_perm = mask;
42994 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
42995 {
42996 struct coda_inode_info *cii = ITOC(inode);
42997 spin_lock(&cii->c_lock);
42998 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
42999 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43000 spin_unlock(&cii->c_lock);
43001 }
43002
43003 /* remove all acl caches */
43004 void coda_cache_clear_all(struct super_block *sb)
43005 {
43006 - atomic_inc(&permission_epoch);
43007 + atomic_inc_unchecked(&permission_epoch);
43008 }
43009
43010
43011 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
43012 spin_lock(&cii->c_lock);
43013 hit = (mask & cii->c_cached_perm) == mask &&
43014 cii->c_uid == current_fsuid() &&
43015 - cii->c_cached_epoch == atomic_read(&permission_epoch);
43016 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
43017 spin_unlock(&cii->c_lock);
43018
43019 return hit;
43020 diff --git a/fs/compat.c b/fs/compat.c
43021 index f2944ac..62845d2 100644
43022 --- a/fs/compat.c
43023 +++ b/fs/compat.c
43024 @@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
43025
43026 set_fs(KERNEL_DS);
43027 /* The __user pointer cast is valid because of the set_fs() */
43028 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
43029 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
43030 set_fs(oldfs);
43031 /* truncating is ok because it's a user address */
43032 if (!ret)
43033 @@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
43034 goto out;
43035
43036 ret = -EINVAL;
43037 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
43038 + if (nr_segs > UIO_MAXIOV)
43039 goto out;
43040 if (nr_segs > fast_segs) {
43041 ret = -ENOMEM;
43042 @@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
43043
43044 struct compat_readdir_callback {
43045 struct compat_old_linux_dirent __user *dirent;
43046 + struct file * file;
43047 int result;
43048 };
43049
43050 @@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
43051 buf->result = -EOVERFLOW;
43052 return -EOVERFLOW;
43053 }
43054 +
43055 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43056 + return 0;
43057 +
43058 buf->result++;
43059 dirent = buf->dirent;
43060 if (!access_ok(VERIFY_WRITE, dirent,
43061 @@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
43062
43063 buf.result = 0;
43064 buf.dirent = dirent;
43065 + buf.file = file;
43066
43067 error = vfs_readdir(file, compat_fillonedir, &buf);
43068 if (buf.result)
43069 @@ -900,6 +906,7 @@ struct compat_linux_dirent {
43070 struct compat_getdents_callback {
43071 struct compat_linux_dirent __user *current_dir;
43072 struct compat_linux_dirent __user *previous;
43073 + struct file * file;
43074 int count;
43075 int error;
43076 };
43077 @@ -921,6 +928,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
43078 buf->error = -EOVERFLOW;
43079 return -EOVERFLOW;
43080 }
43081 +
43082 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43083 + return 0;
43084 +
43085 dirent = buf->previous;
43086 if (dirent) {
43087 if (__put_user(offset, &dirent->d_off))
43088 @@ -968,6 +979,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
43089 buf.previous = NULL;
43090 buf.count = count;
43091 buf.error = 0;
43092 + buf.file = file;
43093
43094 error = vfs_readdir(file, compat_filldir, &buf);
43095 if (error >= 0)
43096 @@ -989,6 +1001,7 @@ out:
43097 struct compat_getdents_callback64 {
43098 struct linux_dirent64 __user *current_dir;
43099 struct linux_dirent64 __user *previous;
43100 + struct file * file;
43101 int count;
43102 int error;
43103 };
43104 @@ -1005,6 +1018,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
43105 buf->error = -EINVAL; /* only used if we fail.. */
43106 if (reclen > buf->count)
43107 return -EINVAL;
43108 +
43109 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43110 + return 0;
43111 +
43112 dirent = buf->previous;
43113
43114 if (dirent) {
43115 @@ -1056,13 +1073,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
43116 buf.previous = NULL;
43117 buf.count = count;
43118 buf.error = 0;
43119 + buf.file = file;
43120
43121 error = vfs_readdir(file, compat_filldir64, &buf);
43122 if (error >= 0)
43123 error = buf.error;
43124 lastdirent = buf.previous;
43125 if (lastdirent) {
43126 - typeof(lastdirent->d_off) d_off = file->f_pos;
43127 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
43128 if (__put_user_unaligned(d_off, &lastdirent->d_off))
43129 error = -EFAULT;
43130 else
43131 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
43132 index 112e45a..b59845b 100644
43133 --- a/fs/compat_binfmt_elf.c
43134 +++ b/fs/compat_binfmt_elf.c
43135 @@ -30,11 +30,13 @@
43136 #undef elf_phdr
43137 #undef elf_shdr
43138 #undef elf_note
43139 +#undef elf_dyn
43140 #undef elf_addr_t
43141 #define elfhdr elf32_hdr
43142 #define elf_phdr elf32_phdr
43143 #define elf_shdr elf32_shdr
43144 #define elf_note elf32_note
43145 +#define elf_dyn Elf32_Dyn
43146 #define elf_addr_t Elf32_Addr
43147
43148 /*
43149 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
43150 index debdfe0..75d31d4 100644
43151 --- a/fs/compat_ioctl.c
43152 +++ b/fs/compat_ioctl.c
43153 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
43154
43155 err = get_user(palp, &up->palette);
43156 err |= get_user(length, &up->length);
43157 + if (err)
43158 + return -EFAULT;
43159
43160 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
43161 err = put_user(compat_ptr(palp), &up_native->palette);
43162 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
43163 return -EFAULT;
43164 if (__get_user(udata, &ss32->iomem_base))
43165 return -EFAULT;
43166 - ss.iomem_base = compat_ptr(udata);
43167 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
43168 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
43169 __get_user(ss.port_high, &ss32->port_high))
43170 return -EFAULT;
43171 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
43172 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
43173 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
43174 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
43175 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43176 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43177 return -EFAULT;
43178
43179 return ioctl_preallocate(file, p);
43180 @@ -1610,8 +1612,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
43181 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
43182 {
43183 unsigned int a, b;
43184 - a = *(unsigned int *)p;
43185 - b = *(unsigned int *)q;
43186 + a = *(const unsigned int *)p;
43187 + b = *(const unsigned int *)q;
43188 if (a > b)
43189 return 1;
43190 if (a < b)
43191 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
43192 index 7e6c52d..94bc756 100644
43193 --- a/fs/configfs/dir.c
43194 +++ b/fs/configfs/dir.c
43195 @@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43196 }
43197 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
43198 struct configfs_dirent *next;
43199 - const char * name;
43200 + const unsigned char * name;
43201 + char d_name[sizeof(next->s_dentry->d_iname)];
43202 int len;
43203 struct inode *inode = NULL;
43204
43205 @@ -1574,7 +1575,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43206 continue;
43207
43208 name = configfs_get_name(next);
43209 - len = strlen(name);
43210 + if (next->s_dentry && name == next->s_dentry->d_iname) {
43211 + len = next->s_dentry->d_name.len;
43212 + memcpy(d_name, name, len);
43213 + name = d_name;
43214 + } else
43215 + len = strlen(name);
43216
43217 /*
43218 * We'll have a dentry and an inode for
43219 diff --git a/fs/dcache.c b/fs/dcache.c
43220 index b80531c..8ca7e2d 100644
43221 --- a/fs/dcache.c
43222 +++ b/fs/dcache.c
43223 @@ -3084,7 +3084,7 @@ void __init vfs_caches_init(unsigned long mempages)
43224 mempages -= reserve;
43225
43226 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43227 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43228 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43229
43230 dcache_init();
43231 inode_init();
43232 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
43233 index b80bc84..0d46d1a 100644
43234 --- a/fs/debugfs/inode.c
43235 +++ b/fs/debugfs/inode.c
43236 @@ -408,7 +408,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
43237 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
43238 {
43239 return debugfs_create_file(name,
43240 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43241 + S_IFDIR | S_IRWXU,
43242 +#else
43243 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43244 +#endif
43245 parent, NULL, NULL);
43246 }
43247 EXPORT_SYMBOL_GPL(debugfs_create_dir);
43248 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
43249 index ab35b11..b30af66 100644
43250 --- a/fs/ecryptfs/inode.c
43251 +++ b/fs/ecryptfs/inode.c
43252 @@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43253 old_fs = get_fs();
43254 set_fs(get_ds());
43255 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43256 - (char __user *)lower_buf,
43257 + (char __force_user *)lower_buf,
43258 lower_bufsiz);
43259 set_fs(old_fs);
43260 if (rc < 0)
43261 @@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43262 }
43263 old_fs = get_fs();
43264 set_fs(get_ds());
43265 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
43266 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43267 set_fs(old_fs);
43268 if (rc < 0) {
43269 kfree(buf);
43270 @@ -733,7 +733,7 @@ out:
43271 static void
43272 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43273 {
43274 - char *buf = nd_get_link(nd);
43275 + const char *buf = nd_get_link(nd);
43276 if (!IS_ERR(buf)) {
43277 /* Free the char* */
43278 kfree(buf);
43279 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
43280 index 3a06f40..f7af544 100644
43281 --- a/fs/ecryptfs/miscdev.c
43282 +++ b/fs/ecryptfs/miscdev.c
43283 @@ -345,7 +345,7 @@ check_list:
43284 goto out_unlock_msg_ctx;
43285 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
43286 if (msg_ctx->msg) {
43287 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
43288 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43289 goto out_unlock_msg_ctx;
43290 i += packet_length_size;
43291 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
43292 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
43293 index b2a34a1..162fa69 100644
43294 --- a/fs/ecryptfs/read_write.c
43295 +++ b/fs/ecryptfs/read_write.c
43296 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43297 return -EIO;
43298 fs_save = get_fs();
43299 set_fs(get_ds());
43300 - rc = vfs_write(lower_file, data, size, &offset);
43301 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43302 set_fs(fs_save);
43303 mark_inode_dirty_sync(ecryptfs_inode);
43304 return rc;
43305 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43306 return -EIO;
43307 fs_save = get_fs();
43308 set_fs(get_ds());
43309 - rc = vfs_read(lower_file, data, size, &offset);
43310 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43311 set_fs(fs_save);
43312 return rc;
43313 }
43314 diff --git a/fs/exec.c b/fs/exec.c
43315 index b1fd202..ba60b98 100644
43316 --- a/fs/exec.c
43317 +++ b/fs/exec.c
43318 @@ -55,6 +55,15 @@
43319 #include <linux/pipe_fs_i.h>
43320 #include <linux/oom.h>
43321 #include <linux/compat.h>
43322 +#include <linux/random.h>
43323 +#include <linux/seq_file.h>
43324 +
43325 +#ifdef CONFIG_PAX_REFCOUNT
43326 +#include <linux/kallsyms.h>
43327 +#include <linux/kdebug.h>
43328 +#endif
43329 +
43330 +#include <trace/events/fs.h>
43331
43332 #include <asm/uaccess.h>
43333 #include <asm/mmu_context.h>
43334 @@ -66,6 +75,15 @@
43335
43336 #include <trace/events/sched.h>
43337
43338 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
43339 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
43340 +#endif
43341 +
43342 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43343 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43344 +EXPORT_SYMBOL(pax_set_initial_flags_func);
43345 +#endif
43346 +
43347 int core_uses_pid;
43348 char core_pattern[CORENAME_MAX_SIZE] = "core";
43349 unsigned int core_pipe_limit;
43350 @@ -75,7 +93,7 @@ struct core_name {
43351 char *corename;
43352 int used, size;
43353 };
43354 -static atomic_t call_count = ATOMIC_INIT(1);
43355 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43356
43357 /* The maximal length of core_pattern is also specified in sysctl.c */
43358
43359 @@ -191,18 +209,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43360 int write)
43361 {
43362 struct page *page;
43363 - int ret;
43364
43365 -#ifdef CONFIG_STACK_GROWSUP
43366 - if (write) {
43367 - ret = expand_downwards(bprm->vma, pos);
43368 - if (ret < 0)
43369 - return NULL;
43370 - }
43371 -#endif
43372 - ret = get_user_pages(current, bprm->mm, pos,
43373 - 1, write, 1, &page, NULL);
43374 - if (ret <= 0)
43375 + if (0 > expand_downwards(bprm->vma, pos))
43376 + return NULL;
43377 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
43378 return NULL;
43379
43380 if (write) {
43381 @@ -218,6 +228,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43382 if (size <= ARG_MAX)
43383 return page;
43384
43385 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43386 + // only allow 512KB for argv+env on suid/sgid binaries
43387 + // to prevent easy ASLR exhaustion
43388 + if (((bprm->cred->euid != current_euid()) ||
43389 + (bprm->cred->egid != current_egid())) &&
43390 + (size > (512 * 1024))) {
43391 + put_page(page);
43392 + return NULL;
43393 + }
43394 +#endif
43395 +
43396 /*
43397 * Limit to 1/4-th the stack size for the argv+env strings.
43398 * This ensures that:
43399 @@ -277,6 +298,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43400 vma->vm_end = STACK_TOP_MAX;
43401 vma->vm_start = vma->vm_end - PAGE_SIZE;
43402 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43403 +
43404 +#ifdef CONFIG_PAX_SEGMEXEC
43405 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43406 +#endif
43407 +
43408 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
43409 INIT_LIST_HEAD(&vma->anon_vma_chain);
43410
43411 @@ -291,6 +317,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43412 mm->stack_vm = mm->total_vm = 1;
43413 up_write(&mm->mmap_sem);
43414 bprm->p = vma->vm_end - sizeof(void *);
43415 +
43416 +#ifdef CONFIG_PAX_RANDUSTACK
43417 + if (randomize_va_space)
43418 + bprm->p ^= random32() & ~PAGE_MASK;
43419 +#endif
43420 +
43421 return 0;
43422 err:
43423 up_write(&mm->mmap_sem);
43424 @@ -399,19 +431,7 @@ err:
43425 return err;
43426 }
43427
43428 -struct user_arg_ptr {
43429 -#ifdef CONFIG_COMPAT
43430 - bool is_compat;
43431 -#endif
43432 - union {
43433 - const char __user *const __user *native;
43434 -#ifdef CONFIG_COMPAT
43435 - compat_uptr_t __user *compat;
43436 -#endif
43437 - } ptr;
43438 -};
43439 -
43440 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43441 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43442 {
43443 const char __user *native;
43444
43445 @@ -420,14 +440,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43446 compat_uptr_t compat;
43447
43448 if (get_user(compat, argv.ptr.compat + nr))
43449 - return ERR_PTR(-EFAULT);
43450 + return (const char __force_user *)ERR_PTR(-EFAULT);
43451
43452 return compat_ptr(compat);
43453 }
43454 #endif
43455
43456 if (get_user(native, argv.ptr.native + nr))
43457 - return ERR_PTR(-EFAULT);
43458 + return (const char __force_user *)ERR_PTR(-EFAULT);
43459
43460 return native;
43461 }
43462 @@ -446,7 +466,7 @@ static int count(struct user_arg_ptr argv, int max)
43463 if (!p)
43464 break;
43465
43466 - if (IS_ERR(p))
43467 + if (IS_ERR((const char __force_kernel *)p))
43468 return -EFAULT;
43469
43470 if (i++ >= max)
43471 @@ -480,7 +500,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43472
43473 ret = -EFAULT;
43474 str = get_user_arg_ptr(argv, argc);
43475 - if (IS_ERR(str))
43476 + if (IS_ERR((const char __force_kernel *)str))
43477 goto out;
43478
43479 len = strnlen_user(str, MAX_ARG_STRLEN);
43480 @@ -562,7 +582,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43481 int r;
43482 mm_segment_t oldfs = get_fs();
43483 struct user_arg_ptr argv = {
43484 - .ptr.native = (const char __user *const __user *)__argv,
43485 + .ptr.native = (const char __force_user *const __force_user *)__argv,
43486 };
43487
43488 set_fs(KERNEL_DS);
43489 @@ -597,7 +617,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43490 unsigned long new_end = old_end - shift;
43491 struct mmu_gather tlb;
43492
43493 - BUG_ON(new_start > new_end);
43494 + if (new_start >= new_end || new_start < mmap_min_addr)
43495 + return -ENOMEM;
43496
43497 /*
43498 * ensure there are no vmas between where we want to go
43499 @@ -606,6 +627,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43500 if (vma != find_vma(mm, new_start))
43501 return -EFAULT;
43502
43503 +#ifdef CONFIG_PAX_SEGMEXEC
43504 + BUG_ON(pax_find_mirror_vma(vma));
43505 +#endif
43506 +
43507 /*
43508 * cover the whole range: [new_start, old_end)
43509 */
43510 @@ -686,10 +711,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43511 stack_top = arch_align_stack(stack_top);
43512 stack_top = PAGE_ALIGN(stack_top);
43513
43514 - if (unlikely(stack_top < mmap_min_addr) ||
43515 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43516 - return -ENOMEM;
43517 -
43518 stack_shift = vma->vm_end - stack_top;
43519
43520 bprm->p -= stack_shift;
43521 @@ -701,8 +722,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43522 bprm->exec -= stack_shift;
43523
43524 down_write(&mm->mmap_sem);
43525 +
43526 + /* Move stack pages down in memory. */
43527 + if (stack_shift) {
43528 + ret = shift_arg_pages(vma, stack_shift);
43529 + if (ret)
43530 + goto out_unlock;
43531 + }
43532 +
43533 vm_flags = VM_STACK_FLAGS;
43534
43535 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43536 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43537 + vm_flags &= ~VM_EXEC;
43538 +
43539 +#ifdef CONFIG_PAX_MPROTECT
43540 + if (mm->pax_flags & MF_PAX_MPROTECT)
43541 + vm_flags &= ~VM_MAYEXEC;
43542 +#endif
43543 +
43544 + }
43545 +#endif
43546 +
43547 /*
43548 * Adjust stack execute permissions; explicitly enable for
43549 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
43550 @@ -721,13 +762,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43551 goto out_unlock;
43552 BUG_ON(prev != vma);
43553
43554 - /* Move stack pages down in memory. */
43555 - if (stack_shift) {
43556 - ret = shift_arg_pages(vma, stack_shift);
43557 - if (ret)
43558 - goto out_unlock;
43559 - }
43560 -
43561 /* mprotect_fixup is overkill to remove the temporary stack flags */
43562 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43563
43564 @@ -785,6 +819,8 @@ struct file *open_exec(const char *name)
43565
43566 fsnotify_open(file);
43567
43568 + trace_open_exec(name);
43569 +
43570 err = deny_write_access(file);
43571 if (err)
43572 goto exit;
43573 @@ -808,7 +844,7 @@ int kernel_read(struct file *file, loff_t offset,
43574 old_fs = get_fs();
43575 set_fs(get_ds());
43576 /* The cast to a user pointer is valid due to the set_fs() */
43577 - result = vfs_read(file, (void __user *)addr, count, &pos);
43578 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
43579 set_fs(old_fs);
43580 return result;
43581 }
43582 @@ -1254,7 +1290,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
43583 }
43584 rcu_read_unlock();
43585
43586 - if (p->fs->users > n_fs) {
43587 + if (atomic_read(&p->fs->users) > n_fs) {
43588 bprm->unsafe |= LSM_UNSAFE_SHARE;
43589 } else {
43590 res = -EAGAIN;
43591 @@ -1451,6 +1487,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
43592
43593 EXPORT_SYMBOL(search_binary_handler);
43594
43595 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43596 +static DEFINE_PER_CPU(u64, exec_counter);
43597 +static int __init init_exec_counters(void)
43598 +{
43599 + unsigned int cpu;
43600 +
43601 + for_each_possible_cpu(cpu) {
43602 + per_cpu(exec_counter, cpu) = (u64)cpu;
43603 + }
43604 +
43605 + return 0;
43606 +}
43607 +early_initcall(init_exec_counters);
43608 +static inline void increment_exec_counter(void)
43609 +{
43610 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
43611 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
43612 +}
43613 +#else
43614 +static inline void increment_exec_counter(void) {}
43615 +#endif
43616 +
43617 /*
43618 * sys_execve() executes a new program.
43619 */
43620 @@ -1459,6 +1517,11 @@ static int do_execve_common(const char *filename,
43621 struct user_arg_ptr envp,
43622 struct pt_regs *regs)
43623 {
43624 +#ifdef CONFIG_GRKERNSEC
43625 + struct file *old_exec_file;
43626 + struct acl_subject_label *old_acl;
43627 + struct rlimit old_rlim[RLIM_NLIMITS];
43628 +#endif
43629 struct linux_binprm *bprm;
43630 struct file *file;
43631 struct files_struct *displaced;
43632 @@ -1466,6 +1529,8 @@ static int do_execve_common(const char *filename,
43633 int retval;
43634 const struct cred *cred = current_cred();
43635
43636 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
43637 +
43638 /*
43639 * We move the actual failure in case of RLIMIT_NPROC excess from
43640 * set*uid() to execve() because too many poorly written programs
43641 @@ -1506,12 +1571,27 @@ static int do_execve_common(const char *filename,
43642 if (IS_ERR(file))
43643 goto out_unmark;
43644
43645 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
43646 + retval = -EPERM;
43647 + goto out_file;
43648 + }
43649 +
43650 sched_exec();
43651
43652 bprm->file = file;
43653 bprm->filename = filename;
43654 bprm->interp = filename;
43655
43656 + if (gr_process_user_ban()) {
43657 + retval = -EPERM;
43658 + goto out_file;
43659 + }
43660 +
43661 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
43662 + retval = -EACCES;
43663 + goto out_file;
43664 + }
43665 +
43666 retval = bprm_mm_init(bprm);
43667 if (retval)
43668 goto out_file;
43669 @@ -1528,24 +1608,65 @@ static int do_execve_common(const char *filename,
43670 if (retval < 0)
43671 goto out;
43672
43673 +#ifdef CONFIG_GRKERNSEC
43674 + old_acl = current->acl;
43675 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
43676 + old_exec_file = current->exec_file;
43677 + get_file(file);
43678 + current->exec_file = file;
43679 +#endif
43680 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43681 + /* limit suid stack to 8MB
43682 + we saved the old limits above and will restore them if this exec fails
43683 + */
43684 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
43685 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
43686 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
43687 +#endif
43688 +
43689 + if (!gr_tpe_allow(file)) {
43690 + retval = -EACCES;
43691 + goto out_fail;
43692 + }
43693 +
43694 + if (gr_check_crash_exec(file)) {
43695 + retval = -EACCES;
43696 + goto out_fail;
43697 + }
43698 +
43699 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
43700 + bprm->unsafe);
43701 + if (retval < 0)
43702 + goto out_fail;
43703 +
43704 retval = copy_strings_kernel(1, &bprm->filename, bprm);
43705 if (retval < 0)
43706 - goto out;
43707 + goto out_fail;
43708
43709 bprm->exec = bprm->p;
43710 retval = copy_strings(bprm->envc, envp, bprm);
43711 if (retval < 0)
43712 - goto out;
43713 + goto out_fail;
43714
43715 retval = copy_strings(bprm->argc, argv, bprm);
43716 if (retval < 0)
43717 - goto out;
43718 + goto out_fail;
43719 +
43720 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
43721 +
43722 + gr_handle_exec_args(bprm, argv);
43723
43724 retval = search_binary_handler(bprm,regs);
43725 if (retval < 0)
43726 - goto out;
43727 + goto out_fail;
43728 +#ifdef CONFIG_GRKERNSEC
43729 + if (old_exec_file)
43730 + fput(old_exec_file);
43731 +#endif
43732
43733 /* execve succeeded */
43734 +
43735 + increment_exec_counter();
43736 current->fs->in_exec = 0;
43737 current->in_execve = 0;
43738 acct_update_integrals(current);
43739 @@ -1554,6 +1675,14 @@ static int do_execve_common(const char *filename,
43740 put_files_struct(displaced);
43741 return retval;
43742
43743 +out_fail:
43744 +#ifdef CONFIG_GRKERNSEC
43745 + current->acl = old_acl;
43746 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
43747 + fput(current->exec_file);
43748 + current->exec_file = old_exec_file;
43749 +#endif
43750 +
43751 out:
43752 if (bprm->mm) {
43753 acct_arg_size(bprm, 0);
43754 @@ -1627,7 +1756,7 @@ static int expand_corename(struct core_name *cn)
43755 {
43756 char *old_corename = cn->corename;
43757
43758 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
43759 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
43760 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
43761
43762 if (!cn->corename) {
43763 @@ -1724,7 +1853,7 @@ static int format_corename(struct core_name *cn, long signr)
43764 int pid_in_pattern = 0;
43765 int err = 0;
43766
43767 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
43768 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
43769 cn->corename = kmalloc(cn->size, GFP_KERNEL);
43770 cn->used = 0;
43771
43772 @@ -1821,6 +1950,228 @@ out:
43773 return ispipe;
43774 }
43775
43776 +int pax_check_flags(unsigned long *flags)
43777 +{
43778 + int retval = 0;
43779 +
43780 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
43781 + if (*flags & MF_PAX_SEGMEXEC)
43782 + {
43783 + *flags &= ~MF_PAX_SEGMEXEC;
43784 + retval = -EINVAL;
43785 + }
43786 +#endif
43787 +
43788 + if ((*flags & MF_PAX_PAGEEXEC)
43789 +
43790 +#ifdef CONFIG_PAX_PAGEEXEC
43791 + && (*flags & MF_PAX_SEGMEXEC)
43792 +#endif
43793 +
43794 + )
43795 + {
43796 + *flags &= ~MF_PAX_PAGEEXEC;
43797 + retval = -EINVAL;
43798 + }
43799 +
43800 + if ((*flags & MF_PAX_MPROTECT)
43801 +
43802 +#ifdef CONFIG_PAX_MPROTECT
43803 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43804 +#endif
43805 +
43806 + )
43807 + {
43808 + *flags &= ~MF_PAX_MPROTECT;
43809 + retval = -EINVAL;
43810 + }
43811 +
43812 + if ((*flags & MF_PAX_EMUTRAMP)
43813 +
43814 +#ifdef CONFIG_PAX_EMUTRAMP
43815 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43816 +#endif
43817 +
43818 + )
43819 + {
43820 + *flags &= ~MF_PAX_EMUTRAMP;
43821 + retval = -EINVAL;
43822 + }
43823 +
43824 + return retval;
43825 +}
43826 +
43827 +EXPORT_SYMBOL(pax_check_flags);
43828 +
43829 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43830 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
43831 +{
43832 + struct task_struct *tsk = current;
43833 + struct mm_struct *mm = current->mm;
43834 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
43835 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
43836 + char *path_exec = NULL;
43837 + char *path_fault = NULL;
43838 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
43839 +
43840 + if (buffer_exec && buffer_fault) {
43841 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
43842 +
43843 + down_read(&mm->mmap_sem);
43844 + vma = mm->mmap;
43845 + while (vma && (!vma_exec || !vma_fault)) {
43846 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
43847 + vma_exec = vma;
43848 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
43849 + vma_fault = vma;
43850 + vma = vma->vm_next;
43851 + }
43852 + if (vma_exec) {
43853 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
43854 + if (IS_ERR(path_exec))
43855 + path_exec = "<path too long>";
43856 + else {
43857 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
43858 + if (path_exec) {
43859 + *path_exec = 0;
43860 + path_exec = buffer_exec;
43861 + } else
43862 + path_exec = "<path too long>";
43863 + }
43864 + }
43865 + if (vma_fault) {
43866 + start = vma_fault->vm_start;
43867 + end = vma_fault->vm_end;
43868 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
43869 + if (vma_fault->vm_file) {
43870 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
43871 + if (IS_ERR(path_fault))
43872 + path_fault = "<path too long>";
43873 + else {
43874 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
43875 + if (path_fault) {
43876 + *path_fault = 0;
43877 + path_fault = buffer_fault;
43878 + } else
43879 + path_fault = "<path too long>";
43880 + }
43881 + } else
43882 + path_fault = "<anonymous mapping>";
43883 + }
43884 + up_read(&mm->mmap_sem);
43885 + }
43886 + if (tsk->signal->curr_ip)
43887 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
43888 + else
43889 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
43890 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
43891 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
43892 + task_uid(tsk), task_euid(tsk), pc, sp);
43893 + free_page((unsigned long)buffer_exec);
43894 + free_page((unsigned long)buffer_fault);
43895 + pax_report_insns(regs, pc, sp);
43896 + do_coredump(SIGKILL, SIGKILL, regs);
43897 +}
43898 +#endif
43899 +
43900 +#ifdef CONFIG_PAX_REFCOUNT
43901 +void pax_report_refcount_overflow(struct pt_regs *regs)
43902 +{
43903 + if (current->signal->curr_ip)
43904 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43905 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
43906 + else
43907 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43908 + current->comm, task_pid_nr(current), current_uid(), current_euid());
42909 + print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
43910 + show_regs(regs);
43911 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
43912 +}
43913 +#endif
43914 +
43915 +#ifdef CONFIG_PAX_USERCOPY
43916 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
43917 +int object_is_on_stack(const void *obj, unsigned long len)
43918 +{
43919 + const void * const stack = task_stack_page(current);
43920 + const void * const stackend = stack + THREAD_SIZE;
43921 +
43922 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43923 + const void *frame = NULL;
43924 + const void *oldframe;
43925 +#endif
43926 +
43927 + if (obj + len < obj)
43928 + return -1;
43929 +
43930 + if (obj + len <= stack || stackend <= obj)
43931 + return 0;
43932 +
43933 + if (obj < stack || stackend < obj + len)
43934 + return -1;
43935 +
43936 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43937 + oldframe = __builtin_frame_address(1);
43938 + if (oldframe)
43939 + frame = __builtin_frame_address(2);
43940 + /*
43941 + low ----------------------------------------------> high
43942 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
43943 + ^----------------^
43944 + allow copies only within here
43945 + */
43946 + while (stack <= frame && frame < stackend) {
43947 + /* if obj + len extends past the last frame, this
43948 + check won't pass and the next frame will be 0,
43949 + causing us to bail out and correctly report
43950 + the copy as invalid
43951 + */
43952 + if (obj + len <= frame)
43953 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
43954 + oldframe = frame;
43955 + frame = *(const void * const *)frame;
43956 + }
43957 + return -1;
43958 +#else
43959 + return 1;
43960 +#endif
43961 +}
43962 +
43963 +__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
43964 +{
43965 + if (current->signal->curr_ip)
43966 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
43967 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
43968 + else
43969 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
43970 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
43971 + dump_stack();
43972 + gr_handle_kernel_exploit();
43973 + do_group_exit(SIGKILL);
43974 +}
43975 +#endif
43976 +
43977 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
43978 +void pax_track_stack(void)
43979 +{
43980 + unsigned long sp = (unsigned long)&sp;
43981 + if (sp < current_thread_info()->lowest_stack &&
43982 + sp > (unsigned long)task_stack_page(current))
43983 + current_thread_info()->lowest_stack = sp;
43984 +}
43985 +EXPORT_SYMBOL(pax_track_stack);
43986 +#endif
43987 +
43988 +#ifdef CONFIG_PAX_SIZE_OVERFLOW
43989 +void report_size_overflow(const char *file, unsigned int line, const char *func)
43990 +{
43991 + printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
43992 + dump_stack();
43993 + do_group_exit(SIGKILL);
43994 +}
43995 +EXPORT_SYMBOL(report_size_overflow);
43996 +#endif
43997 +
43998 static int zap_process(struct task_struct *start, int exit_code)
43999 {
44000 struct task_struct *t;
44001 @@ -2018,17 +2369,17 @@ static void wait_for_dump_helpers(struct file *file)
44002 pipe = file->f_path.dentry->d_inode->i_pipe;
44003
44004 pipe_lock(pipe);
44005 - pipe->readers++;
44006 - pipe->writers--;
44007 + atomic_inc(&pipe->readers);
44008 + atomic_dec(&pipe->writers);
44009
44010 - while ((pipe->readers > 1) && (!signal_pending(current))) {
44011 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
44012 wake_up_interruptible_sync(&pipe->wait);
44013 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44014 pipe_wait(pipe);
44015 }
44016
44017 - pipe->readers--;
44018 - pipe->writers++;
44019 + atomic_dec(&pipe->readers);
44020 + atomic_inc(&pipe->writers);
44021 pipe_unlock(pipe);
44022
44023 }
44024 @@ -2089,7 +2440,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44025 int retval = 0;
44026 int flag = 0;
44027 int ispipe;
44028 - static atomic_t core_dump_count = ATOMIC_INIT(0);
44029 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
44030 struct coredump_params cprm = {
44031 .signr = signr,
44032 .regs = regs,
44033 @@ -2104,6 +2455,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44034
44035 audit_core_dumps(signr);
44036
44037 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
44038 + gr_handle_brute_attach(current, cprm.mm_flags);
44039 +
44040 binfmt = mm->binfmt;
44041 if (!binfmt || !binfmt->core_dump)
44042 goto fail;
44043 @@ -2171,7 +2525,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44044 }
44045 cprm.limit = RLIM_INFINITY;
44046
44047 - dump_count = atomic_inc_return(&core_dump_count);
44048 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
44049 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
44050 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
44051 task_tgid_vnr(current), current->comm);
44052 @@ -2198,6 +2552,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44053 } else {
44054 struct inode *inode;
44055
44056 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
44057 +
44058 if (cprm.limit < binfmt->min_coredump)
44059 goto fail_unlock;
44060
44061 @@ -2241,7 +2597,7 @@ close_fail:
44062 filp_close(cprm.file, NULL);
44063 fail_dropcount:
44064 if (ispipe)
44065 - atomic_dec(&core_dump_count);
44066 + atomic_dec_unchecked(&core_dump_count);
44067 fail_unlock:
44068 kfree(cn.corename);
44069 fail_corename:
44070 @@ -2260,7 +2616,7 @@ fail:
44071 */
44072 int dump_write(struct file *file, const void *addr, int nr)
44073 {
44074 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
44075 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
44076 }
44077 EXPORT_SYMBOL(dump_write);
44078
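The fs/exec.c hunks above convert the pipe reader/writer counts and the static core_dump_count to the unchecked atomic type that grsecurity/PaX introduce, and switch their accessors to the *_unchecked variants. A minimal stand-alone sketch of the idea follows; it is illustrative only and assumes the usual PAX_REFCOUNT behaviour (checked atomics trap on signed overflow, unchecked ones tolerate wraparound) rather than quoting kernel code. All names in the sketch are local to it.

/*
 * Illustrative sketch only -- not part of the patch above.
 * Conceptually, under PAX_REFCOUNT a "checked" counter refuses to
 * wrap past INT_MAX, while counters whose wraparound is harmless
 * (statistics, debug ids, pipe reader/writer counts) opt out via
 * an "unchecked" type so the overflow instrumentation skips them.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } checked_t;    /* stand-in for atomic_t           */
typedef struct { int counter; } unchecked_t;  /* stand-in for atomic_unchecked_t */

static void checked_inc(checked_t *v)
{
	int next;

	/* a checked increment detects signed overflow and bails out */
	if (__builtin_add_overflow(v->counter, 1, &next)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();               /* the kernel would kill the offending task */
	}
	v->counter = next;
}

static void unchecked_inc(unchecked_t *v)
{
	v->counter++;                  /* wraparound is tolerated here */
}

int main(void)
{
	checked_t ref = { 0 };
	unchecked_t stat = { 0 };

	checked_inc(&ref);             /* e.g. an object reference count      */
	unchecked_inc(&stat);          /* e.g. a core_dump_count-style tally  */
	printf("ref=%d stat=%d\n", ref.counter, stat.counter);
	return 0;
}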
44079 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
44080 index a8cbe1b..fed04cb 100644
44081 --- a/fs/ext2/balloc.c
44082 +++ b/fs/ext2/balloc.c
44083 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
44084
44085 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44086 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44087 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44088 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44089 sbi->s_resuid != current_fsuid() &&
44090 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44091 return 0;
44092 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
44093 index baac1b1..1499b62 100644
44094 --- a/fs/ext3/balloc.c
44095 +++ b/fs/ext3/balloc.c
44096 @@ -1438,9 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
44097
44098 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44099 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44100 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44101 + if (free_blocks < root_blocks + 1 &&
44102 !use_reservation && sbi->s_resuid != current_fsuid() &&
44103 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44104 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
44105 + !capable_nolog(CAP_SYS_RESOURCE)) {
44106 return 0;
44107 }
44108 return 1;
44109 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
44110 index 4bbd07a..a37bee6 100644
44111 --- a/fs/ext4/balloc.c
44112 +++ b/fs/ext4/balloc.c
44113 @@ -463,8 +463,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
44114 /* Hm, nope. Are (enough) root reserved clusters available? */
44115 if (sbi->s_resuid == current_fsuid() ||
44116 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
44117 - capable(CAP_SYS_RESOURCE) ||
44118 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
44119 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
44120 + capable_nolog(CAP_SYS_RESOURCE)) {
44121
44122 if (free_clusters >= (nclusters + dirty_clusters))
44123 return 1;
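The ext2/ext3/ext4 hunks above swap capable(CAP_SYS_RESOURCE) for capable_nolog() (which, as used throughout this patch, appears to perform the same capability test without grsecurity's capability-use logging) and, in ext3/ext4, move that test to the end of the condition so it is only evaluated once the cheaper uid/gid/flags tests have failed. A small sketch of that short-circuit ordering follows, using made-up helper names; it mirrors the ext4-style OR chain, not the kernel code itself.

/*
 * Illustrative sketch only.  With C's short-circuit evaluation the
 * (potentially logging) capability check is consulted last, only
 * when every cheaper test has already failed.
 */
#include <stdbool.h>
#include <stdio.h>

static bool capability_check(void)
{
	puts("capability check consulted");   /* imagine an audit entry here */
	return false;
}

static bool reserved_blocks_ok(bool uid_ok, bool gid_ok, bool use_root_flag)
{
	/* cheaper tests first, capability check last */
	return uid_ok || gid_ok || use_root_flag || capability_check();
}

int main(void)
{
	/* uid matches: the capability check is never reached */
	printf("%d\n", reserved_blocks_ok(true, false, false));
	/* nothing matches: only now does the capability check run */
	printf("%d\n", reserved_blocks_ok(false, false, false));
	return 0;
}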
44124 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44125 index 0e01e90..ae2bd5e 100644
44126 --- a/fs/ext4/ext4.h
44127 +++ b/fs/ext4/ext4.h
44128 @@ -1225,19 +1225,19 @@ struct ext4_sb_info {
44129 unsigned long s_mb_last_start;
44130
44131 /* stats for buddy allocator */
44132 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
44133 - atomic_t s_bal_success; /* we found long enough chunks */
44134 - atomic_t s_bal_allocated; /* in blocks */
44135 - atomic_t s_bal_ex_scanned; /* total extents scanned */
44136 - atomic_t s_bal_goals; /* goal hits */
44137 - atomic_t s_bal_breaks; /* too long searches */
44138 - atomic_t s_bal_2orders; /* 2^order hits */
44139 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
44140 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
44141 + atomic_unchecked_t s_bal_allocated; /* in blocks */
44142 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
44143 + atomic_unchecked_t s_bal_goals; /* goal hits */
44144 + atomic_unchecked_t s_bal_breaks; /* too long searches */
44145 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
44146 spinlock_t s_bal_lock;
44147 unsigned long s_mb_buddies_generated;
44148 unsigned long long s_mb_generation_time;
44149 - atomic_t s_mb_lost_chunks;
44150 - atomic_t s_mb_preallocated;
44151 - atomic_t s_mb_discarded;
44152 + atomic_unchecked_t s_mb_lost_chunks;
44153 + atomic_unchecked_t s_mb_preallocated;
44154 + atomic_unchecked_t s_mb_discarded;
44155 atomic_t s_lock_busy;
44156
44157 /* locality groups */
44158 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
44159 index 6b0a57e..1955a44 100644
44160 --- a/fs/ext4/mballoc.c
44161 +++ b/fs/ext4/mballoc.c
44162 @@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
44163 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
44164
44165 if (EXT4_SB(sb)->s_mb_stats)
44166 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
44167 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
44168
44169 break;
44170 }
44171 @@ -2041,7 +2041,7 @@ repeat:
44172 ac->ac_status = AC_STATUS_CONTINUE;
44173 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44174 cr = 3;
44175 - atomic_inc(&sbi->s_mb_lost_chunks);
44176 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44177 goto repeat;
44178 }
44179 }
44180 @@ -2545,25 +2545,25 @@ int ext4_mb_release(struct super_block *sb)
44181 if (sbi->s_mb_stats) {
44182 ext4_msg(sb, KERN_INFO,
44183 "mballoc: %u blocks %u reqs (%u success)",
44184 - atomic_read(&sbi->s_bal_allocated),
44185 - atomic_read(&sbi->s_bal_reqs),
44186 - atomic_read(&sbi->s_bal_success));
44187 + atomic_read_unchecked(&sbi->s_bal_allocated),
44188 + atomic_read_unchecked(&sbi->s_bal_reqs),
44189 + atomic_read_unchecked(&sbi->s_bal_success));
44190 ext4_msg(sb, KERN_INFO,
44191 "mballoc: %u extents scanned, %u goal hits, "
44192 "%u 2^N hits, %u breaks, %u lost",
44193 - atomic_read(&sbi->s_bal_ex_scanned),
44194 - atomic_read(&sbi->s_bal_goals),
44195 - atomic_read(&sbi->s_bal_2orders),
44196 - atomic_read(&sbi->s_bal_breaks),
44197 - atomic_read(&sbi->s_mb_lost_chunks));
44198 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44199 + atomic_read_unchecked(&sbi->s_bal_goals),
44200 + atomic_read_unchecked(&sbi->s_bal_2orders),
44201 + atomic_read_unchecked(&sbi->s_bal_breaks),
44202 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44203 ext4_msg(sb, KERN_INFO,
44204 "mballoc: %lu generated and it took %Lu",
44205 sbi->s_mb_buddies_generated,
44206 sbi->s_mb_generation_time);
44207 ext4_msg(sb, KERN_INFO,
44208 "mballoc: %u preallocated, %u discarded",
44209 - atomic_read(&sbi->s_mb_preallocated),
44210 - atomic_read(&sbi->s_mb_discarded));
44211 + atomic_read_unchecked(&sbi->s_mb_preallocated),
44212 + atomic_read_unchecked(&sbi->s_mb_discarded));
44213 }
44214
44215 free_percpu(sbi->s_locality_groups);
44216 @@ -3045,16 +3045,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44217 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44218
44219 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44220 - atomic_inc(&sbi->s_bal_reqs);
44221 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44222 + atomic_inc_unchecked(&sbi->s_bal_reqs);
44223 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44224 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44225 - atomic_inc(&sbi->s_bal_success);
44226 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44227 + atomic_inc_unchecked(&sbi->s_bal_success);
44228 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44229 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44230 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44231 - atomic_inc(&sbi->s_bal_goals);
44232 + atomic_inc_unchecked(&sbi->s_bal_goals);
44233 if (ac->ac_found > sbi->s_mb_max_to_scan)
44234 - atomic_inc(&sbi->s_bal_breaks);
44235 + atomic_inc_unchecked(&sbi->s_bal_breaks);
44236 }
44237
44238 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
44239 @@ -3458,7 +3458,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44240 trace_ext4_mb_new_inode_pa(ac, pa);
44241
44242 ext4_mb_use_inode_pa(ac, pa);
44243 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
44244 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
44245
44246 ei = EXT4_I(ac->ac_inode);
44247 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44248 @@ -3518,7 +3518,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44249 trace_ext4_mb_new_group_pa(ac, pa);
44250
44251 ext4_mb_use_group_pa(ac, pa);
44252 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44253 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44254
44255 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44256 lg = ac->ac_lg;
44257 @@ -3607,7 +3607,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44258 * from the bitmap and continue.
44259 */
44260 }
44261 - atomic_add(free, &sbi->s_mb_discarded);
44262 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
44263
44264 return err;
44265 }
44266 @@ -3625,7 +3625,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44267 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44268 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44269 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44270 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44271 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44272 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44273
44274 return 0;
44275 diff --git a/fs/fcntl.c b/fs/fcntl.c
44276 index 75e7c1f..1eb3e4d 100644
44277 --- a/fs/fcntl.c
44278 +++ b/fs/fcntl.c
44279 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44280 if (err)
44281 return err;
44282
44283 + if (gr_handle_chroot_fowner(pid, type))
44284 + return -ENOENT;
44285 + if (gr_check_protected_task_fowner(pid, type))
44286 + return -EACCES;
44287 +
44288 f_modown(filp, pid, type, force);
44289 return 0;
44290 }
44291 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44292
44293 static int f_setown_ex(struct file *filp, unsigned long arg)
44294 {
44295 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44296 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44297 struct f_owner_ex owner;
44298 struct pid *pid;
44299 int type;
44300 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44301
44302 static int f_getown_ex(struct file *filp, unsigned long arg)
44303 {
44304 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44305 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44306 struct f_owner_ex owner;
44307 int ret = 0;
44308
44309 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44310 switch (cmd) {
44311 case F_DUPFD:
44312 case F_DUPFD_CLOEXEC:
44313 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
44314 if (arg >= rlimit(RLIMIT_NOFILE))
44315 break;
44316 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44317 diff --git a/fs/fifo.c b/fs/fifo.c
44318 index b1a524d..4ee270e 100644
44319 --- a/fs/fifo.c
44320 +++ b/fs/fifo.c
44321 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44322 */
44323 filp->f_op = &read_pipefifo_fops;
44324 pipe->r_counter++;
44325 - if (pipe->readers++ == 0)
44326 + if (atomic_inc_return(&pipe->readers) == 1)
44327 wake_up_partner(inode);
44328
44329 - if (!pipe->writers) {
44330 + if (!atomic_read(&pipe->writers)) {
44331 if ((filp->f_flags & O_NONBLOCK)) {
44332 /* suppress POLLHUP until we have
44333 * seen a writer */
44334 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44335 * errno=ENXIO when there is no process reading the FIFO.
44336 */
44337 ret = -ENXIO;
44338 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44339 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44340 goto err;
44341
44342 filp->f_op = &write_pipefifo_fops;
44343 pipe->w_counter++;
44344 - if (!pipe->writers++)
44345 + if (atomic_inc_return(&pipe->writers) == 1)
44346 wake_up_partner(inode);
44347
44348 - if (!pipe->readers) {
44349 + if (!atomic_read(&pipe->readers)) {
44350 wait_for_partner(inode, &pipe->r_counter);
44351 if (signal_pending(current))
44352 goto err_wr;
44353 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44354 */
44355 filp->f_op = &rdwr_pipefifo_fops;
44356
44357 - pipe->readers++;
44358 - pipe->writers++;
44359 + atomic_inc(&pipe->readers);
44360 + atomic_inc(&pipe->writers);
44361 pipe->r_counter++;
44362 pipe->w_counter++;
44363 - if (pipe->readers == 1 || pipe->writers == 1)
44364 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44365 wake_up_partner(inode);
44366 break;
44367
44368 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44369 return 0;
44370
44371 err_rd:
44372 - if (!--pipe->readers)
44373 + if (atomic_dec_and_test(&pipe->readers))
44374 wake_up_interruptible(&pipe->wait);
44375 ret = -ERESTARTSYS;
44376 goto err;
44377
44378 err_wr:
44379 - if (!--pipe->writers)
44380 + if (atomic_dec_and_test(&pipe->writers))
44381 wake_up_interruptible(&pipe->wait);
44382 ret = -ERESTARTSYS;
44383 goto err;
44384
44385 err:
44386 - if (!pipe->readers && !pipe->writers)
44387 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
44388 free_pipe_info(inode);
44389
44390 err_nocleanup:
44391 diff --git a/fs/file.c b/fs/file.c
44392 index ba3f605..fade102 100644
44393 --- a/fs/file.c
44394 +++ b/fs/file.c
44395 @@ -15,6 +15,7 @@
44396 #include <linux/slab.h>
44397 #include <linux/vmalloc.h>
44398 #include <linux/file.h>
44399 +#include <linux/security.h>
44400 #include <linux/fdtable.h>
44401 #include <linux/bitops.h>
44402 #include <linux/interrupt.h>
44403 @@ -255,6 +256,7 @@ int expand_files(struct files_struct *files, int nr)
44404 * N.B. For clone tasks sharing a files structure, this test
44405 * will limit the total number of files that can be opened.
44406 */
44407 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
44408 if (nr >= rlimit(RLIMIT_NOFILE))
44409 return -EMFILE;
44410
44411 diff --git a/fs/filesystems.c b/fs/filesystems.c
44412 index 96f2428..f5eeb8e 100644
44413 --- a/fs/filesystems.c
44414 +++ b/fs/filesystems.c
44415 @@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
44416 int len = dot ? dot - name : strlen(name);
44417
44418 fs = __get_fs_type(name, len);
44419 +
44420 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
44421 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44422 +#else
44423 if (!fs && (request_module("%.*s", len, name) == 0))
44424 +#endif
44425 fs = __get_fs_type(name, len);
44426
44427 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44428 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44429 index e159e68..e7d2a6f 100644
44430 --- a/fs/fs_struct.c
44431 +++ b/fs/fs_struct.c
44432 @@ -4,6 +4,7 @@
44433 #include <linux/path.h>
44434 #include <linux/slab.h>
44435 #include <linux/fs_struct.h>
44436 +#include <linux/grsecurity.h>
44437 #include "internal.h"
44438
44439 static inline void path_get_longterm(struct path *path)
44440 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
44441 write_seqcount_begin(&fs->seq);
44442 old_root = fs->root;
44443 fs->root = *path;
44444 + gr_set_chroot_entries(current, path);
44445 write_seqcount_end(&fs->seq);
44446 spin_unlock(&fs->lock);
44447 if (old_root.dentry)
44448 @@ -65,6 +67,17 @@ static inline int replace_path(struct path *p, const struct path *old, const str
44449 return 1;
44450 }
44451
44452 +static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
44453 +{
44454 + if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
44455 + return 0;
44456 + *p = *new;
44457 +
44458 + gr_set_chroot_entries(task, new);
44459 +
44460 + return 1;
44461 +}
44462 +
44463 void chroot_fs_refs(struct path *old_root, struct path *new_root)
44464 {
44465 struct task_struct *g, *p;
44466 @@ -79,7 +92,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
44467 int hits = 0;
44468 spin_lock(&fs->lock);
44469 write_seqcount_begin(&fs->seq);
44470 - hits += replace_path(&fs->root, old_root, new_root);
44471 + hits += replace_root_path(p, &fs->root, old_root, new_root);
44472 hits += replace_path(&fs->pwd, old_root, new_root);
44473 write_seqcount_end(&fs->seq);
44474 while (hits--) {
44475 @@ -111,7 +124,8 @@ void exit_fs(struct task_struct *tsk)
44476 task_lock(tsk);
44477 spin_lock(&fs->lock);
44478 tsk->fs = NULL;
44479 - kill = !--fs->users;
44480 + gr_clear_chroot_entries(tsk);
44481 + kill = !atomic_dec_return(&fs->users);
44482 spin_unlock(&fs->lock);
44483 task_unlock(tsk);
44484 if (kill)
44485 @@ -124,7 +138,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44486 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44487 /* We don't need to lock fs - think why ;-) */
44488 if (fs) {
44489 - fs->users = 1;
44490 + atomic_set(&fs->users, 1);
44491 fs->in_exec = 0;
44492 spin_lock_init(&fs->lock);
44493 seqcount_init(&fs->seq);
44494 @@ -133,6 +147,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44495 spin_lock(&old->lock);
44496 fs->root = old->root;
44497 path_get_longterm(&fs->root);
44498 + /* instead of calling gr_set_chroot_entries here,
44499 + we call it from every caller of this function
44500 + */
44501 fs->pwd = old->pwd;
44502 path_get_longterm(&fs->pwd);
44503 spin_unlock(&old->lock);
44504 @@ -151,8 +168,9 @@ int unshare_fs_struct(void)
44505
44506 task_lock(current);
44507 spin_lock(&fs->lock);
44508 - kill = !--fs->users;
44509 + kill = !atomic_dec_return(&fs->users);
44510 current->fs = new_fs;
44511 + gr_set_chroot_entries(current, &new_fs->root);
44512 spin_unlock(&fs->lock);
44513 task_unlock(current);
44514
44515 @@ -165,13 +183,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
44516
44517 int current_umask(void)
44518 {
44519 - return current->fs->umask;
44520 + return current->fs->umask | gr_acl_umask();
44521 }
44522 EXPORT_SYMBOL(current_umask);
44523
44524 /* to be mentioned only in INIT_TASK */
44525 struct fs_struct init_fs = {
44526 - .users = 1,
44527 + .users = ATOMIC_INIT(1),
44528 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44529 .seq = SEQCNT_ZERO,
44530 .umask = 0022,
44531 @@ -187,12 +205,13 @@ void daemonize_fs_struct(void)
44532 task_lock(current);
44533
44534 spin_lock(&init_fs.lock);
44535 - init_fs.users++;
44536 + atomic_inc(&init_fs.users);
44537 spin_unlock(&init_fs.lock);
44538
44539 spin_lock(&fs->lock);
44540 current->fs = &init_fs;
44541 - kill = !--fs->users;
44542 + gr_set_chroot_entries(current, &current->fs->root);
44543 + kill = !atomic_dec_return(&fs->users);
44544 spin_unlock(&fs->lock);
44545
44546 task_unlock(current);
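The current_umask() hunk above ORs gr_acl_umask() into the process umask. Since mode bits are cleared with mode & ~umask, OR-ing policy bits into the umask can only tighten permissions, never widen them. A tiny sketch with example values follows; the helper and the numbers are illustrative, not from the patch.

/*
 * Illustrative sketch only.  Shows that OR-ing a policy umask into
 * the process umask can only remove permission bits from created
 * files, never grant extra ones.
 */
#include <stdio.h>
#include <sys/stat.h>

static mode_t apply_umask(mode_t mode, mode_t umask_bits)
{
	return mode & ~umask_bits;
}

int main(void)
{
	mode_t process_umask = 022;     /* what the task asked for            */
	mode_t policy_umask  = 007;     /* e.g. enforced by an ACL policy     */
	mode_t effective     = process_umask | policy_umask;

	printf("requested 0666 -> %04o\n", (unsigned)apply_umask(0666, process_umask));
	printf("enforced  0666 -> %04o\n", (unsigned)apply_umask(0666, effective));
	return 0;
}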
44547 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44548 index 9905350..02eaec4 100644
44549 --- a/fs/fscache/cookie.c
44550 +++ b/fs/fscache/cookie.c
44551 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
44552 parent ? (char *) parent->def->name : "<no-parent>",
44553 def->name, netfs_data);
44554
44555 - fscache_stat(&fscache_n_acquires);
44556 + fscache_stat_unchecked(&fscache_n_acquires);
44557
44558 /* if there's no parent cookie, then we don't create one here either */
44559 if (!parent) {
44560 - fscache_stat(&fscache_n_acquires_null);
44561 + fscache_stat_unchecked(&fscache_n_acquires_null);
44562 _leave(" [no parent]");
44563 return NULL;
44564 }
44565 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
44566 /* allocate and initialise a cookie */
44567 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
44568 if (!cookie) {
44569 - fscache_stat(&fscache_n_acquires_oom);
44570 + fscache_stat_unchecked(&fscache_n_acquires_oom);
44571 _leave(" [ENOMEM]");
44572 return NULL;
44573 }
44574 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44575
44576 switch (cookie->def->type) {
44577 case FSCACHE_COOKIE_TYPE_INDEX:
44578 - fscache_stat(&fscache_n_cookie_index);
44579 + fscache_stat_unchecked(&fscache_n_cookie_index);
44580 break;
44581 case FSCACHE_COOKIE_TYPE_DATAFILE:
44582 - fscache_stat(&fscache_n_cookie_data);
44583 + fscache_stat_unchecked(&fscache_n_cookie_data);
44584 break;
44585 default:
44586 - fscache_stat(&fscache_n_cookie_special);
44587 + fscache_stat_unchecked(&fscache_n_cookie_special);
44588 break;
44589 }
44590
44591 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44592 if (fscache_acquire_non_index_cookie(cookie) < 0) {
44593 atomic_dec(&parent->n_children);
44594 __fscache_cookie_put(cookie);
44595 - fscache_stat(&fscache_n_acquires_nobufs);
44596 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
44597 _leave(" = NULL");
44598 return NULL;
44599 }
44600 }
44601
44602 - fscache_stat(&fscache_n_acquires_ok);
44603 + fscache_stat_unchecked(&fscache_n_acquires_ok);
44604 _leave(" = %p", cookie);
44605 return cookie;
44606 }
44607 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
44608 cache = fscache_select_cache_for_object(cookie->parent);
44609 if (!cache) {
44610 up_read(&fscache_addremove_sem);
44611 - fscache_stat(&fscache_n_acquires_no_cache);
44612 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
44613 _leave(" = -ENOMEDIUM [no cache]");
44614 return -ENOMEDIUM;
44615 }
44616 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
44617 object = cache->ops->alloc_object(cache, cookie);
44618 fscache_stat_d(&fscache_n_cop_alloc_object);
44619 if (IS_ERR(object)) {
44620 - fscache_stat(&fscache_n_object_no_alloc);
44621 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
44622 ret = PTR_ERR(object);
44623 goto error;
44624 }
44625
44626 - fscache_stat(&fscache_n_object_alloc);
44627 + fscache_stat_unchecked(&fscache_n_object_alloc);
44628
44629 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
44630
44631 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
44632 struct fscache_object *object;
44633 struct hlist_node *_p;
44634
44635 - fscache_stat(&fscache_n_updates);
44636 + fscache_stat_unchecked(&fscache_n_updates);
44637
44638 if (!cookie) {
44639 - fscache_stat(&fscache_n_updates_null);
44640 + fscache_stat_unchecked(&fscache_n_updates_null);
44641 _leave(" [no cookie]");
44642 return;
44643 }
44644 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44645 struct fscache_object *object;
44646 unsigned long event;
44647
44648 - fscache_stat(&fscache_n_relinquishes);
44649 + fscache_stat_unchecked(&fscache_n_relinquishes);
44650 if (retire)
44651 - fscache_stat(&fscache_n_relinquishes_retire);
44652 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
44653
44654 if (!cookie) {
44655 - fscache_stat(&fscache_n_relinquishes_null);
44656 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
44657 _leave(" [no cookie]");
44658 return;
44659 }
44660 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44661
44662 /* wait for the cookie to finish being instantiated (or to fail) */
44663 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
44664 - fscache_stat(&fscache_n_relinquishes_waitcrt);
44665 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
44666 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
44667 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
44668 }
44669 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
44670 index f6aad48..88dcf26 100644
44671 --- a/fs/fscache/internal.h
44672 +++ b/fs/fscache/internal.h
44673 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
44674 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
44675 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
44676
44677 -extern atomic_t fscache_n_op_pend;
44678 -extern atomic_t fscache_n_op_run;
44679 -extern atomic_t fscache_n_op_enqueue;
44680 -extern atomic_t fscache_n_op_deferred_release;
44681 -extern atomic_t fscache_n_op_release;
44682 -extern atomic_t fscache_n_op_gc;
44683 -extern atomic_t fscache_n_op_cancelled;
44684 -extern atomic_t fscache_n_op_rejected;
44685 +extern atomic_unchecked_t fscache_n_op_pend;
44686 +extern atomic_unchecked_t fscache_n_op_run;
44687 +extern atomic_unchecked_t fscache_n_op_enqueue;
44688 +extern atomic_unchecked_t fscache_n_op_deferred_release;
44689 +extern atomic_unchecked_t fscache_n_op_release;
44690 +extern atomic_unchecked_t fscache_n_op_gc;
44691 +extern atomic_unchecked_t fscache_n_op_cancelled;
44692 +extern atomic_unchecked_t fscache_n_op_rejected;
44693
44694 -extern atomic_t fscache_n_attr_changed;
44695 -extern atomic_t fscache_n_attr_changed_ok;
44696 -extern atomic_t fscache_n_attr_changed_nobufs;
44697 -extern atomic_t fscache_n_attr_changed_nomem;
44698 -extern atomic_t fscache_n_attr_changed_calls;
44699 +extern atomic_unchecked_t fscache_n_attr_changed;
44700 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
44701 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
44702 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
44703 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
44704
44705 -extern atomic_t fscache_n_allocs;
44706 -extern atomic_t fscache_n_allocs_ok;
44707 -extern atomic_t fscache_n_allocs_wait;
44708 -extern atomic_t fscache_n_allocs_nobufs;
44709 -extern atomic_t fscache_n_allocs_intr;
44710 -extern atomic_t fscache_n_allocs_object_dead;
44711 -extern atomic_t fscache_n_alloc_ops;
44712 -extern atomic_t fscache_n_alloc_op_waits;
44713 +extern atomic_unchecked_t fscache_n_allocs;
44714 +extern atomic_unchecked_t fscache_n_allocs_ok;
44715 +extern atomic_unchecked_t fscache_n_allocs_wait;
44716 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
44717 +extern atomic_unchecked_t fscache_n_allocs_intr;
44718 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
44719 +extern atomic_unchecked_t fscache_n_alloc_ops;
44720 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
44721
44722 -extern atomic_t fscache_n_retrievals;
44723 -extern atomic_t fscache_n_retrievals_ok;
44724 -extern atomic_t fscache_n_retrievals_wait;
44725 -extern atomic_t fscache_n_retrievals_nodata;
44726 -extern atomic_t fscache_n_retrievals_nobufs;
44727 -extern atomic_t fscache_n_retrievals_intr;
44728 -extern atomic_t fscache_n_retrievals_nomem;
44729 -extern atomic_t fscache_n_retrievals_object_dead;
44730 -extern atomic_t fscache_n_retrieval_ops;
44731 -extern atomic_t fscache_n_retrieval_op_waits;
44732 +extern atomic_unchecked_t fscache_n_retrievals;
44733 +extern atomic_unchecked_t fscache_n_retrievals_ok;
44734 +extern atomic_unchecked_t fscache_n_retrievals_wait;
44735 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
44736 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
44737 +extern atomic_unchecked_t fscache_n_retrievals_intr;
44738 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
44739 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
44740 +extern atomic_unchecked_t fscache_n_retrieval_ops;
44741 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
44742
44743 -extern atomic_t fscache_n_stores;
44744 -extern atomic_t fscache_n_stores_ok;
44745 -extern atomic_t fscache_n_stores_again;
44746 -extern atomic_t fscache_n_stores_nobufs;
44747 -extern atomic_t fscache_n_stores_oom;
44748 -extern atomic_t fscache_n_store_ops;
44749 -extern atomic_t fscache_n_store_calls;
44750 -extern atomic_t fscache_n_store_pages;
44751 -extern atomic_t fscache_n_store_radix_deletes;
44752 -extern atomic_t fscache_n_store_pages_over_limit;
44753 +extern atomic_unchecked_t fscache_n_stores;
44754 +extern atomic_unchecked_t fscache_n_stores_ok;
44755 +extern atomic_unchecked_t fscache_n_stores_again;
44756 +extern atomic_unchecked_t fscache_n_stores_nobufs;
44757 +extern atomic_unchecked_t fscache_n_stores_oom;
44758 +extern atomic_unchecked_t fscache_n_store_ops;
44759 +extern atomic_unchecked_t fscache_n_store_calls;
44760 +extern atomic_unchecked_t fscache_n_store_pages;
44761 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
44762 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
44763
44764 -extern atomic_t fscache_n_store_vmscan_not_storing;
44765 -extern atomic_t fscache_n_store_vmscan_gone;
44766 -extern atomic_t fscache_n_store_vmscan_busy;
44767 -extern atomic_t fscache_n_store_vmscan_cancelled;
44768 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
44769 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
44770 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
44771 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
44772
44773 -extern atomic_t fscache_n_marks;
44774 -extern atomic_t fscache_n_uncaches;
44775 +extern atomic_unchecked_t fscache_n_marks;
44776 +extern atomic_unchecked_t fscache_n_uncaches;
44777
44778 -extern atomic_t fscache_n_acquires;
44779 -extern atomic_t fscache_n_acquires_null;
44780 -extern atomic_t fscache_n_acquires_no_cache;
44781 -extern atomic_t fscache_n_acquires_ok;
44782 -extern atomic_t fscache_n_acquires_nobufs;
44783 -extern atomic_t fscache_n_acquires_oom;
44784 +extern atomic_unchecked_t fscache_n_acquires;
44785 +extern atomic_unchecked_t fscache_n_acquires_null;
44786 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
44787 +extern atomic_unchecked_t fscache_n_acquires_ok;
44788 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
44789 +extern atomic_unchecked_t fscache_n_acquires_oom;
44790
44791 -extern atomic_t fscache_n_updates;
44792 -extern atomic_t fscache_n_updates_null;
44793 -extern atomic_t fscache_n_updates_run;
44794 +extern atomic_unchecked_t fscache_n_updates;
44795 +extern atomic_unchecked_t fscache_n_updates_null;
44796 +extern atomic_unchecked_t fscache_n_updates_run;
44797
44798 -extern atomic_t fscache_n_relinquishes;
44799 -extern atomic_t fscache_n_relinquishes_null;
44800 -extern atomic_t fscache_n_relinquishes_waitcrt;
44801 -extern atomic_t fscache_n_relinquishes_retire;
44802 +extern atomic_unchecked_t fscache_n_relinquishes;
44803 +extern atomic_unchecked_t fscache_n_relinquishes_null;
44804 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
44805 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
44806
44807 -extern atomic_t fscache_n_cookie_index;
44808 -extern atomic_t fscache_n_cookie_data;
44809 -extern atomic_t fscache_n_cookie_special;
44810 +extern atomic_unchecked_t fscache_n_cookie_index;
44811 +extern atomic_unchecked_t fscache_n_cookie_data;
44812 +extern atomic_unchecked_t fscache_n_cookie_special;
44813
44814 -extern atomic_t fscache_n_object_alloc;
44815 -extern atomic_t fscache_n_object_no_alloc;
44816 -extern atomic_t fscache_n_object_lookups;
44817 -extern atomic_t fscache_n_object_lookups_negative;
44818 -extern atomic_t fscache_n_object_lookups_positive;
44819 -extern atomic_t fscache_n_object_lookups_timed_out;
44820 -extern atomic_t fscache_n_object_created;
44821 -extern atomic_t fscache_n_object_avail;
44822 -extern atomic_t fscache_n_object_dead;
44823 +extern atomic_unchecked_t fscache_n_object_alloc;
44824 +extern atomic_unchecked_t fscache_n_object_no_alloc;
44825 +extern atomic_unchecked_t fscache_n_object_lookups;
44826 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
44827 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
44828 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
44829 +extern atomic_unchecked_t fscache_n_object_created;
44830 +extern atomic_unchecked_t fscache_n_object_avail;
44831 +extern atomic_unchecked_t fscache_n_object_dead;
44832
44833 -extern atomic_t fscache_n_checkaux_none;
44834 -extern atomic_t fscache_n_checkaux_okay;
44835 -extern atomic_t fscache_n_checkaux_update;
44836 -extern atomic_t fscache_n_checkaux_obsolete;
44837 +extern atomic_unchecked_t fscache_n_checkaux_none;
44838 +extern atomic_unchecked_t fscache_n_checkaux_okay;
44839 +extern atomic_unchecked_t fscache_n_checkaux_update;
44840 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
44841
44842 extern atomic_t fscache_n_cop_alloc_object;
44843 extern atomic_t fscache_n_cop_lookup_object;
44844 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
44845 atomic_inc(stat);
44846 }
44847
44848 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
44849 +{
44850 + atomic_inc_unchecked(stat);
44851 +}
44852 +
44853 static inline void fscache_stat_d(atomic_t *stat)
44854 {
44855 atomic_dec(stat);
44856 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
44857
44858 #define __fscache_stat(stat) (NULL)
44859 #define fscache_stat(stat) do {} while (0)
44860 +#define fscache_stat_unchecked(stat) do {} while (0)
44861 #define fscache_stat_d(stat) do {} while (0)
44862 #endif
44863
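The internal.h hunk above adds fscache_stat_unchecked() alongside fscache_stat(): an inline increment of an atomic_unchecked_t when CONFIG_FSCACHE_STATS is set, and a do-nothing macro otherwise, so the call sites patched below stay free of #ifdefs. A stand-alone sketch of that compile-time toggle pattern follows; STATS_ENABLED and the counter name are local to the sketch.

/*
 * Illustrative sketch only -- not the fscache code.  When statistics
 * are compiled in, stat_unchecked() increments a counter; when they
 * are compiled out, it becomes a do-nothing macro and the argument
 * expression disappears, so callers look identical either way.
 */
#include <stdio.h>

#define STATS_ENABLED 1

#if STATS_ENABLED
static long n_acquires;                 /* stand-in for fscache_n_acquires */
static inline void stat_unchecked(long *stat) { (*stat)++; }
#else
#define stat_unchecked(stat) do {} while (0)
#endif

int main(void)
{
	stat_unchecked(&n_acquires);    /* call sites need no #ifdef */
	stat_unchecked(&n_acquires);
#if STATS_ENABLED
	printf("acquires: %ld\n", n_acquires);
#endif
	return 0;
}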
44864 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
44865 index b6b897c..0ffff9c 100644
44866 --- a/fs/fscache/object.c
44867 +++ b/fs/fscache/object.c
44868 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44869 /* update the object metadata on disk */
44870 case FSCACHE_OBJECT_UPDATING:
44871 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
44872 - fscache_stat(&fscache_n_updates_run);
44873 + fscache_stat_unchecked(&fscache_n_updates_run);
44874 fscache_stat(&fscache_n_cop_update_object);
44875 object->cache->ops->update_object(object);
44876 fscache_stat_d(&fscache_n_cop_update_object);
44877 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44878 spin_lock(&object->lock);
44879 object->state = FSCACHE_OBJECT_DEAD;
44880 spin_unlock(&object->lock);
44881 - fscache_stat(&fscache_n_object_dead);
44882 + fscache_stat_unchecked(&fscache_n_object_dead);
44883 goto terminal_transit;
44884
44885 /* handle the parent cache of this object being withdrawn from
44886 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44887 spin_lock(&object->lock);
44888 object->state = FSCACHE_OBJECT_DEAD;
44889 spin_unlock(&object->lock);
44890 - fscache_stat(&fscache_n_object_dead);
44891 + fscache_stat_unchecked(&fscache_n_object_dead);
44892 goto terminal_transit;
44893
44894 /* complain about the object being woken up once it is
44895 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44896 parent->cookie->def->name, cookie->def->name,
44897 object->cache->tag->name);
44898
44899 - fscache_stat(&fscache_n_object_lookups);
44900 + fscache_stat_unchecked(&fscache_n_object_lookups);
44901 fscache_stat(&fscache_n_cop_lookup_object);
44902 ret = object->cache->ops->lookup_object(object);
44903 fscache_stat_d(&fscache_n_cop_lookup_object);
44904 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44905 if (ret == -ETIMEDOUT) {
44906 /* probably stuck behind another object, so move this one to
44907 * the back of the queue */
44908 - fscache_stat(&fscache_n_object_lookups_timed_out);
44909 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
44910 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44911 }
44912
44913 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
44914
44915 spin_lock(&object->lock);
44916 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44917 - fscache_stat(&fscache_n_object_lookups_negative);
44918 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
44919
44920 /* transit here to allow write requests to begin stacking up
44921 * and read requests to begin returning ENODATA */
44922 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
44923 * result, in which case there may be data available */
44924 spin_lock(&object->lock);
44925 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44926 - fscache_stat(&fscache_n_object_lookups_positive);
44927 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
44928
44929 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
44930
44931 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
44932 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44933 } else {
44934 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
44935 - fscache_stat(&fscache_n_object_created);
44936 + fscache_stat_unchecked(&fscache_n_object_created);
44937
44938 object->state = FSCACHE_OBJECT_AVAILABLE;
44939 spin_unlock(&object->lock);
44940 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
44941 fscache_enqueue_dependents(object);
44942
44943 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
44944 - fscache_stat(&fscache_n_object_avail);
44945 + fscache_stat_unchecked(&fscache_n_object_avail);
44946
44947 _leave("");
44948 }
44949 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44950 enum fscache_checkaux result;
44951
44952 if (!object->cookie->def->check_aux) {
44953 - fscache_stat(&fscache_n_checkaux_none);
44954 + fscache_stat_unchecked(&fscache_n_checkaux_none);
44955 return FSCACHE_CHECKAUX_OKAY;
44956 }
44957
44958 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44959 switch (result) {
44960 /* entry okay as is */
44961 case FSCACHE_CHECKAUX_OKAY:
44962 - fscache_stat(&fscache_n_checkaux_okay);
44963 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
44964 break;
44965
44966 /* entry requires update */
44967 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
44968 - fscache_stat(&fscache_n_checkaux_update);
44969 + fscache_stat_unchecked(&fscache_n_checkaux_update);
44970 break;
44971
44972 /* entry requires deletion */
44973 case FSCACHE_CHECKAUX_OBSOLETE:
44974 - fscache_stat(&fscache_n_checkaux_obsolete);
44975 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
44976 break;
44977
44978 default:
44979 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
44980 index 30afdfa..2256596 100644
44981 --- a/fs/fscache/operation.c
44982 +++ b/fs/fscache/operation.c
44983 @@ -17,7 +17,7 @@
44984 #include <linux/slab.h>
44985 #include "internal.h"
44986
44987 -atomic_t fscache_op_debug_id;
44988 +atomic_unchecked_t fscache_op_debug_id;
44989 EXPORT_SYMBOL(fscache_op_debug_id);
44990
44991 /**
44992 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
44993 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
44994 ASSERTCMP(atomic_read(&op->usage), >, 0);
44995
44996 - fscache_stat(&fscache_n_op_enqueue);
44997 + fscache_stat_unchecked(&fscache_n_op_enqueue);
44998 switch (op->flags & FSCACHE_OP_TYPE) {
44999 case FSCACHE_OP_ASYNC:
45000 _debug("queue async");
45001 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
45002 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
45003 if (op->processor)
45004 fscache_enqueue_operation(op);
45005 - fscache_stat(&fscache_n_op_run);
45006 + fscache_stat_unchecked(&fscache_n_op_run);
45007 }
45008
45009 /*
45010 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45011 if (object->n_ops > 1) {
45012 atomic_inc(&op->usage);
45013 list_add_tail(&op->pend_link, &object->pending_ops);
45014 - fscache_stat(&fscache_n_op_pend);
45015 + fscache_stat_unchecked(&fscache_n_op_pend);
45016 } else if (!list_empty(&object->pending_ops)) {
45017 atomic_inc(&op->usage);
45018 list_add_tail(&op->pend_link, &object->pending_ops);
45019 - fscache_stat(&fscache_n_op_pend);
45020 + fscache_stat_unchecked(&fscache_n_op_pend);
45021 fscache_start_operations(object);
45022 } else {
45023 ASSERTCMP(object->n_in_progress, ==, 0);
45024 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45025 object->n_exclusive++; /* reads and writes must wait */
45026 atomic_inc(&op->usage);
45027 list_add_tail(&op->pend_link, &object->pending_ops);
45028 - fscache_stat(&fscache_n_op_pend);
45029 + fscache_stat_unchecked(&fscache_n_op_pend);
45030 ret = 0;
45031 } else {
45032 /* not allowed to submit ops in any other state */
45033 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
45034 if (object->n_exclusive > 0) {
45035 atomic_inc(&op->usage);
45036 list_add_tail(&op->pend_link, &object->pending_ops);
45037 - fscache_stat(&fscache_n_op_pend);
45038 + fscache_stat_unchecked(&fscache_n_op_pend);
45039 } else if (!list_empty(&object->pending_ops)) {
45040 atomic_inc(&op->usage);
45041 list_add_tail(&op->pend_link, &object->pending_ops);
45042 - fscache_stat(&fscache_n_op_pend);
45043 + fscache_stat_unchecked(&fscache_n_op_pend);
45044 fscache_start_operations(object);
45045 } else {
45046 ASSERTCMP(object->n_exclusive, ==, 0);
45047 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
45048 object->n_ops++;
45049 atomic_inc(&op->usage);
45050 list_add_tail(&op->pend_link, &object->pending_ops);
45051 - fscache_stat(&fscache_n_op_pend);
45052 + fscache_stat_unchecked(&fscache_n_op_pend);
45053 ret = 0;
45054 } else if (object->state == FSCACHE_OBJECT_DYING ||
45055 object->state == FSCACHE_OBJECT_LC_DYING ||
45056 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45057 - fscache_stat(&fscache_n_op_rejected);
45058 + fscache_stat_unchecked(&fscache_n_op_rejected);
45059 ret = -ENOBUFS;
45060 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45061 fscache_report_unexpected_submission(object, op, ostate);
45062 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
45063
45064 ret = -EBUSY;
45065 if (!list_empty(&op->pend_link)) {
45066 - fscache_stat(&fscache_n_op_cancelled);
45067 + fscache_stat_unchecked(&fscache_n_op_cancelled);
45068 list_del_init(&op->pend_link);
45069 object->n_ops--;
45070 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
45071 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
45072 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45073 BUG();
45074
45075 - fscache_stat(&fscache_n_op_release);
45076 + fscache_stat_unchecked(&fscache_n_op_release);
45077
45078 if (op->release) {
45079 op->release(op);
45080 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
45081 * lock, and defer it otherwise */
45082 if (!spin_trylock(&object->lock)) {
45083 _debug("defer put");
45084 - fscache_stat(&fscache_n_op_deferred_release);
45085 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
45086
45087 cache = object->cache;
45088 spin_lock(&cache->op_gc_list_lock);
45089 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
45090
45091 _debug("GC DEFERRED REL OBJ%x OP%x",
45092 object->debug_id, op->debug_id);
45093 - fscache_stat(&fscache_n_op_gc);
45094 + fscache_stat_unchecked(&fscache_n_op_gc);
45095
45096 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45097
45098 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
45099 index 3f7a59b..cf196cc 100644
45100 --- a/fs/fscache/page.c
45101 +++ b/fs/fscache/page.c
45102 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45103 val = radix_tree_lookup(&cookie->stores, page->index);
45104 if (!val) {
45105 rcu_read_unlock();
45106 - fscache_stat(&fscache_n_store_vmscan_not_storing);
45107 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45108 __fscache_uncache_page(cookie, page);
45109 return true;
45110 }
45111 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45112 spin_unlock(&cookie->stores_lock);
45113
45114 if (xpage) {
45115 - fscache_stat(&fscache_n_store_vmscan_cancelled);
45116 - fscache_stat(&fscache_n_store_radix_deletes);
45117 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45118 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45119 ASSERTCMP(xpage, ==, page);
45120 } else {
45121 - fscache_stat(&fscache_n_store_vmscan_gone);
45122 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45123 }
45124
45125 wake_up_bit(&cookie->flags, 0);
45126 @@ -107,7 +107,7 @@ page_busy:
45127 /* we might want to wait here, but that could deadlock the allocator as
45128 * the work threads writing to the cache may all end up sleeping
45129 * on memory allocation */
45130 - fscache_stat(&fscache_n_store_vmscan_busy);
45131 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45132 return false;
45133 }
45134 EXPORT_SYMBOL(__fscache_maybe_release_page);
45135 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
45136 FSCACHE_COOKIE_STORING_TAG);
45137 if (!radix_tree_tag_get(&cookie->stores, page->index,
45138 FSCACHE_COOKIE_PENDING_TAG)) {
45139 - fscache_stat(&fscache_n_store_radix_deletes);
45140 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45141 xpage = radix_tree_delete(&cookie->stores, page->index);
45142 }
45143 spin_unlock(&cookie->stores_lock);
45144 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
45145
45146 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45147
45148 - fscache_stat(&fscache_n_attr_changed_calls);
45149 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45150
45151 if (fscache_object_is_active(object)) {
45152 fscache_stat(&fscache_n_cop_attr_changed);
45153 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45154
45155 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45156
45157 - fscache_stat(&fscache_n_attr_changed);
45158 + fscache_stat_unchecked(&fscache_n_attr_changed);
45159
45160 op = kzalloc(sizeof(*op), GFP_KERNEL);
45161 if (!op) {
45162 - fscache_stat(&fscache_n_attr_changed_nomem);
45163 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45164 _leave(" = -ENOMEM");
45165 return -ENOMEM;
45166 }
45167 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45168 if (fscache_submit_exclusive_op(object, op) < 0)
45169 goto nobufs;
45170 spin_unlock(&cookie->lock);
45171 - fscache_stat(&fscache_n_attr_changed_ok);
45172 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45173 fscache_put_operation(op);
45174 _leave(" = 0");
45175 return 0;
45176 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45177 nobufs:
45178 spin_unlock(&cookie->lock);
45179 kfree(op);
45180 - fscache_stat(&fscache_n_attr_changed_nobufs);
45181 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45182 _leave(" = %d", -ENOBUFS);
45183 return -ENOBUFS;
45184 }
45185 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
45186 /* allocate a retrieval operation and attempt to submit it */
45187 op = kzalloc(sizeof(*op), GFP_NOIO);
45188 if (!op) {
45189 - fscache_stat(&fscache_n_retrievals_nomem);
45190 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45191 return NULL;
45192 }
45193
45194 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45195 return 0;
45196 }
45197
45198 - fscache_stat(&fscache_n_retrievals_wait);
45199 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
45200
45201 jif = jiffies;
45202 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45203 fscache_wait_bit_interruptible,
45204 TASK_INTERRUPTIBLE) != 0) {
45205 - fscache_stat(&fscache_n_retrievals_intr);
45206 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45207 _leave(" = -ERESTARTSYS");
45208 return -ERESTARTSYS;
45209 }
45210 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45211 */
45212 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45213 struct fscache_retrieval *op,
45214 - atomic_t *stat_op_waits,
45215 - atomic_t *stat_object_dead)
45216 + atomic_unchecked_t *stat_op_waits,
45217 + atomic_unchecked_t *stat_object_dead)
45218 {
45219 int ret;
45220
45221 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45222 goto check_if_dead;
45223
45224 _debug(">>> WT");
45225 - fscache_stat(stat_op_waits);
45226 + fscache_stat_unchecked(stat_op_waits);
45227 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45228 fscache_wait_bit_interruptible,
45229 TASK_INTERRUPTIBLE) < 0) {
45230 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45231
45232 check_if_dead:
45233 if (unlikely(fscache_object_is_dead(object))) {
45234 - fscache_stat(stat_object_dead);
45235 + fscache_stat_unchecked(stat_object_dead);
45236 return -ENOBUFS;
45237 }
45238 return 0;
45239 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45240
45241 _enter("%p,%p,,,", cookie, page);
45242
45243 - fscache_stat(&fscache_n_retrievals);
45244 + fscache_stat_unchecked(&fscache_n_retrievals);
45245
45246 if (hlist_empty(&cookie->backing_objects))
45247 goto nobufs;
45248 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45249 goto nobufs_unlock;
45250 spin_unlock(&cookie->lock);
45251
45252 - fscache_stat(&fscache_n_retrieval_ops);
45253 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45254
45255 /* pin the netfs read context in case we need to do the actual netfs
45256 * read because we've encountered a cache read failure */
45257 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45258
45259 error:
45260 if (ret == -ENOMEM)
45261 - fscache_stat(&fscache_n_retrievals_nomem);
45262 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45263 else if (ret == -ERESTARTSYS)
45264 - fscache_stat(&fscache_n_retrievals_intr);
45265 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45266 else if (ret == -ENODATA)
45267 - fscache_stat(&fscache_n_retrievals_nodata);
45268 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45269 else if (ret < 0)
45270 - fscache_stat(&fscache_n_retrievals_nobufs);
45271 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45272 else
45273 - fscache_stat(&fscache_n_retrievals_ok);
45274 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45275
45276 fscache_put_retrieval(op);
45277 _leave(" = %d", ret);
45278 @@ -429,7 +429,7 @@ nobufs_unlock:
45279 spin_unlock(&cookie->lock);
45280 kfree(op);
45281 nobufs:
45282 - fscache_stat(&fscache_n_retrievals_nobufs);
45283 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45284 _leave(" = -ENOBUFS");
45285 return -ENOBUFS;
45286 }
45287 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45288
45289 _enter("%p,,%d,,,", cookie, *nr_pages);
45290
45291 - fscache_stat(&fscache_n_retrievals);
45292 + fscache_stat_unchecked(&fscache_n_retrievals);
45293
45294 if (hlist_empty(&cookie->backing_objects))
45295 goto nobufs;
45296 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45297 goto nobufs_unlock;
45298 spin_unlock(&cookie->lock);
45299
45300 - fscache_stat(&fscache_n_retrieval_ops);
45301 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45302
45303 /* pin the netfs read context in case we need to do the actual netfs
45304 * read because we've encountered a cache read failure */
45305 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45306
45307 error:
45308 if (ret == -ENOMEM)
45309 - fscache_stat(&fscache_n_retrievals_nomem);
45310 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45311 else if (ret == -ERESTARTSYS)
45312 - fscache_stat(&fscache_n_retrievals_intr);
45313 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45314 else if (ret == -ENODATA)
45315 - fscache_stat(&fscache_n_retrievals_nodata);
45316 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45317 else if (ret < 0)
45318 - fscache_stat(&fscache_n_retrievals_nobufs);
45319 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45320 else
45321 - fscache_stat(&fscache_n_retrievals_ok);
45322 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45323
45324 fscache_put_retrieval(op);
45325 _leave(" = %d", ret);
45326 @@ -545,7 +545,7 @@ nobufs_unlock:
45327 spin_unlock(&cookie->lock);
45328 kfree(op);
45329 nobufs:
45330 - fscache_stat(&fscache_n_retrievals_nobufs);
45331 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45332 _leave(" = -ENOBUFS");
45333 return -ENOBUFS;
45334 }
45335 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45336
45337 _enter("%p,%p,,,", cookie, page);
45338
45339 - fscache_stat(&fscache_n_allocs);
45340 + fscache_stat_unchecked(&fscache_n_allocs);
45341
45342 if (hlist_empty(&cookie->backing_objects))
45343 goto nobufs;
45344 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45345 goto nobufs_unlock;
45346 spin_unlock(&cookie->lock);
45347
45348 - fscache_stat(&fscache_n_alloc_ops);
45349 + fscache_stat_unchecked(&fscache_n_alloc_ops);
45350
45351 ret = fscache_wait_for_retrieval_activation(
45352 object, op,
45353 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45354
45355 error:
45356 if (ret == -ERESTARTSYS)
45357 - fscache_stat(&fscache_n_allocs_intr);
45358 + fscache_stat_unchecked(&fscache_n_allocs_intr);
45359 else if (ret < 0)
45360 - fscache_stat(&fscache_n_allocs_nobufs);
45361 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45362 else
45363 - fscache_stat(&fscache_n_allocs_ok);
45364 + fscache_stat_unchecked(&fscache_n_allocs_ok);
45365
45366 fscache_put_retrieval(op);
45367 _leave(" = %d", ret);
45368 @@ -625,7 +625,7 @@ nobufs_unlock:
45369 spin_unlock(&cookie->lock);
45370 kfree(op);
45371 nobufs:
45372 - fscache_stat(&fscache_n_allocs_nobufs);
45373 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45374 _leave(" = -ENOBUFS");
45375 return -ENOBUFS;
45376 }
45377 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45378
45379 spin_lock(&cookie->stores_lock);
45380
45381 - fscache_stat(&fscache_n_store_calls);
45382 + fscache_stat_unchecked(&fscache_n_store_calls);
45383
45384 /* find a page to store */
45385 page = NULL;
45386 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45387 page = results[0];
45388 _debug("gang %d [%lx]", n, page->index);
45389 if (page->index > op->store_limit) {
45390 - fscache_stat(&fscache_n_store_pages_over_limit);
45391 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45392 goto superseded;
45393 }
45394
45395 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45396 spin_unlock(&cookie->stores_lock);
45397 spin_unlock(&object->lock);
45398
45399 - fscache_stat(&fscache_n_store_pages);
45400 + fscache_stat_unchecked(&fscache_n_store_pages);
45401 fscache_stat(&fscache_n_cop_write_page);
45402 ret = object->cache->ops->write_page(op, page);
45403 fscache_stat_d(&fscache_n_cop_write_page);
45404 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45405 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45406 ASSERT(PageFsCache(page));
45407
45408 - fscache_stat(&fscache_n_stores);
45409 + fscache_stat_unchecked(&fscache_n_stores);
45410
45411 op = kzalloc(sizeof(*op), GFP_NOIO);
45412 if (!op)
45413 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45414 spin_unlock(&cookie->stores_lock);
45415 spin_unlock(&object->lock);
45416
45417 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
45418 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
45419 op->store_limit = object->store_limit;
45420
45421 if (fscache_submit_op(object, &op->op) < 0)
45422 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45423
45424 spin_unlock(&cookie->lock);
45425 radix_tree_preload_end();
45426 - fscache_stat(&fscache_n_store_ops);
45427 - fscache_stat(&fscache_n_stores_ok);
45428 + fscache_stat_unchecked(&fscache_n_store_ops);
45429 + fscache_stat_unchecked(&fscache_n_stores_ok);
45430
45431 /* the work queue now carries its own ref on the object */
45432 fscache_put_operation(&op->op);
45433 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45434 return 0;
45435
45436 already_queued:
45437 - fscache_stat(&fscache_n_stores_again);
45438 + fscache_stat_unchecked(&fscache_n_stores_again);
45439 already_pending:
45440 spin_unlock(&cookie->stores_lock);
45441 spin_unlock(&object->lock);
45442 spin_unlock(&cookie->lock);
45443 radix_tree_preload_end();
45444 kfree(op);
45445 - fscache_stat(&fscache_n_stores_ok);
45446 + fscache_stat_unchecked(&fscache_n_stores_ok);
45447 _leave(" = 0");
45448 return 0;
45449
45450 @@ -851,14 +851,14 @@ nobufs:
45451 spin_unlock(&cookie->lock);
45452 radix_tree_preload_end();
45453 kfree(op);
45454 - fscache_stat(&fscache_n_stores_nobufs);
45455 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
45456 _leave(" = -ENOBUFS");
45457 return -ENOBUFS;
45458
45459 nomem_free:
45460 kfree(op);
45461 nomem:
45462 - fscache_stat(&fscache_n_stores_oom);
45463 + fscache_stat_unchecked(&fscache_n_stores_oom);
45464 _leave(" = -ENOMEM");
45465 return -ENOMEM;
45466 }
45467 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
45468 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45469 ASSERTCMP(page, !=, NULL);
45470
45471 - fscache_stat(&fscache_n_uncaches);
45472 + fscache_stat_unchecked(&fscache_n_uncaches);
45473
45474 /* cache withdrawal may beat us to it */
45475 if (!PageFsCache(page))
45476 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
45477 unsigned long loop;
45478
45479 #ifdef CONFIG_FSCACHE_STATS
45480 - atomic_add(pagevec->nr, &fscache_n_marks);
45481 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
45482 #endif
45483
45484 for (loop = 0; loop < pagevec->nr; loop++) {
45485 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45486 index 4765190..2a067f2 100644
45487 --- a/fs/fscache/stats.c
45488 +++ b/fs/fscache/stats.c
45489 @@ -18,95 +18,95 @@
45490 /*
45491 * operation counters
45492 */
45493 -atomic_t fscache_n_op_pend;
45494 -atomic_t fscache_n_op_run;
45495 -atomic_t fscache_n_op_enqueue;
45496 -atomic_t fscache_n_op_requeue;
45497 -atomic_t fscache_n_op_deferred_release;
45498 -atomic_t fscache_n_op_release;
45499 -atomic_t fscache_n_op_gc;
45500 -atomic_t fscache_n_op_cancelled;
45501 -atomic_t fscache_n_op_rejected;
45502 +atomic_unchecked_t fscache_n_op_pend;
45503 +atomic_unchecked_t fscache_n_op_run;
45504 +atomic_unchecked_t fscache_n_op_enqueue;
45505 +atomic_unchecked_t fscache_n_op_requeue;
45506 +atomic_unchecked_t fscache_n_op_deferred_release;
45507 +atomic_unchecked_t fscache_n_op_release;
45508 +atomic_unchecked_t fscache_n_op_gc;
45509 +atomic_unchecked_t fscache_n_op_cancelled;
45510 +atomic_unchecked_t fscache_n_op_rejected;
45511
45512 -atomic_t fscache_n_attr_changed;
45513 -atomic_t fscache_n_attr_changed_ok;
45514 -atomic_t fscache_n_attr_changed_nobufs;
45515 -atomic_t fscache_n_attr_changed_nomem;
45516 -atomic_t fscache_n_attr_changed_calls;
45517 +atomic_unchecked_t fscache_n_attr_changed;
45518 +atomic_unchecked_t fscache_n_attr_changed_ok;
45519 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
45520 +atomic_unchecked_t fscache_n_attr_changed_nomem;
45521 +atomic_unchecked_t fscache_n_attr_changed_calls;
45522
45523 -atomic_t fscache_n_allocs;
45524 -atomic_t fscache_n_allocs_ok;
45525 -atomic_t fscache_n_allocs_wait;
45526 -atomic_t fscache_n_allocs_nobufs;
45527 -atomic_t fscache_n_allocs_intr;
45528 -atomic_t fscache_n_allocs_object_dead;
45529 -atomic_t fscache_n_alloc_ops;
45530 -atomic_t fscache_n_alloc_op_waits;
45531 +atomic_unchecked_t fscache_n_allocs;
45532 +atomic_unchecked_t fscache_n_allocs_ok;
45533 +atomic_unchecked_t fscache_n_allocs_wait;
45534 +atomic_unchecked_t fscache_n_allocs_nobufs;
45535 +atomic_unchecked_t fscache_n_allocs_intr;
45536 +atomic_unchecked_t fscache_n_allocs_object_dead;
45537 +atomic_unchecked_t fscache_n_alloc_ops;
45538 +atomic_unchecked_t fscache_n_alloc_op_waits;
45539
45540 -atomic_t fscache_n_retrievals;
45541 -atomic_t fscache_n_retrievals_ok;
45542 -atomic_t fscache_n_retrievals_wait;
45543 -atomic_t fscache_n_retrievals_nodata;
45544 -atomic_t fscache_n_retrievals_nobufs;
45545 -atomic_t fscache_n_retrievals_intr;
45546 -atomic_t fscache_n_retrievals_nomem;
45547 -atomic_t fscache_n_retrievals_object_dead;
45548 -atomic_t fscache_n_retrieval_ops;
45549 -atomic_t fscache_n_retrieval_op_waits;
45550 +atomic_unchecked_t fscache_n_retrievals;
45551 +atomic_unchecked_t fscache_n_retrievals_ok;
45552 +atomic_unchecked_t fscache_n_retrievals_wait;
45553 +atomic_unchecked_t fscache_n_retrievals_nodata;
45554 +atomic_unchecked_t fscache_n_retrievals_nobufs;
45555 +atomic_unchecked_t fscache_n_retrievals_intr;
45556 +atomic_unchecked_t fscache_n_retrievals_nomem;
45557 +atomic_unchecked_t fscache_n_retrievals_object_dead;
45558 +atomic_unchecked_t fscache_n_retrieval_ops;
45559 +atomic_unchecked_t fscache_n_retrieval_op_waits;
45560
45561 -atomic_t fscache_n_stores;
45562 -atomic_t fscache_n_stores_ok;
45563 -atomic_t fscache_n_stores_again;
45564 -atomic_t fscache_n_stores_nobufs;
45565 -atomic_t fscache_n_stores_oom;
45566 -atomic_t fscache_n_store_ops;
45567 -atomic_t fscache_n_store_calls;
45568 -atomic_t fscache_n_store_pages;
45569 -atomic_t fscache_n_store_radix_deletes;
45570 -atomic_t fscache_n_store_pages_over_limit;
45571 +atomic_unchecked_t fscache_n_stores;
45572 +atomic_unchecked_t fscache_n_stores_ok;
45573 +atomic_unchecked_t fscache_n_stores_again;
45574 +atomic_unchecked_t fscache_n_stores_nobufs;
45575 +atomic_unchecked_t fscache_n_stores_oom;
45576 +atomic_unchecked_t fscache_n_store_ops;
45577 +atomic_unchecked_t fscache_n_store_calls;
45578 +atomic_unchecked_t fscache_n_store_pages;
45579 +atomic_unchecked_t fscache_n_store_radix_deletes;
45580 +atomic_unchecked_t fscache_n_store_pages_over_limit;
45581
45582 -atomic_t fscache_n_store_vmscan_not_storing;
45583 -atomic_t fscache_n_store_vmscan_gone;
45584 -atomic_t fscache_n_store_vmscan_busy;
45585 -atomic_t fscache_n_store_vmscan_cancelled;
45586 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45587 +atomic_unchecked_t fscache_n_store_vmscan_gone;
45588 +atomic_unchecked_t fscache_n_store_vmscan_busy;
45589 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45590
45591 -atomic_t fscache_n_marks;
45592 -atomic_t fscache_n_uncaches;
45593 +atomic_unchecked_t fscache_n_marks;
45594 +atomic_unchecked_t fscache_n_uncaches;
45595
45596 -atomic_t fscache_n_acquires;
45597 -atomic_t fscache_n_acquires_null;
45598 -atomic_t fscache_n_acquires_no_cache;
45599 -atomic_t fscache_n_acquires_ok;
45600 -atomic_t fscache_n_acquires_nobufs;
45601 -atomic_t fscache_n_acquires_oom;
45602 +atomic_unchecked_t fscache_n_acquires;
45603 +atomic_unchecked_t fscache_n_acquires_null;
45604 +atomic_unchecked_t fscache_n_acquires_no_cache;
45605 +atomic_unchecked_t fscache_n_acquires_ok;
45606 +atomic_unchecked_t fscache_n_acquires_nobufs;
45607 +atomic_unchecked_t fscache_n_acquires_oom;
45608
45609 -atomic_t fscache_n_updates;
45610 -atomic_t fscache_n_updates_null;
45611 -atomic_t fscache_n_updates_run;
45612 +atomic_unchecked_t fscache_n_updates;
45613 +atomic_unchecked_t fscache_n_updates_null;
45614 +atomic_unchecked_t fscache_n_updates_run;
45615
45616 -atomic_t fscache_n_relinquishes;
45617 -atomic_t fscache_n_relinquishes_null;
45618 -atomic_t fscache_n_relinquishes_waitcrt;
45619 -atomic_t fscache_n_relinquishes_retire;
45620 +atomic_unchecked_t fscache_n_relinquishes;
45621 +atomic_unchecked_t fscache_n_relinquishes_null;
45622 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45623 +atomic_unchecked_t fscache_n_relinquishes_retire;
45624
45625 -atomic_t fscache_n_cookie_index;
45626 -atomic_t fscache_n_cookie_data;
45627 -atomic_t fscache_n_cookie_special;
45628 +atomic_unchecked_t fscache_n_cookie_index;
45629 +atomic_unchecked_t fscache_n_cookie_data;
45630 +atomic_unchecked_t fscache_n_cookie_special;
45631
45632 -atomic_t fscache_n_object_alloc;
45633 -atomic_t fscache_n_object_no_alloc;
45634 -atomic_t fscache_n_object_lookups;
45635 -atomic_t fscache_n_object_lookups_negative;
45636 -atomic_t fscache_n_object_lookups_positive;
45637 -atomic_t fscache_n_object_lookups_timed_out;
45638 -atomic_t fscache_n_object_created;
45639 -atomic_t fscache_n_object_avail;
45640 -atomic_t fscache_n_object_dead;
45641 +atomic_unchecked_t fscache_n_object_alloc;
45642 +atomic_unchecked_t fscache_n_object_no_alloc;
45643 +atomic_unchecked_t fscache_n_object_lookups;
45644 +atomic_unchecked_t fscache_n_object_lookups_negative;
45645 +atomic_unchecked_t fscache_n_object_lookups_positive;
45646 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
45647 +atomic_unchecked_t fscache_n_object_created;
45648 +atomic_unchecked_t fscache_n_object_avail;
45649 +atomic_unchecked_t fscache_n_object_dead;
45650
45651 -atomic_t fscache_n_checkaux_none;
45652 -atomic_t fscache_n_checkaux_okay;
45653 -atomic_t fscache_n_checkaux_update;
45654 -atomic_t fscache_n_checkaux_obsolete;
45655 +atomic_unchecked_t fscache_n_checkaux_none;
45656 +atomic_unchecked_t fscache_n_checkaux_okay;
45657 +atomic_unchecked_t fscache_n_checkaux_update;
45658 +atomic_unchecked_t fscache_n_checkaux_obsolete;
45659
45660 atomic_t fscache_n_cop_alloc_object;
45661 atomic_t fscache_n_cop_lookup_object;
45662 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
45663 seq_puts(m, "FS-Cache statistics\n");
45664
45665 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
45666 - atomic_read(&fscache_n_cookie_index),
45667 - atomic_read(&fscache_n_cookie_data),
45668 - atomic_read(&fscache_n_cookie_special));
45669 + atomic_read_unchecked(&fscache_n_cookie_index),
45670 + atomic_read_unchecked(&fscache_n_cookie_data),
45671 + atomic_read_unchecked(&fscache_n_cookie_special));
45672
45673 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
45674 - atomic_read(&fscache_n_object_alloc),
45675 - atomic_read(&fscache_n_object_no_alloc),
45676 - atomic_read(&fscache_n_object_avail),
45677 - atomic_read(&fscache_n_object_dead));
45678 + atomic_read_unchecked(&fscache_n_object_alloc),
45679 + atomic_read_unchecked(&fscache_n_object_no_alloc),
45680 + atomic_read_unchecked(&fscache_n_object_avail),
45681 + atomic_read_unchecked(&fscache_n_object_dead));
45682 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
45683 - atomic_read(&fscache_n_checkaux_none),
45684 - atomic_read(&fscache_n_checkaux_okay),
45685 - atomic_read(&fscache_n_checkaux_update),
45686 - atomic_read(&fscache_n_checkaux_obsolete));
45687 + atomic_read_unchecked(&fscache_n_checkaux_none),
45688 + atomic_read_unchecked(&fscache_n_checkaux_okay),
45689 + atomic_read_unchecked(&fscache_n_checkaux_update),
45690 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
45691
45692 seq_printf(m, "Pages : mrk=%u unc=%u\n",
45693 - atomic_read(&fscache_n_marks),
45694 - atomic_read(&fscache_n_uncaches));
45695 + atomic_read_unchecked(&fscache_n_marks),
45696 + atomic_read_unchecked(&fscache_n_uncaches));
45697
45698 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
45699 " oom=%u\n",
45700 - atomic_read(&fscache_n_acquires),
45701 - atomic_read(&fscache_n_acquires_null),
45702 - atomic_read(&fscache_n_acquires_no_cache),
45703 - atomic_read(&fscache_n_acquires_ok),
45704 - atomic_read(&fscache_n_acquires_nobufs),
45705 - atomic_read(&fscache_n_acquires_oom));
45706 + atomic_read_unchecked(&fscache_n_acquires),
45707 + atomic_read_unchecked(&fscache_n_acquires_null),
45708 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
45709 + atomic_read_unchecked(&fscache_n_acquires_ok),
45710 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
45711 + atomic_read_unchecked(&fscache_n_acquires_oom));
45712
45713 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
45714 - atomic_read(&fscache_n_object_lookups),
45715 - atomic_read(&fscache_n_object_lookups_negative),
45716 - atomic_read(&fscache_n_object_lookups_positive),
45717 - atomic_read(&fscache_n_object_created),
45718 - atomic_read(&fscache_n_object_lookups_timed_out));
45719 + atomic_read_unchecked(&fscache_n_object_lookups),
45720 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
45721 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
45722 + atomic_read_unchecked(&fscache_n_object_created),
45723 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
45724
45725 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
45726 - atomic_read(&fscache_n_updates),
45727 - atomic_read(&fscache_n_updates_null),
45728 - atomic_read(&fscache_n_updates_run));
45729 + atomic_read_unchecked(&fscache_n_updates),
45730 + atomic_read_unchecked(&fscache_n_updates_null),
45731 + atomic_read_unchecked(&fscache_n_updates_run));
45732
45733 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
45734 - atomic_read(&fscache_n_relinquishes),
45735 - atomic_read(&fscache_n_relinquishes_null),
45736 - atomic_read(&fscache_n_relinquishes_waitcrt),
45737 - atomic_read(&fscache_n_relinquishes_retire));
45738 + atomic_read_unchecked(&fscache_n_relinquishes),
45739 + atomic_read_unchecked(&fscache_n_relinquishes_null),
45740 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
45741 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
45742
45743 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
45744 - atomic_read(&fscache_n_attr_changed),
45745 - atomic_read(&fscache_n_attr_changed_ok),
45746 - atomic_read(&fscache_n_attr_changed_nobufs),
45747 - atomic_read(&fscache_n_attr_changed_nomem),
45748 - atomic_read(&fscache_n_attr_changed_calls));
45749 + atomic_read_unchecked(&fscache_n_attr_changed),
45750 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
45751 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
45752 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
45753 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
45754
45755 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
45756 - atomic_read(&fscache_n_allocs),
45757 - atomic_read(&fscache_n_allocs_ok),
45758 - atomic_read(&fscache_n_allocs_wait),
45759 - atomic_read(&fscache_n_allocs_nobufs),
45760 - atomic_read(&fscache_n_allocs_intr));
45761 + atomic_read_unchecked(&fscache_n_allocs),
45762 + atomic_read_unchecked(&fscache_n_allocs_ok),
45763 + atomic_read_unchecked(&fscache_n_allocs_wait),
45764 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
45765 + atomic_read_unchecked(&fscache_n_allocs_intr));
45766 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
45767 - atomic_read(&fscache_n_alloc_ops),
45768 - atomic_read(&fscache_n_alloc_op_waits),
45769 - atomic_read(&fscache_n_allocs_object_dead));
45770 + atomic_read_unchecked(&fscache_n_alloc_ops),
45771 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
45772 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
45773
45774 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
45775 " int=%u oom=%u\n",
45776 - atomic_read(&fscache_n_retrievals),
45777 - atomic_read(&fscache_n_retrievals_ok),
45778 - atomic_read(&fscache_n_retrievals_wait),
45779 - atomic_read(&fscache_n_retrievals_nodata),
45780 - atomic_read(&fscache_n_retrievals_nobufs),
45781 - atomic_read(&fscache_n_retrievals_intr),
45782 - atomic_read(&fscache_n_retrievals_nomem));
45783 + atomic_read_unchecked(&fscache_n_retrievals),
45784 + atomic_read_unchecked(&fscache_n_retrievals_ok),
45785 + atomic_read_unchecked(&fscache_n_retrievals_wait),
45786 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
45787 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
45788 + atomic_read_unchecked(&fscache_n_retrievals_intr),
45789 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
45790 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
45791 - atomic_read(&fscache_n_retrieval_ops),
45792 - atomic_read(&fscache_n_retrieval_op_waits),
45793 - atomic_read(&fscache_n_retrievals_object_dead));
45794 + atomic_read_unchecked(&fscache_n_retrieval_ops),
45795 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
45796 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
45797
45798 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
45799 - atomic_read(&fscache_n_stores),
45800 - atomic_read(&fscache_n_stores_ok),
45801 - atomic_read(&fscache_n_stores_again),
45802 - atomic_read(&fscache_n_stores_nobufs),
45803 - atomic_read(&fscache_n_stores_oom));
45804 + atomic_read_unchecked(&fscache_n_stores),
45805 + atomic_read_unchecked(&fscache_n_stores_ok),
45806 + atomic_read_unchecked(&fscache_n_stores_again),
45807 + atomic_read_unchecked(&fscache_n_stores_nobufs),
45808 + atomic_read_unchecked(&fscache_n_stores_oom));
45809 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
45810 - atomic_read(&fscache_n_store_ops),
45811 - atomic_read(&fscache_n_store_calls),
45812 - atomic_read(&fscache_n_store_pages),
45813 - atomic_read(&fscache_n_store_radix_deletes),
45814 - atomic_read(&fscache_n_store_pages_over_limit));
45815 + atomic_read_unchecked(&fscache_n_store_ops),
45816 + atomic_read_unchecked(&fscache_n_store_calls),
45817 + atomic_read_unchecked(&fscache_n_store_pages),
45818 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
45819 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
45820
45821 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
45822 - atomic_read(&fscache_n_store_vmscan_not_storing),
45823 - atomic_read(&fscache_n_store_vmscan_gone),
45824 - atomic_read(&fscache_n_store_vmscan_busy),
45825 - atomic_read(&fscache_n_store_vmscan_cancelled));
45826 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
45827 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
45828 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
45829 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
45830
45831 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
45832 - atomic_read(&fscache_n_op_pend),
45833 - atomic_read(&fscache_n_op_run),
45834 - atomic_read(&fscache_n_op_enqueue),
45835 - atomic_read(&fscache_n_op_cancelled),
45836 - atomic_read(&fscache_n_op_rejected));
45837 + atomic_read_unchecked(&fscache_n_op_pend),
45838 + atomic_read_unchecked(&fscache_n_op_run),
45839 + atomic_read_unchecked(&fscache_n_op_enqueue),
45840 + atomic_read_unchecked(&fscache_n_op_cancelled),
45841 + atomic_read_unchecked(&fscache_n_op_rejected));
45842 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
45843 - atomic_read(&fscache_n_op_deferred_release),
45844 - atomic_read(&fscache_n_op_release),
45845 - atomic_read(&fscache_n_op_gc));
45846 + atomic_read_unchecked(&fscache_n_op_deferred_release),
45847 + atomic_read_unchecked(&fscache_n_op_release),
45848 + atomic_read_unchecked(&fscache_n_op_gc));
45849
45850 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
45851 atomic_read(&fscache_n_cop_alloc_object),
45852 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
45853 index 3426521..3b75162 100644
45854 --- a/fs/fuse/cuse.c
45855 +++ b/fs/fuse/cuse.c
45856 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
45857 INIT_LIST_HEAD(&cuse_conntbl[i]);
45858
45859 /* inherit and extend fuse_dev_operations */
45860 - cuse_channel_fops = fuse_dev_operations;
45861 - cuse_channel_fops.owner = THIS_MODULE;
45862 - cuse_channel_fops.open = cuse_channel_open;
45863 - cuse_channel_fops.release = cuse_channel_release;
45864 + pax_open_kernel();
45865 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
45866 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
45867 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
45868 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
45869 + pax_close_kernel();
45870
45871 cuse_class = class_create(THIS_MODULE, "cuse");
45872 if (IS_ERR(cuse_class))
45873 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
45874 index 7df2b5e..5804aa7 100644
45875 --- a/fs/fuse/dev.c
45876 +++ b/fs/fuse/dev.c
45877 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
45878 ret = 0;
45879 pipe_lock(pipe);
45880
45881 - if (!pipe->readers) {
45882 + if (!atomic_read(&pipe->readers)) {
45883 send_sig(SIGPIPE, current, 0);
45884 if (!ret)
45885 ret = -EPIPE;
45886 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
45887 index df5ac04..08cee2a 100644
45888 --- a/fs/fuse/dir.c
45889 +++ b/fs/fuse/dir.c
45890 @@ -1180,7 +1180,7 @@ static char *read_link(struct dentry *dentry)
45891 return link;
45892 }
45893
45894 -static void free_link(char *link)
45895 +static void free_link(const char *link)
45896 {
45897 if (!IS_ERR(link))
45898 free_page((unsigned long) link);
45899 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
45900 index a9ba244..d9df391 100644
45901 --- a/fs/gfs2/inode.c
45902 +++ b/fs/gfs2/inode.c
45903 @@ -1496,7 +1496,7 @@ out:
45904
45905 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45906 {
45907 - char *s = nd_get_link(nd);
45908 + const char *s = nd_get_link(nd);
45909 if (!IS_ERR(s))
45910 kfree(s);
45911 }
45912 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
45913 index 001ef01..f7d5f07 100644
45914 --- a/fs/hugetlbfs/inode.c
45915 +++ b/fs/hugetlbfs/inode.c
45916 @@ -920,7 +920,7 @@ static struct file_system_type hugetlbfs_fs_type = {
45917 .kill_sb = kill_litter_super,
45918 };
45919
45920 -static struct vfsmount *hugetlbfs_vfsmount;
45921 +struct vfsmount *hugetlbfs_vfsmount;
45922
45923 static int can_do_hugetlb_shm(void)
45924 {
45925 diff --git a/fs/inode.c b/fs/inode.c
45926 index 9f4f5fe..6214688 100644
45927 --- a/fs/inode.c
45928 +++ b/fs/inode.c
45929 @@ -860,8 +860,8 @@ unsigned int get_next_ino(void)
45930
45931 #ifdef CONFIG_SMP
45932 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
45933 - static atomic_t shared_last_ino;
45934 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
45935 + static atomic_unchecked_t shared_last_ino;
45936 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
45937
45938 res = next - LAST_INO_BATCH;
45939 }
45940 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
45941 index 4a6cf28..d3a29d3 100644
45942 --- a/fs/jffs2/erase.c
45943 +++ b/fs/jffs2/erase.c
45944 @@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
45945 struct jffs2_unknown_node marker = {
45946 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
45947 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
45948 - .totlen = cpu_to_je32(c->cleanmarker_size)
45949 + .totlen = cpu_to_je32(c->cleanmarker_size),
45950 + .hdr_crc = cpu_to_je32(0)
45951 };
45952
45953 jffs2_prealloc_raw_node_refs(c, jeb, 1);
45954 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
45955 index 74d9be1..d5dd140 100644
45956 --- a/fs/jffs2/wbuf.c
45957 +++ b/fs/jffs2/wbuf.c
45958 @@ -1022,7 +1022,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
45959 {
45960 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
45961 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
45962 - .totlen = constant_cpu_to_je32(8)
45963 + .totlen = constant_cpu_to_je32(8),
45964 + .hdr_crc = constant_cpu_to_je32(0)
45965 };
45966
45967 /*
45968 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
45969 index 4a82950..bcaa0cb 100644
45970 --- a/fs/jfs/super.c
45971 +++ b/fs/jfs/super.c
45972 @@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
45973
45974 jfs_inode_cachep =
45975 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
45976 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
45977 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
45978 init_once);
45979 if (jfs_inode_cachep == NULL)
45980 return -ENOMEM;
45981 diff --git a/fs/libfs.c b/fs/libfs.c
45982 index 18d08f5..fe3dc64 100644
45983 --- a/fs/libfs.c
45984 +++ b/fs/libfs.c
45985 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
45986
45987 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
45988 struct dentry *next;
45989 + char d_name[sizeof(next->d_iname)];
45990 + const unsigned char *name;
45991 +
45992 next = list_entry(p, struct dentry, d_u.d_child);
45993 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
45994 if (!simple_positive(next)) {
45995 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
45996
45997 spin_unlock(&next->d_lock);
45998 spin_unlock(&dentry->d_lock);
45999 - if (filldir(dirent, next->d_name.name,
46000 + name = next->d_name.name;
46001 + if (name == next->d_iname) {
46002 + memcpy(d_name, name, next->d_name.len);
46003 + name = d_name;
46004 + }
46005 + if (filldir(dirent, name,
46006 next->d_name.len, filp->f_pos,
46007 next->d_inode->i_ino,
46008 dt_type(next->d_inode)) < 0)
46009 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
46010 index 8392cb8..80d6193 100644
46011 --- a/fs/lockd/clntproc.c
46012 +++ b/fs/lockd/clntproc.c
46013 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
46014 /*
46015 * Cookie counter for NLM requests
46016 */
46017 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
46018 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
46019
46020 void nlmclnt_next_cookie(struct nlm_cookie *c)
46021 {
46022 - u32 cookie = atomic_inc_return(&nlm_cookie);
46023 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
46024
46025 memcpy(c->data, &cookie, 4);
46026 c->len=4;
46027 diff --git a/fs/locks.c b/fs/locks.c
46028 index 0d68f1f..f216b79 100644
46029 --- a/fs/locks.c
46030 +++ b/fs/locks.c
46031 @@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *filp)
46032 return;
46033
46034 if (filp->f_op && filp->f_op->flock) {
46035 - struct file_lock fl = {
46036 + struct file_lock flock = {
46037 .fl_pid = current->tgid,
46038 .fl_file = filp,
46039 .fl_flags = FL_FLOCK,
46040 .fl_type = F_UNLCK,
46041 .fl_end = OFFSET_MAX,
46042 };
46043 - filp->f_op->flock(filp, F_SETLKW, &fl);
46044 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
46045 - fl.fl_ops->fl_release_private(&fl);
46046 + filp->f_op->flock(filp, F_SETLKW, &flock);
46047 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
46048 + flock.fl_ops->fl_release_private(&flock);
46049 }
46050
46051 lock_flocks();
46052 diff --git a/fs/namei.c b/fs/namei.c
46053 index c427919..e37fd3f 100644
46054 --- a/fs/namei.c
46055 +++ b/fs/namei.c
46056 @@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
46057 if (ret != -EACCES)
46058 return ret;
46059
46060 +#ifdef CONFIG_GRKERNSEC
46061 + /* we'll block if we have to log due to a denied capability use */
46062 + if (mask & MAY_NOT_BLOCK)
46063 + return -ECHILD;
46064 +#endif
46065 +
46066 if (S_ISDIR(inode->i_mode)) {
46067 /* DACs are overridable for directories */
46068 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46069 - return 0;
46070 if (!(mask & MAY_WRITE))
46071 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46072 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46073 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46074 return 0;
46075 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46076 + return 0;
46077 return -EACCES;
46078 }
46079 /*
46080 + * Searching includes executable on directories, else just read.
46081 + */
46082 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46083 + if (mask == MAY_READ)
46084 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46085 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46086 + return 0;
46087 +
46088 + /*
46089 * Read/write DACs are always overridable.
46090 * Executable DACs are overridable when there is
46091 * at least one exec bit set.
46092 @@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
46093 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46094 return 0;
46095
46096 - /*
46097 - * Searching includes executable on directories, else just read.
46098 - */
46099 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46100 - if (mask == MAY_READ)
46101 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46102 - return 0;
46103 -
46104 return -EACCES;
46105 }
46106
46107 @@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
46108 return error;
46109 }
46110
46111 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
46112 + dentry->d_inode, dentry, nd->path.mnt)) {
46113 + error = -EACCES;
46114 + *p = ERR_PTR(error); /* no ->put_link(), please */
46115 + path_put(&nd->path);
46116 + return error;
46117 + }
46118 +
46119 nd->last_type = LAST_BIND;
46120 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
46121 error = PTR_ERR(*p);
46122 if (!IS_ERR(*p)) {
46123 - char *s = nd_get_link(nd);
46124 + const char *s = nd_get_link(nd);
46125 error = 0;
46126 if (s)
46127 error = __vfs_follow_link(nd, s);
46128 @@ -1753,6 +1769,21 @@ static int path_lookupat(int dfd, const char *name,
46129 if (!err)
46130 err = complete_walk(nd);
46131
46132 + if (!(nd->flags & LOOKUP_PARENT)) {
46133 +#ifdef CONFIG_GRKERNSEC
46134 + if (flags & LOOKUP_RCU) {
46135 + if (!err)
46136 + path_put(&nd->path);
46137 + err = -ECHILD;
46138 + } else
46139 +#endif
46140 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46141 + if (!err)
46142 + path_put(&nd->path);
46143 + err = -ENOENT;
46144 + }
46145 + }
46146 +
46147 if (!err && nd->flags & LOOKUP_DIRECTORY) {
46148 if (!nd->inode->i_op->lookup) {
46149 path_put(&nd->path);
46150 @@ -1780,6 +1811,15 @@ static int do_path_lookup(int dfd, const char *name,
46151 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
46152
46153 if (likely(!retval)) {
46154 + if (*name != '/' && nd->path.dentry && nd->inode) {
46155 +#ifdef CONFIG_GRKERNSEC
46156 + if (flags & LOOKUP_RCU)
46157 + return -ECHILD;
46158 +#endif
46159 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
46160 + return -ENOENT;
46161 + }
46162 +
46163 if (unlikely(!audit_dummy_context())) {
46164 if (nd->path.dentry && nd->inode)
46165 audit_inode(name, nd->path.dentry);
46166 @@ -2126,6 +2166,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
46167 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
46168 return -EPERM;
46169
46170 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
46171 + return -EPERM;
46172 + if (gr_handle_rawio(inode))
46173 + return -EPERM;
46174 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
46175 + return -EACCES;
46176 +
46177 return 0;
46178 }
46179
46180 @@ -2187,6 +2234,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46181 error = complete_walk(nd);
46182 if (error)
46183 return ERR_PTR(error);
46184 +#ifdef CONFIG_GRKERNSEC
46185 + if (nd->flags & LOOKUP_RCU) {
46186 + error = -ECHILD;
46187 + goto exit;
46188 + }
46189 +#endif
46190 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46191 + error = -ENOENT;
46192 + goto exit;
46193 + }
46194 audit_inode(pathname, nd->path.dentry);
46195 if (open_flag & O_CREAT) {
46196 error = -EISDIR;
46197 @@ -2197,6 +2254,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46198 error = complete_walk(nd);
46199 if (error)
46200 return ERR_PTR(error);
46201 +#ifdef CONFIG_GRKERNSEC
46202 + if (nd->flags & LOOKUP_RCU) {
46203 + error = -ECHILD;
46204 + goto exit;
46205 + }
46206 +#endif
46207 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
46208 + error = -ENOENT;
46209 + goto exit;
46210 + }
46211 audit_inode(pathname, dir);
46212 goto ok;
46213 }
46214 @@ -2218,6 +2285,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46215 error = complete_walk(nd);
46216 if (error)
46217 return ERR_PTR(error);
46218 +#ifdef CONFIG_GRKERNSEC
46219 + if (nd->flags & LOOKUP_RCU) {
46220 + error = -ECHILD;
46221 + goto exit;
46222 + }
46223 +#endif
46224 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46225 + error = -ENOENT;
46226 + goto exit;
46227 + }
46228
46229 error = -ENOTDIR;
46230 if (nd->flags & LOOKUP_DIRECTORY) {
46231 @@ -2258,6 +2335,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46232 /* Negative dentry, just create the file */
46233 if (!dentry->d_inode) {
46234 umode_t mode = op->mode;
46235 +
46236 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
46237 + error = -EACCES;
46238 + goto exit_mutex_unlock;
46239 + }
46240 +
46241 if (!IS_POSIXACL(dir->d_inode))
46242 mode &= ~current_umask();
46243 /*
46244 @@ -2281,6 +2364,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46245 error = vfs_create(dir->d_inode, dentry, mode, nd);
46246 if (error)
46247 goto exit_mutex_unlock;
46248 + else
46249 + gr_handle_create(path->dentry, path->mnt);
46250 mutex_unlock(&dir->d_inode->i_mutex);
46251 dput(nd->path.dentry);
46252 nd->path.dentry = dentry;
46253 @@ -2290,6 +2375,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46254 /*
46255 * It already exists.
46256 */
46257 +
46258 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
46259 + error = -ENOENT;
46260 + goto exit_mutex_unlock;
46261 + }
46262 +
46263 + /* only check if O_CREAT is specified, all other checks need to go
46264 + into may_open */
46265 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
46266 + error = -EACCES;
46267 + goto exit_mutex_unlock;
46268 + }
46269 +
46270 mutex_unlock(&dir->d_inode->i_mutex);
46271 audit_inode(pathname, path->dentry);
46272
46273 @@ -2502,6 +2600,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
46274 *path = nd.path;
46275 return dentry;
46276 eexist:
46277 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
46278 + dput(dentry);
46279 + dentry = ERR_PTR(-ENOENT);
46280 + goto fail;
46281 + }
46282 dput(dentry);
46283 dentry = ERR_PTR(-EEXIST);
46284 fail:
46285 @@ -2524,6 +2627,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
46286 }
46287 EXPORT_SYMBOL(user_path_create);
46288
46289 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46290 +{
46291 + char *tmp = getname(pathname);
46292 + struct dentry *res;
46293 + if (IS_ERR(tmp))
46294 + return ERR_CAST(tmp);
46295 + res = kern_path_create(dfd, tmp, path, is_dir);
46296 + if (IS_ERR(res))
46297 + putname(tmp);
46298 + else
46299 + *to = tmp;
46300 + return res;
46301 +}
46302 +
46303 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
46304 {
46305 int error = may_create(dir, dentry);
46306 @@ -2591,6 +2708,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46307 error = mnt_want_write(path.mnt);
46308 if (error)
46309 goto out_dput;
46310 +
46311 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
46312 + error = -EPERM;
46313 + goto out_drop_write;
46314 + }
46315 +
46316 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
46317 + error = -EACCES;
46318 + goto out_drop_write;
46319 + }
46320 +
46321 error = security_path_mknod(&path, dentry, mode, dev);
46322 if (error)
46323 goto out_drop_write;
46324 @@ -2608,6 +2736,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46325 }
46326 out_drop_write:
46327 mnt_drop_write(path.mnt);
46328 +
46329 + if (!error)
46330 + gr_handle_create(dentry, path.mnt);
46331 out_dput:
46332 dput(dentry);
46333 mutex_unlock(&path.dentry->d_inode->i_mutex);
46334 @@ -2661,12 +2792,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
46335 error = mnt_want_write(path.mnt);
46336 if (error)
46337 goto out_dput;
46338 +
46339 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
46340 + error = -EACCES;
46341 + goto out_drop_write;
46342 + }
46343 +
46344 error = security_path_mkdir(&path, dentry, mode);
46345 if (error)
46346 goto out_drop_write;
46347 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
46348 out_drop_write:
46349 mnt_drop_write(path.mnt);
46350 +
46351 + if (!error)
46352 + gr_handle_create(dentry, path.mnt);
46353 out_dput:
46354 dput(dentry);
46355 mutex_unlock(&path.dentry->d_inode->i_mutex);
46356 @@ -2746,6 +2886,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46357 char * name;
46358 struct dentry *dentry;
46359 struct nameidata nd;
46360 + ino_t saved_ino = 0;
46361 + dev_t saved_dev = 0;
46362
46363 error = user_path_parent(dfd, pathname, &nd, &name);
46364 if (error)
46365 @@ -2774,6 +2916,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
46366 error = -ENOENT;
46367 goto exit3;
46368 }
46369 +
46370 + saved_ino = dentry->d_inode->i_ino;
46371 + saved_dev = gr_get_dev_from_dentry(dentry);
46372 +
46373 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
46374 + error = -EACCES;
46375 + goto exit3;
46376 + }
46377 +
46378 error = mnt_want_write(nd.path.mnt);
46379 if (error)
46380 goto exit3;
46381 @@ -2781,6 +2932,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46382 if (error)
46383 goto exit4;
46384 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
46385 + if (!error && (saved_dev || saved_ino))
46386 + gr_handle_delete(saved_ino, saved_dev);
46387 exit4:
46388 mnt_drop_write(nd.path.mnt);
46389 exit3:
46390 @@ -2843,6 +2996,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46391 struct dentry *dentry;
46392 struct nameidata nd;
46393 struct inode *inode = NULL;
46394 + ino_t saved_ino = 0;
46395 + dev_t saved_dev = 0;
46396
46397 error = user_path_parent(dfd, pathname, &nd, &name);
46398 if (error)
46399 @@ -2865,6 +3020,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46400 if (!inode)
46401 goto slashes;
46402 ihold(inode);
46403 +
46404 + if (inode->i_nlink <= 1) {
46405 + saved_ino = inode->i_ino;
46406 + saved_dev = gr_get_dev_from_dentry(dentry);
46407 + }
46408 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
46409 + error = -EACCES;
46410 + goto exit2;
46411 + }
46412 +
46413 error = mnt_want_write(nd.path.mnt);
46414 if (error)
46415 goto exit2;
46416 @@ -2872,6 +3037,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46417 if (error)
46418 goto exit3;
46419 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
46420 + if (!error && (saved_ino || saved_dev))
46421 + gr_handle_delete(saved_ino, saved_dev);
46422 exit3:
46423 mnt_drop_write(nd.path.mnt);
46424 exit2:
46425 @@ -2947,10 +3114,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
46426 error = mnt_want_write(path.mnt);
46427 if (error)
46428 goto out_dput;
46429 +
46430 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
46431 + error = -EACCES;
46432 + goto out_drop_write;
46433 + }
46434 +
46435 error = security_path_symlink(&path, dentry, from);
46436 if (error)
46437 goto out_drop_write;
46438 error = vfs_symlink(path.dentry->d_inode, dentry, from);
46439 + if (!error)
46440 + gr_handle_create(dentry, path.mnt);
46441 out_drop_write:
46442 mnt_drop_write(path.mnt);
46443 out_dput:
46444 @@ -3025,6 +3200,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46445 {
46446 struct dentry *new_dentry;
46447 struct path old_path, new_path;
46448 + char *to = NULL;
46449 int how = 0;
46450 int error;
46451
46452 @@ -3048,7 +3224,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46453 if (error)
46454 return error;
46455
46456 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
46457 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
46458 error = PTR_ERR(new_dentry);
46459 if (IS_ERR(new_dentry))
46460 goto out;
46461 @@ -3059,13 +3235,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46462 error = mnt_want_write(new_path.mnt);
46463 if (error)
46464 goto out_dput;
46465 +
46466 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
46467 + old_path.dentry->d_inode,
46468 + old_path.dentry->d_inode->i_mode, to)) {
46469 + error = -EACCES;
46470 + goto out_drop_write;
46471 + }
46472 +
46473 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
46474 + old_path.dentry, old_path.mnt, to)) {
46475 + error = -EACCES;
46476 + goto out_drop_write;
46477 + }
46478 +
46479 error = security_path_link(old_path.dentry, &new_path, new_dentry);
46480 if (error)
46481 goto out_drop_write;
46482 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
46483 + if (!error)
46484 + gr_handle_create(new_dentry, new_path.mnt);
46485 out_drop_write:
46486 mnt_drop_write(new_path.mnt);
46487 out_dput:
46488 + putname(to);
46489 dput(new_dentry);
46490 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
46491 path_put(&new_path);
46492 @@ -3299,6 +3492,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46493 if (new_dentry == trap)
46494 goto exit5;
46495
46496 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
46497 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
46498 + to);
46499 + if (error)
46500 + goto exit5;
46501 +
46502 error = mnt_want_write(oldnd.path.mnt);
46503 if (error)
46504 goto exit5;
46505 @@ -3308,6 +3507,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46506 goto exit6;
46507 error = vfs_rename(old_dir->d_inode, old_dentry,
46508 new_dir->d_inode, new_dentry);
46509 + if (!error)
46510 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
46511 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
46512 exit6:
46513 mnt_drop_write(oldnd.path.mnt);
46514 exit5:
46515 @@ -3333,6 +3535,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
46516
46517 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
46518 {
46519 + char tmpbuf[64];
46520 + const char *newlink;
46521 int len;
46522
46523 len = PTR_ERR(link);
46524 @@ -3342,7 +3546,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
46525 len = strlen(link);
46526 if (len > (unsigned) buflen)
46527 len = buflen;
46528 - if (copy_to_user(buffer, link, len))
46529 +
46530 + if (len < sizeof(tmpbuf)) {
46531 + memcpy(tmpbuf, link, len);
46532 + newlink = tmpbuf;
46533 + } else
46534 + newlink = link;
46535 +
46536 + if (copy_to_user(buffer, newlink, len))
46537 len = -EFAULT;
46538 out:
46539 return len;
46540 diff --git a/fs/namespace.c b/fs/namespace.c
46541 index 4e46539..b28253c 100644
46542 --- a/fs/namespace.c
46543 +++ b/fs/namespace.c
46544 @@ -1156,6 +1156,9 @@ static int do_umount(struct mount *mnt, int flags)
46545 if (!(sb->s_flags & MS_RDONLY))
46546 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
46547 up_write(&sb->s_umount);
46548 +
46549 + gr_log_remount(mnt->mnt_devname, retval);
46550 +
46551 return retval;
46552 }
46553
46554 @@ -1175,6 +1178,9 @@ static int do_umount(struct mount *mnt, int flags)
46555 br_write_unlock(vfsmount_lock);
46556 up_write(&namespace_sem);
46557 release_mounts(&umount_list);
46558 +
46559 + gr_log_unmount(mnt->mnt_devname, retval);
46560 +
46561 return retval;
46562 }
46563
46564 @@ -2176,6 +2182,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46565 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
46566 MS_STRICTATIME);
46567
46568 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
46569 + retval = -EPERM;
46570 + goto dput_out;
46571 + }
46572 +
46573 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
46574 + retval = -EPERM;
46575 + goto dput_out;
46576 + }
46577 +
46578 if (flags & MS_REMOUNT)
46579 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
46580 data_page);
46581 @@ -2190,6 +2206,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46582 dev_name, data_page);
46583 dput_out:
46584 path_put(&path);
46585 +
46586 + gr_log_mount(dev_name, dir_name, retval);
46587 +
46588 return retval;
46589 }
46590
46591 @@ -2471,6 +2490,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
46592 if (error)
46593 goto out2;
46594
46595 + if (gr_handle_chroot_pivot()) {
46596 + error = -EPERM;
46597 + goto out2;
46598 + }
46599 +
46600 get_fs_root(current->fs, &root);
46601 error = lock_mount(&old);
46602 if (error)
46603 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
46604 index e8bbfa5..864f936 100644
46605 --- a/fs/nfs/inode.c
46606 +++ b/fs/nfs/inode.c
46607 @@ -152,7 +152,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
46608 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
46609 nfsi->attrtimeo_timestamp = jiffies;
46610
46611 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
46612 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
46613 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
46614 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
46615 else
46616 @@ -1005,16 +1005,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
46617 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
46618 }
46619
46620 -static atomic_long_t nfs_attr_generation_counter;
46621 +static atomic_long_unchecked_t nfs_attr_generation_counter;
46622
46623 static unsigned long nfs_read_attr_generation_counter(void)
46624 {
46625 - return atomic_long_read(&nfs_attr_generation_counter);
46626 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
46627 }
46628
46629 unsigned long nfs_inc_attr_generation_counter(void)
46630 {
46631 - return atomic_long_inc_return(&nfs_attr_generation_counter);
46632 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
46633 }
46634
46635 void nfs_fattr_init(struct nfs_fattr *fattr)
46636 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
46637 index 5686661..80a9a3a 100644
46638 --- a/fs/nfsd/vfs.c
46639 +++ b/fs/nfsd/vfs.c
46640 @@ -933,7 +933,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46641 } else {
46642 oldfs = get_fs();
46643 set_fs(KERNEL_DS);
46644 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
46645 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
46646 set_fs(oldfs);
46647 }
46648
46649 @@ -1037,7 +1037,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46650
46651 /* Write the data. */
46652 oldfs = get_fs(); set_fs(KERNEL_DS);
46653 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
46654 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
46655 set_fs(oldfs);
46656 if (host_err < 0)
46657 goto out_nfserr;
46658 @@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
46659 */
46660
46661 oldfs = get_fs(); set_fs(KERNEL_DS);
46662 - host_err = inode->i_op->readlink(path.dentry, buf, *lenp);
46663 + host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
46664 set_fs(oldfs);
46665
46666 if (host_err < 0)
46667 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
46668 index 3568c8a..e0240d8 100644
46669 --- a/fs/notify/fanotify/fanotify_user.c
46670 +++ b/fs/notify/fanotify/fanotify_user.c
46671 @@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
46672 goto out_close_fd;
46673
46674 ret = -EFAULT;
46675 - if (copy_to_user(buf, &fanotify_event_metadata,
46676 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
46677 + copy_to_user(buf, &fanotify_event_metadata,
46678 fanotify_event_metadata.event_len))
46679 goto out_kill_access_response;
46680
46681 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
46682 index c887b13..0fdf472 100644
46683 --- a/fs/notify/notification.c
46684 +++ b/fs/notify/notification.c
46685 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
46686 * get set to 0 so it will never get 'freed'
46687 */
46688 static struct fsnotify_event *q_overflow_event;
46689 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46690 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46691
46692 /**
46693 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
46694 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46695 */
46696 u32 fsnotify_get_cookie(void)
46697 {
46698 - return atomic_inc_return(&fsnotify_sync_cookie);
46699 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
46700 }
46701 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
46702
46703 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
46704 index 99e3610..02c1068 100644
46705 --- a/fs/ntfs/dir.c
46706 +++ b/fs/ntfs/dir.c
46707 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
46708 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
46709 ~(s64)(ndir->itype.index.block_size - 1)));
46710 /* Bounds checks. */
46711 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46712 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46713 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
46714 "inode 0x%lx or driver bug.", vdir->i_ino);
46715 goto err_out;
46716 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
46717 index 8639169..76697aa 100644
46718 --- a/fs/ntfs/file.c
46719 +++ b/fs/ntfs/file.c
46720 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
46721 #endif /* NTFS_RW */
46722 };
46723
46724 -const struct file_operations ntfs_empty_file_ops = {};
46725 +const struct file_operations ntfs_empty_file_ops __read_only;
46726
46727 -const struct inode_operations ntfs_empty_inode_ops = {};
46728 +const struct inode_operations ntfs_empty_inode_ops __read_only;
46729 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
46730 index 210c352..a174f83 100644
46731 --- a/fs/ocfs2/localalloc.c
46732 +++ b/fs/ocfs2/localalloc.c
46733 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
46734 goto bail;
46735 }
46736
46737 - atomic_inc(&osb->alloc_stats.moves);
46738 + atomic_inc_unchecked(&osb->alloc_stats.moves);
46739
46740 bail:
46741 if (handle)
46742 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
46743 index d355e6e..578d905 100644
46744 --- a/fs/ocfs2/ocfs2.h
46745 +++ b/fs/ocfs2/ocfs2.h
46746 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
46747
46748 struct ocfs2_alloc_stats
46749 {
46750 - atomic_t moves;
46751 - atomic_t local_data;
46752 - atomic_t bitmap_data;
46753 - atomic_t bg_allocs;
46754 - atomic_t bg_extends;
46755 + atomic_unchecked_t moves;
46756 + atomic_unchecked_t local_data;
46757 + atomic_unchecked_t bitmap_data;
46758 + atomic_unchecked_t bg_allocs;
46759 + atomic_unchecked_t bg_extends;
46760 };
46761
46762 enum ocfs2_local_alloc_state
46763 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
46764 index f169da4..9112253 100644
46765 --- a/fs/ocfs2/suballoc.c
46766 +++ b/fs/ocfs2/suballoc.c
46767 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
46768 mlog_errno(status);
46769 goto bail;
46770 }
46771 - atomic_inc(&osb->alloc_stats.bg_extends);
46772 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
46773
46774 /* You should never ask for this much metadata */
46775 BUG_ON(bits_wanted >
46776 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
46777 mlog_errno(status);
46778 goto bail;
46779 }
46780 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46781 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46782
46783 *suballoc_loc = res.sr_bg_blkno;
46784 *suballoc_bit_start = res.sr_bit_offset;
46785 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
46786 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
46787 res->sr_bits);
46788
46789 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46790 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46791
46792 BUG_ON(res->sr_bits != 1);
46793
46794 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
46795 mlog_errno(status);
46796 goto bail;
46797 }
46798 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46799 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46800
46801 BUG_ON(res.sr_bits != 1);
46802
46803 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46804 cluster_start,
46805 num_clusters);
46806 if (!status)
46807 - atomic_inc(&osb->alloc_stats.local_data);
46808 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
46809 } else {
46810 if (min_clusters > (osb->bitmap_cpg - 1)) {
46811 /* The only paths asking for contiguousness
46812 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46813 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
46814 res.sr_bg_blkno,
46815 res.sr_bit_offset);
46816 - atomic_inc(&osb->alloc_stats.bitmap_data);
46817 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
46818 *num_clusters = res.sr_bits;
46819 }
46820 }
46821 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
46822 index 68f4541..89cfe6a 100644
46823 --- a/fs/ocfs2/super.c
46824 +++ b/fs/ocfs2/super.c
46825 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
46826 "%10s => GlobalAllocs: %d LocalAllocs: %d "
46827 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
46828 "Stats",
46829 - atomic_read(&osb->alloc_stats.bitmap_data),
46830 - atomic_read(&osb->alloc_stats.local_data),
46831 - atomic_read(&osb->alloc_stats.bg_allocs),
46832 - atomic_read(&osb->alloc_stats.moves),
46833 - atomic_read(&osb->alloc_stats.bg_extends));
46834 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
46835 + atomic_read_unchecked(&osb->alloc_stats.local_data),
46836 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
46837 + atomic_read_unchecked(&osb->alloc_stats.moves),
46838 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
46839
46840 out += snprintf(buf + out, len - out,
46841 "%10s => State: %u Descriptor: %llu Size: %u bits "
46842 @@ -2116,11 +2116,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
46843 spin_lock_init(&osb->osb_xattr_lock);
46844 ocfs2_init_steal_slots(osb);
46845
46846 - atomic_set(&osb->alloc_stats.moves, 0);
46847 - atomic_set(&osb->alloc_stats.local_data, 0);
46848 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
46849 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
46850 - atomic_set(&osb->alloc_stats.bg_extends, 0);
46851 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
46852 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
46853 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
46854 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
46855 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
46856
46857 /* Copy the blockcheck stats from the superblock probe */
46858 osb->osb_ecc_stats = *stats;
46859 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
46860 index 5d22872..523db20 100644
46861 --- a/fs/ocfs2/symlink.c
46862 +++ b/fs/ocfs2/symlink.c
46863 @@ -142,7 +142,7 @@ bail:
46864
46865 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46866 {
46867 - char *link = nd_get_link(nd);
46868 + const char *link = nd_get_link(nd);
46869 if (!IS_ERR(link))
46870 kfree(link);
46871 }
46872 diff --git a/fs/open.c b/fs/open.c
46873 index 5720854..ccfe124 100644
46874 --- a/fs/open.c
46875 +++ b/fs/open.c
46876 @@ -31,6 +31,8 @@
46877 #include <linux/ima.h>
46878 #include <linux/dnotify.h>
46879
46880 +#define CREATE_TRACE_POINTS
46881 +#include <trace/events/fs.h>
46882 #include "internal.h"
46883
46884 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
46885 @@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
46886 error = locks_verify_truncate(inode, NULL, length);
46887 if (!error)
46888 error = security_path_truncate(&path);
46889 +
46890 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
46891 + error = -EACCES;
46892 +
46893 if (!error)
46894 error = do_truncate(path.dentry, length, 0, NULL);
46895
46896 @@ -358,6 +364,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
46897 if (__mnt_is_readonly(path.mnt))
46898 res = -EROFS;
46899
46900 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
46901 + res = -EACCES;
46902 +
46903 out_path_release:
46904 path_put(&path);
46905 out:
46906 @@ -384,6 +393,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
46907 if (error)
46908 goto dput_and_out;
46909
46910 + gr_log_chdir(path.dentry, path.mnt);
46911 +
46912 set_fs_pwd(current->fs, &path);
46913
46914 dput_and_out:
46915 @@ -410,6 +421,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
46916 goto out_putf;
46917
46918 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
46919 +
46920 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
46921 + error = -EPERM;
46922 +
46923 + if (!error)
46924 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
46925 +
46926 if (!error)
46927 set_fs_pwd(current->fs, &file->f_path);
46928 out_putf:
46929 @@ -438,7 +456,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
46930 if (error)
46931 goto dput_and_out;
46932
46933 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
46934 + goto dput_and_out;
46935 +
46936 set_fs_root(current->fs, &path);
46937 +
46938 + gr_handle_chroot_chdir(&path);
46939 +
46940 error = 0;
46941 dput_and_out:
46942 path_put(&path);
46943 @@ -456,6 +480,16 @@ static int chmod_common(struct path *path, umode_t mode)
46944 if (error)
46945 return error;
46946 mutex_lock(&inode->i_mutex);
46947 +
46948 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
46949 + error = -EACCES;
46950 + goto out_unlock;
46951 + }
46952 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
46953 + error = -EACCES;
46954 + goto out_unlock;
46955 + }
46956 +
46957 error = security_path_chmod(path, mode);
46958 if (error)
46959 goto out_unlock;
46960 @@ -506,6 +540,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
46961 int error;
46962 struct iattr newattrs;
46963
46964 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
46965 + return -EACCES;
46966 +
46967 newattrs.ia_valid = ATTR_CTIME;
46968 if (user != (uid_t) -1) {
46969 newattrs.ia_valid |= ATTR_UID;
46970 @@ -987,6 +1024,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
46971 } else {
46972 fsnotify_open(f);
46973 fd_install(fd, f);
46974 + trace_do_sys_open(tmp, flags, mode);
46975 }
46976 }
46977 putname(tmp);
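The fs/open.c hunks above all share one shape: after the existing DAC/LSM checks succeed, a gr_acl_handle_*() or gr_handle_chroot_*() predicate is consulted and a refusal is mapped to -EACCES (or -EPERM in fchdir) before the real operation runs. A minimal userspace sketch of that deny-before-act control flow, assuming invented names (policy_allows_truncate, do_truncate_impl) that only stand in for the grsecurity hook and the original operation:

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical policy predicate: non-zero means "allowed". */
    static int policy_allows_truncate(const char *path)
    {
        return path[0] != '\0' && path[0] != '.';   /* toy rule for the sketch */
    }

    static int do_truncate_impl(const char *path, long length)
    {
        printf("truncating %s to %ld\n", path, length);
        return 0;
    }

    /* Mirrors the hunk: run the normal checks first, then the policy hook,
     * and only perform the operation when both succeed. */
    static int sys_truncate_sketch(const char *path, long length)
    {
        int error = 0;                  /* stands in for the earlier DAC/LSM checks */

        if (!error && !policy_allows_truncate(path))
            error = -EACCES;            /* deny without touching the file */

        if (!error)
            error = do_truncate_impl(path, length);

        return error;
    }

    int main(void)
    {
        printf("%d\n", sys_truncate_sketch("/tmp/a", 0));    /* 0 */
        printf("%d\n", sys_truncate_sketch(".hidden", 0));   /* -EACCES */
        return 0;
    }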
46978 diff --git a/fs/pipe.c b/fs/pipe.c
46979 index fec5e4a..f4210f9 100644
46980 --- a/fs/pipe.c
46981 +++ b/fs/pipe.c
46982 @@ -438,9 +438,9 @@ redo:
46983 }
46984 if (bufs) /* More to do? */
46985 continue;
46986 - if (!pipe->writers)
46987 + if (!atomic_read(&pipe->writers))
46988 break;
46989 - if (!pipe->waiting_writers) {
46990 + if (!atomic_read(&pipe->waiting_writers)) {
46991 /* syscall merging: Usually we must not sleep
46992 * if O_NONBLOCK is set, or if we got some data.
46993 * But if a writer sleeps in kernel space, then
46994 @@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
46995 mutex_lock(&inode->i_mutex);
46996 pipe = inode->i_pipe;
46997
46998 - if (!pipe->readers) {
46999 + if (!atomic_read(&pipe->readers)) {
47000 send_sig(SIGPIPE, current, 0);
47001 ret = -EPIPE;
47002 goto out;
47003 @@ -553,7 +553,7 @@ redo1:
47004 for (;;) {
47005 int bufs;
47006
47007 - if (!pipe->readers) {
47008 + if (!atomic_read(&pipe->readers)) {
47009 send_sig(SIGPIPE, current, 0);
47010 if (!ret)
47011 ret = -EPIPE;
47012 @@ -644,9 +644,9 @@ redo2:
47013 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
47014 do_wakeup = 0;
47015 }
47016 - pipe->waiting_writers++;
47017 + atomic_inc(&pipe->waiting_writers);
47018 pipe_wait(pipe);
47019 - pipe->waiting_writers--;
47020 + atomic_dec(&pipe->waiting_writers);
47021 }
47022 out:
47023 mutex_unlock(&inode->i_mutex);
47024 @@ -713,7 +713,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47025 mask = 0;
47026 if (filp->f_mode & FMODE_READ) {
47027 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
47028 - if (!pipe->writers && filp->f_version != pipe->w_counter)
47029 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
47030 mask |= POLLHUP;
47031 }
47032
47033 @@ -723,7 +723,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47034 * Most Unices do not set POLLERR for FIFOs but on Linux they
47035 * behave exactly like pipes for poll().
47036 */
47037 - if (!pipe->readers)
47038 + if (!atomic_read(&pipe->readers))
47039 mask |= POLLERR;
47040 }
47041
47042 @@ -737,10 +737,10 @@ pipe_release(struct inode *inode, int decr, int decw)
47043
47044 mutex_lock(&inode->i_mutex);
47045 pipe = inode->i_pipe;
47046 - pipe->readers -= decr;
47047 - pipe->writers -= decw;
47048 + atomic_sub(decr, &pipe->readers);
47049 + atomic_sub(decw, &pipe->writers);
47050
47051 - if (!pipe->readers && !pipe->writers) {
47052 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
47053 free_pipe_info(inode);
47054 } else {
47055 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
47056 @@ -830,7 +830,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
47057
47058 if (inode->i_pipe) {
47059 ret = 0;
47060 - inode->i_pipe->readers++;
47061 + atomic_inc(&inode->i_pipe->readers);
47062 }
47063
47064 mutex_unlock(&inode->i_mutex);
47065 @@ -847,7 +847,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
47066
47067 if (inode->i_pipe) {
47068 ret = 0;
47069 - inode->i_pipe->writers++;
47070 + atomic_inc(&inode->i_pipe->writers);
47071 }
47072
47073 mutex_unlock(&inode->i_mutex);
47074 @@ -865,9 +865,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
47075 if (inode->i_pipe) {
47076 ret = 0;
47077 if (filp->f_mode & FMODE_READ)
47078 - inode->i_pipe->readers++;
47079 + atomic_inc(&inode->i_pipe->readers);
47080 if (filp->f_mode & FMODE_WRITE)
47081 - inode->i_pipe->writers++;
47082 + atomic_inc(&inode->i_pipe->writers);
47083 }
47084
47085 mutex_unlock(&inode->i_mutex);
47086 @@ -959,7 +959,7 @@ void free_pipe_info(struct inode *inode)
47087 inode->i_pipe = NULL;
47088 }
47089
47090 -static struct vfsmount *pipe_mnt __read_mostly;
47091 +struct vfsmount *pipe_mnt __read_mostly;
47092
47093 /*
47094 * pipefs_dname() is called from d_path().
47095 @@ -989,7 +989,8 @@ static struct inode * get_pipe_inode(void)
47096 goto fail_iput;
47097 inode->i_pipe = pipe;
47098
47099 - pipe->readers = pipe->writers = 1;
47100 + atomic_set(&pipe->readers, 1);
47101 + atomic_set(&pipe->writers, 1);
47102 inode->i_fop = &rdwr_pipefifo_fops;
47103
47104 /*
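The fs/pipe.c changes convert the plain integer readers, writers and waiting_writers counters of struct pipe_inode_info to atomic_t (the struct itself is changed elsewhere in the patch, not in this excerpt), and every bare load, increment, decrement and assignment becomes atomic_read/atomic_inc/atomic_dec/atomic_sub/atomic_set; fs/splice.c further down gets the matching conversions. A short C11 sketch of the same before/after shape, using stdatomic.h as a stand-in for the kernel's atomic_t and a hypothetical pipe_ref struct:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the counters in struct pipe_inode_info. */
    struct pipe_ref {
        atomic_int readers;          /* was: unsigned int readers;         */
        atomic_int writers;          /* was: unsigned int writers;         */
        atomic_int waiting_writers;  /* was: unsigned int waiting_writers; */
    };

    static void pipe_open_rdwr(struct pipe_ref *p)
    {
        /* was: p->readers = p->writers = 1; */
        atomic_store(&p->readers, 1);
        atomic_store(&p->writers, 1);
    }

    static int pipe_has_writers(struct pipe_ref *p)
    {
        /* was: return p->writers != 0; */
        return atomic_load(&p->writers) != 0;
    }

    static void pipe_release(struct pipe_ref *p, int decr, int decw)
    {
        /* was: p->readers -= decr; p->writers -= decw; */
        atomic_fetch_sub(&p->readers, decr);
        atomic_fetch_sub(&p->writers, decw);
    }

    int main(void)
    {
        struct pipe_ref p;

        pipe_open_rdwr(&p);
        pipe_release(&p, 1, 0);
        printf("writers left: %d\n", pipe_has_writers(&p));  /* 1 */
        return 0;
    }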
47105 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
47106 index 15af622..0e9f4467 100644
47107 --- a/fs/proc/Kconfig
47108 +++ b/fs/proc/Kconfig
47109 @@ -30,12 +30,12 @@ config PROC_FS
47110
47111 config PROC_KCORE
47112 bool "/proc/kcore support" if !ARM
47113 - depends on PROC_FS && MMU
47114 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
47115
47116 config PROC_VMCORE
47117 bool "/proc/vmcore support"
47118 - depends on PROC_FS && CRASH_DUMP
47119 - default y
47120 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
47121 + default n
47122 help
47123 Exports the dump image of crashed kernel in ELF format.
47124
47125 @@ -59,8 +59,8 @@ config PROC_SYSCTL
47126 limited in memory.
47127
47128 config PROC_PAGE_MONITOR
47129 - default y
47130 - depends on PROC_FS && MMU
47131 + default n
47132 + depends on PROC_FS && MMU && !GRKERNSEC
47133 bool "Enable /proc page monitoring" if EXPERT
47134 help
47135 Various /proc files exist to monitor process memory utilization:
47136 diff --git a/fs/proc/array.c b/fs/proc/array.c
47137 index f9bd395..acb7847 100644
47138 --- a/fs/proc/array.c
47139 +++ b/fs/proc/array.c
47140 @@ -60,6 +60,7 @@
47141 #include <linux/tty.h>
47142 #include <linux/string.h>
47143 #include <linux/mman.h>
47144 +#include <linux/grsecurity.h>
47145 #include <linux/proc_fs.h>
47146 #include <linux/ioport.h>
47147 #include <linux/uaccess.h>
47148 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
47149 seq_putc(m, '\n');
47150 }
47151
47152 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47153 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
47154 +{
47155 + if (p->mm)
47156 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
47157 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
47158 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
47159 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
47160 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
47161 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
47162 + else
47163 + seq_printf(m, "PaX:\t-----\n");
47164 +}
47165 +#endif
47166 +
47167 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47168 struct pid *pid, struct task_struct *task)
47169 {
47170 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47171 task_cpus_allowed(m, task);
47172 cpuset_task_status_allowed(m, task);
47173 task_context_switch_counts(m, task);
47174 +
47175 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47176 + task_pax(m, task);
47177 +#endif
47178 +
47179 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
47180 + task_grsec_rbac(m, task);
47181 +#endif
47182 +
47183 return 0;
47184 }
47185
47186 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47187 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47188 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47189 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47190 +#endif
47191 +
47192 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47193 struct pid *pid, struct task_struct *task, int whole)
47194 {
47195 @@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47196 char tcomm[sizeof(task->comm)];
47197 unsigned long flags;
47198
47199 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47200 + if (current->exec_id != m->exec_id) {
47201 + gr_log_badprocpid("stat");
47202 + return 0;
47203 + }
47204 +#endif
47205 +
47206 state = *get_task_state(task);
47207 vsize = eip = esp = 0;
47208 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47209 @@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47210 gtime = task->gtime;
47211 }
47212
47213 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47214 + if (PAX_RAND_FLAGS(mm)) {
47215 + eip = 0;
47216 + esp = 0;
47217 + wchan = 0;
47218 + }
47219 +#endif
47220 +#ifdef CONFIG_GRKERNSEC_HIDESYM
47221 + wchan = 0;
47222 + eip =0;
47223 + esp =0;
47224 +#endif
47225 +
47226 /* scale priority and nice values from timeslices to -20..20 */
47227 /* to make it look like a "normal" Unix priority/nice value */
47228 priority = task_prio(task);
47229 @@ -485,9 +536,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47230 seq_put_decimal_ull(m, ' ', vsize);
47231 seq_put_decimal_ll(m, ' ', mm ? get_mm_rss(mm) : 0);
47232 seq_put_decimal_ull(m, ' ', rsslim);
47233 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47234 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
47235 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
47236 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
47237 +#else
47238 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
47239 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
47240 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
47241 +#endif
47242 seq_put_decimal_ull(m, ' ', esp);
47243 seq_put_decimal_ull(m, ' ', eip);
47244 /* The signal information here is obsolete.
47245 @@ -508,9 +565,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47246 seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
47247 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
47248 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
47249 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47250 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_data : 0));
47251 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->end_data : 0));
47252 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_brk : 0));
47253 +#else
47254 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_data : 0);
47255 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->end_data : 0);
47256 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_brk : 0);
47257 +#endif
47258 seq_putc(m, '\n');
47259 if (mm)
47260 mmput(mm);
47261 @@ -533,8 +596,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47262 struct pid *pid, struct task_struct *task)
47263 {
47264 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
47265 - struct mm_struct *mm = get_task_mm(task);
47266 + struct mm_struct *mm;
47267
47268 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47269 + if (current->exec_id != m->exec_id) {
47270 + gr_log_badprocpid("statm");
47271 + return 0;
47272 + }
47273 +#endif
47274 + mm = get_task_mm(task);
47275 if (mm) {
47276 size = task_statm(mm, &shared, &text, &data, &resident);
47277 mmput(mm);
47278 @@ -556,3 +626,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47279
47280 return 0;
47281 }
47282 +
47283 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47284 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
47285 +{
47286 + u32 curr_ip = 0;
47287 + unsigned long flags;
47288 +
47289 + if (lock_task_sighand(task, &flags)) {
47290 + curr_ip = task->signal->curr_ip;
47291 + unlock_task_sighand(task, &flags);
47292 + }
47293 +
47294 + return sprintf(buffer, "%pI4\n", &curr_ip);
47295 +}
47296 +#endif
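task_pax() above renders five mm->pax_flags bits as a fixed-width "PaX:" line in /proc/<pid>/status, upper-case for a set flag, lower-case for a clear one, and "-----" when the task has no mm. A standalone sketch of that encoding; the flag values below are made up for illustration, the real MF_PAX_* constants come from the PaX headers rather than this file:

    #include <stdio.h>

    /* Illustrative values only; the real MF_PAX_* constants are defined by PaX. */
    #define MF_PAX_PAGEEXEC  0x01
    #define MF_PAX_EMUTRAMP  0x02
    #define MF_PAX_MPROTECT  0x04
    #define MF_PAX_RANDMMAP  0x08
    #define MF_PAX_SEGMEXEC  0x10

    static void show_pax(unsigned long flags)
    {
        printf("PaX:\t%c%c%c%c%c\n",
               flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
               flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
               flags & MF_PAX_MPROTECT ? 'M' : 'm',
               flags & MF_PAX_RANDMMAP ? 'R' : 'r',
               flags & MF_PAX_SEGMEXEC ? 'S' : 's');
    }

    int main(void)
    {
        show_pax(MF_PAX_PAGEEXEC | MF_PAX_MPROTECT | MF_PAX_RANDMMAP);  /* PaX: PeMRs */
        show_pax(0);                                                    /* PaX: pemrs */
        return 0;
    }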
47297 diff --git a/fs/proc/base.c b/fs/proc/base.c
47298 index 9fc77b4..04761b8 100644
47299 --- a/fs/proc/base.c
47300 +++ b/fs/proc/base.c
47301 @@ -109,6 +109,14 @@ struct pid_entry {
47302 union proc_op op;
47303 };
47304
47305 +struct getdents_callback {
47306 + struct linux_dirent __user * current_dir;
47307 + struct linux_dirent __user * previous;
47308 + struct file * file;
47309 + int count;
47310 + int error;
47311 +};
47312 +
47313 #define NOD(NAME, MODE, IOP, FOP, OP) { \
47314 .name = (NAME), \
47315 .len = sizeof(NAME) - 1, \
47316 @@ -213,6 +221,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47317 if (!mm->arg_end)
47318 goto out_mm; /* Shh! No looking before we're done */
47319
47320 + if (gr_acl_handle_procpidmem(task))
47321 + goto out_mm;
47322 +
47323 len = mm->arg_end - mm->arg_start;
47324
47325 if (len > PAGE_SIZE)
47326 @@ -240,12 +251,28 @@ out:
47327 return res;
47328 }
47329
47330 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47331 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47332 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47333 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47334 +#endif
47335 +
47336 static int proc_pid_auxv(struct task_struct *task, char *buffer)
47337 {
47338 struct mm_struct *mm = mm_for_maps(task);
47339 int res = PTR_ERR(mm);
47340 if (mm && !IS_ERR(mm)) {
47341 unsigned int nwords = 0;
47342 +
47343 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47344 + /* allow if we're currently ptracing this task */
47345 + if (PAX_RAND_FLAGS(mm) &&
47346 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
47347 + mmput(mm);
47348 + return 0;
47349 + }
47350 +#endif
47351 +
47352 do {
47353 nwords += 2;
47354 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
47355 @@ -259,7 +286,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
47356 }
47357
47358
47359 -#ifdef CONFIG_KALLSYMS
47360 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47361 /*
47362 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
47363 * Returns the resolved symbol. If that fails, simply return the address.
47364 @@ -298,7 +325,7 @@ static void unlock_trace(struct task_struct *task)
47365 mutex_unlock(&task->signal->cred_guard_mutex);
47366 }
47367
47368 -#ifdef CONFIG_STACKTRACE
47369 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47370
47371 #define MAX_STACK_TRACE_DEPTH 64
47372
47373 @@ -489,7 +516,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
47374 return count;
47375 }
47376
47377 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47378 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47379 static int proc_pid_syscall(struct task_struct *task, char *buffer)
47380 {
47381 long nr;
47382 @@ -518,7 +545,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
47383 /************************************************************************/
47384
47385 /* permission checks */
47386 -static int proc_fd_access_allowed(struct inode *inode)
47387 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
47388 {
47389 struct task_struct *task;
47390 int allowed = 0;
47391 @@ -528,7 +555,10 @@ static int proc_fd_access_allowed(struct inode *inode)
47392 */
47393 task = get_proc_task(inode);
47394 if (task) {
47395 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47396 + if (log)
47397 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47398 + else
47399 + allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47400 put_task_struct(task);
47401 }
47402 return allowed;
47403 @@ -566,10 +596,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
47404 struct task_struct *task,
47405 int hide_pid_min)
47406 {
47407 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47408 + return false;
47409 +
47410 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47411 + rcu_read_lock();
47412 + {
47413 + const struct cred *tmpcred = current_cred();
47414 + const struct cred *cred = __task_cred(task);
47415 +
47416 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
47417 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47418 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
47419 +#endif
47420 + ) {
47421 + rcu_read_unlock();
47422 + return true;
47423 + }
47424 + }
47425 + rcu_read_unlock();
47426 +
47427 + if (!pid->hide_pid)
47428 + return false;
47429 +#endif
47430 +
47431 if (pid->hide_pid < hide_pid_min)
47432 return true;
47433 if (in_group_p(pid->pid_gid))
47434 return true;
47435 +
47436 return ptrace_may_access(task, PTRACE_MODE_READ);
47437 }
47438
47439 @@ -587,7 +642,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
47440 put_task_struct(task);
47441
47442 if (!has_perms) {
47443 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47444 + {
47445 +#else
47446 if (pid->hide_pid == 2) {
47447 +#endif
47448 /*
47449 * Let's make getdents(), stat(), and open()
47450 * consistent with each other. If a process
47451 @@ -702,6 +761,10 @@ static int mem_open(struct inode* inode, struct file* file)
47452 file->f_mode |= FMODE_UNSIGNED_OFFSET;
47453 file->private_data = mm;
47454
47455 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47456 + file->f_version = current->exec_id;
47457 +#endif
47458 +
47459 return 0;
47460 }
47461
47462 @@ -713,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
47463 ssize_t copied;
47464 char *page;
47465
47466 +#ifdef CONFIG_GRKERNSEC
47467 + if (write)
47468 + return -EPERM;
47469 +#endif
47470 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47471 + if (file->f_version != current->exec_id) {
47472 + gr_log_badprocpid("mem");
47473 + return 0;
47474 + }
47475 +#endif
47476 +
47477 if (!mm)
47478 return 0;
47479
47480 @@ -813,6 +887,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
47481 if (!task)
47482 goto out_no_task;
47483
47484 + if (gr_acl_handle_procpidmem(task))
47485 + goto out;
47486 +
47487 ret = -ENOMEM;
47488 page = (char *)__get_free_page(GFP_TEMPORARY);
47489 if (!page)
47490 @@ -1433,7 +1510,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
47491 path_put(&nd->path);
47492
47493 /* Are we allowed to snoop on the tasks file descriptors? */
47494 - if (!proc_fd_access_allowed(inode))
47495 + if (!proc_fd_access_allowed(inode, 0))
47496 goto out;
47497
47498 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
47499 @@ -1472,8 +1549,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
47500 struct path path;
47501
47502 /* Are we allowed to snoop on the tasks file descriptors? */
47503 - if (!proc_fd_access_allowed(inode))
47504 - goto out;
47505 + /* logging this is needed for learning on chromium to work properly,
47506 + but we don't want to flood the logs from 'ps' which does a readlink
47507 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
47508 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
47509 + */
47510 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
47511 + if (!proc_fd_access_allowed(inode,0))
47512 + goto out;
47513 + } else {
47514 + if (!proc_fd_access_allowed(inode,1))
47515 + goto out;
47516 + }
47517
47518 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
47519 if (error)
47520 @@ -1538,7 +1625,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
47521 rcu_read_lock();
47522 cred = __task_cred(task);
47523 inode->i_uid = cred->euid;
47524 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47525 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47526 +#else
47527 inode->i_gid = cred->egid;
47528 +#endif
47529 rcu_read_unlock();
47530 }
47531 security_task_to_inode(task, inode);
47532 @@ -1574,10 +1665,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
47533 return -ENOENT;
47534 }
47535 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47536 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47537 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47538 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47539 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47540 +#endif
47541 task_dumpable(task)) {
47542 cred = __task_cred(task);
47543 stat->uid = cred->euid;
47544 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47545 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
47546 +#else
47547 stat->gid = cred->egid;
47548 +#endif
47549 }
47550 }
47551 rcu_read_unlock();
47552 @@ -1615,11 +1715,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
47553
47554 if (task) {
47555 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47556 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47557 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47558 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47559 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47560 +#endif
47561 task_dumpable(task)) {
47562 rcu_read_lock();
47563 cred = __task_cred(task);
47564 inode->i_uid = cred->euid;
47565 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47566 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47567 +#else
47568 inode->i_gid = cred->egid;
47569 +#endif
47570 rcu_read_unlock();
47571 } else {
47572 inode->i_uid = 0;
47573 @@ -1737,7 +1846,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
47574 int fd = proc_fd(inode);
47575
47576 if (task) {
47577 - files = get_files_struct(task);
47578 + if (!gr_acl_handle_procpidmem(task))
47579 + files = get_files_struct(task);
47580 put_task_struct(task);
47581 }
47582 if (files) {
47583 @@ -2338,11 +2448,21 @@ static const struct file_operations proc_map_files_operations = {
47584 */
47585 static int proc_fd_permission(struct inode *inode, int mask)
47586 {
47587 + struct task_struct *task;
47588 int rv = generic_permission(inode, mask);
47589 - if (rv == 0)
47590 - return 0;
47591 +
47592 if (task_pid(current) == proc_pid(inode))
47593 rv = 0;
47594 +
47595 + task = get_proc_task(inode);
47596 + if (task == NULL)
47597 + return rv;
47598 +
47599 + if (gr_acl_handle_procpidmem(task))
47600 + rv = -EACCES;
47601 +
47602 + put_task_struct(task);
47603 +
47604 return rv;
47605 }
47606
47607 @@ -2452,6 +2572,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
47608 if (!task)
47609 goto out_no_task;
47610
47611 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47612 + goto out;
47613 +
47614 /*
47615 * Yes, it does not scale. And it should not. Don't add
47616 * new entries into /proc/<tgid>/ without very good reasons.
47617 @@ -2496,6 +2619,9 @@ static int proc_pident_readdir(struct file *filp,
47618 if (!task)
47619 goto out_no_task;
47620
47621 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47622 + goto out;
47623 +
47624 ret = 0;
47625 i = filp->f_pos;
47626 switch (i) {
47627 @@ -2766,7 +2892,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
47628 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
47629 void *cookie)
47630 {
47631 - char *s = nd_get_link(nd);
47632 + const char *s = nd_get_link(nd);
47633 if (!IS_ERR(s))
47634 __putname(s);
47635 }
47636 @@ -2967,7 +3093,7 @@ static const struct pid_entry tgid_base_stuff[] = {
47637 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
47638 #endif
47639 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47640 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47641 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47642 INF("syscall", S_IRUGO, proc_pid_syscall),
47643 #endif
47644 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47645 @@ -2992,10 +3118,10 @@ static const struct pid_entry tgid_base_stuff[] = {
47646 #ifdef CONFIG_SECURITY
47647 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47648 #endif
47649 -#ifdef CONFIG_KALLSYMS
47650 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47651 INF("wchan", S_IRUGO, proc_pid_wchan),
47652 #endif
47653 -#ifdef CONFIG_STACKTRACE
47654 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47655 ONE("stack", S_IRUGO, proc_pid_stack),
47656 #endif
47657 #ifdef CONFIG_SCHEDSTATS
47658 @@ -3029,6 +3155,9 @@ static const struct pid_entry tgid_base_stuff[] = {
47659 #ifdef CONFIG_HARDWALL
47660 INF("hardwall", S_IRUGO, proc_pid_hardwall),
47661 #endif
47662 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47663 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
47664 +#endif
47665 };
47666
47667 static int proc_tgid_base_readdir(struct file * filp,
47668 @@ -3155,7 +3284,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
47669 if (!inode)
47670 goto out;
47671
47672 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47673 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
47674 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47675 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47676 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
47677 +#else
47678 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
47679 +#endif
47680 inode->i_op = &proc_tgid_base_inode_operations;
47681 inode->i_fop = &proc_tgid_base_operations;
47682 inode->i_flags|=S_IMMUTABLE;
47683 @@ -3197,7 +3333,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
47684 if (!task)
47685 goto out;
47686
47687 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47688 + goto out_put_task;
47689 +
47690 result = proc_pid_instantiate(dir, dentry, task, NULL);
47691 +out_put_task:
47692 put_task_struct(task);
47693 out:
47694 return result;
47695 @@ -3260,6 +3400,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
47696 static int fake_filldir(void *buf, const char *name, int namelen,
47697 loff_t offset, u64 ino, unsigned d_type)
47698 {
47699 + struct getdents_callback * __buf = (struct getdents_callback *) buf;
47700 + __buf->error = -EINVAL;
47701 return 0;
47702 }
47703
47704 @@ -3326,7 +3468,7 @@ static const struct pid_entry tid_base_stuff[] = {
47705 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
47706 #endif
47707 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47708 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47709 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47710 INF("syscall", S_IRUGO, proc_pid_syscall),
47711 #endif
47712 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47713 @@ -3350,10 +3492,10 @@ static const struct pid_entry tid_base_stuff[] = {
47714 #ifdef CONFIG_SECURITY
47715 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47716 #endif
47717 -#ifdef CONFIG_KALLSYMS
47718 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47719 INF("wchan", S_IRUGO, proc_pid_wchan),
47720 #endif
47721 -#ifdef CONFIG_STACKTRACE
47722 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47723 ONE("stack", S_IRUGO, proc_pid_stack),
47724 #endif
47725 #ifdef CONFIG_SCHEDSTATS
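Under CONFIG_GRKERNSEC_PROC_USER the /proc/<pid> directories built by proc_pid_instantiate() above are narrowed from 0555 to 0500, and under CONFIG_GRKERNSEC_PROC_USERGROUP to 0550 with the group forced to CONFIG_GRKERNSEC_PROC_GID; pid_getattr() and pid_revalidate() accept the same modes so the entries stay usable for their owner. A tiny sketch that just prints the three mode constants used in those hunks, spelling the kernel's S_IRUGO/S_IXUGO shorthands with the userspace macros:

    #include <stdio.h>
    #include <sys/stat.h>

    /* The kernel's shorthands, expressed with the userspace mode macros. */
    #define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
    #define S_IXUGO (S_IXUSR | S_IXGRP | S_IXOTH)

    int main(void)
    {
        printf("default        : %04o\n", (unsigned)(S_IRUGO | S_IXUGO));                      /* 0555 */
        printf("PROC_USER      : %04o\n", (unsigned)(S_IRUSR | S_IXUSR));                      /* 0500 */
        printf("PROC_USERGROUP : %04o\n", (unsigned)(S_IRUSR | S_IRGRP | S_IXUSR | S_IXGRP));  /* 0550 */
        return 0;
    }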
47726 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
47727 index 82676e3..5f8518a 100644
47728 --- a/fs/proc/cmdline.c
47729 +++ b/fs/proc/cmdline.c
47730 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
47731
47732 static int __init proc_cmdline_init(void)
47733 {
47734 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47735 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
47736 +#else
47737 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
47738 +#endif
47739 return 0;
47740 }
47741 module_init(proc_cmdline_init);
47742 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
47743 index b143471..bb105e5 100644
47744 --- a/fs/proc/devices.c
47745 +++ b/fs/proc/devices.c
47746 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
47747
47748 static int __init proc_devices_init(void)
47749 {
47750 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47751 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
47752 +#else
47753 proc_create("devices", 0, NULL, &proc_devinfo_operations);
47754 +#endif
47755 return 0;
47756 }
47757 module_init(proc_devices_init);
47758 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
47759 index 205c922..2ee4c57 100644
47760 --- a/fs/proc/inode.c
47761 +++ b/fs/proc/inode.c
47762 @@ -21,11 +21,17 @@
47763 #include <linux/seq_file.h>
47764 #include <linux/slab.h>
47765 #include <linux/mount.h>
47766 +#include <linux/grsecurity.h>
47767
47768 #include <asm/uaccess.h>
47769
47770 #include "internal.h"
47771
47772 +#ifdef CONFIG_PROC_SYSCTL
47773 +extern const struct inode_operations proc_sys_inode_operations;
47774 +extern const struct inode_operations proc_sys_dir_operations;
47775 +#endif
47776 +
47777 static void proc_evict_inode(struct inode *inode)
47778 {
47779 struct proc_dir_entry *de;
47780 @@ -51,6 +57,13 @@ static void proc_evict_inode(struct inode *inode)
47781 ns_ops = PROC_I(inode)->ns_ops;
47782 if (ns_ops && ns_ops->put)
47783 ns_ops->put(PROC_I(inode)->ns);
47784 +
47785 +#ifdef CONFIG_PROC_SYSCTL
47786 + if (inode->i_op == &proc_sys_inode_operations ||
47787 + inode->i_op == &proc_sys_dir_operations)
47788 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
47789 +#endif
47790 +
47791 }
47792
47793 static struct kmem_cache * proc_inode_cachep;
47794 @@ -456,7 +469,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
47795 if (de->mode) {
47796 inode->i_mode = de->mode;
47797 inode->i_uid = de->uid;
47798 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47799 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47800 +#else
47801 inode->i_gid = de->gid;
47802 +#endif
47803 }
47804 if (de->size)
47805 inode->i_size = de->size;
47806 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
47807 index 5f79bb8..eeccee4 100644
47808 --- a/fs/proc/internal.h
47809 +++ b/fs/proc/internal.h
47810 @@ -54,6 +54,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47811 struct pid *pid, struct task_struct *task);
47812 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47813 struct pid *pid, struct task_struct *task);
47814 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47815 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
47816 +#endif
47817 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
47818
47819 extern const struct file_operations proc_pid_maps_operations;
47820 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
47821 index 86c67ee..cdca321 100644
47822 --- a/fs/proc/kcore.c
47823 +++ b/fs/proc/kcore.c
47824 @@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47825 * the addresses in the elf_phdr on our list.
47826 */
47827 start = kc_offset_to_vaddr(*fpos - elf_buflen);
47828 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
47829 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
47830 + if (tsz > buflen)
47831 tsz = buflen;
47832 -
47833 +
47834 while (buflen) {
47835 struct kcore_list *m;
47836
47837 @@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47838 kfree(elf_buf);
47839 } else {
47840 if (kern_addr_valid(start)) {
47841 - unsigned long n;
47842 + char *elf_buf;
47843 + mm_segment_t oldfs;
47844
47845 - n = copy_to_user(buffer, (char *)start, tsz);
47846 - /*
47847 - * We cannot distinguish between fault on source
47848 - * and fault on destination. When this happens
47849 - * we clear too and hope it will trigger the
47850 - * EFAULT again.
47851 - */
47852 - if (n) {
47853 - if (clear_user(buffer + tsz - n,
47854 - n))
47855 + elf_buf = kmalloc(tsz, GFP_KERNEL);
47856 + if (!elf_buf)
47857 + return -ENOMEM;
47858 + oldfs = get_fs();
47859 + set_fs(KERNEL_DS);
47860 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
47861 + set_fs(oldfs);
47862 + if (copy_to_user(buffer, elf_buf, tsz)) {
47863 + kfree(elf_buf);
47864 return -EFAULT;
47865 + }
47866 }
47867 + set_fs(oldfs);
47868 + kfree(elf_buf);
47869 } else {
47870 if (clear_user(buffer, tsz))
47871 return -EFAULT;
47872 @@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47873
47874 static int open_kcore(struct inode *inode, struct file *filp)
47875 {
47876 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
47877 + return -EPERM;
47878 +#endif
47879 if (!capable(CAP_SYS_RAWIO))
47880 return -EPERM;
47881 if (kcore_need_update)
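The read_kcore() hunk replaces a direct copy_to_user() from a kernel virtual address with a two-stage copy: the data is first pulled into a freshly kmalloc'd buffer under set_fs(KERNEL_DS), and only then copied out to the caller, so a fault on the source can be told apart from a fault on the destination and the user buffer is never left half-cleared. A plain-C sketch of the same bounce-buffer flow, with read_source()/write_dest() as hypothetical stand-ins for the two copy directions:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical copy primitives; both return 0 on success, non-zero on fault. */
    static int read_source(void *dst, const void *src, size_t len)
    {
        memcpy(dst, src, len);
        return 0;
    }

    static int write_dest(void *dst, const void *src, size_t len)
    {
        memcpy(dst, src, len);
        return 0;
    }

    /* Mirrors the hunk: stage through a private buffer so a failure in either
     * direction is handled separately and the buffer is freed on every path. */
    static int copy_via_bounce(void *dst, const void *src, size_t len)
    {
        void *bounce = malloc(len);
        int err = 0;

        if (!bounce)
            return -ENOMEM;

        if (read_source(bounce, src, len)) {
            err = 0;                 /* source fault: skip quietly, as the hunk does */
        } else if (write_dest(dst, bounce, len)) {
            err = -EFAULT;           /* destination fault: report it */
        }

        free(bounce);
        return err;
    }

    int main(void)
    {
        char in[8] = "kcore", out[8] = {0};
        return copy_via_bounce(out, in, sizeof(in));
    }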
47882 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
47883 index 80e4645..53e5fcf 100644
47884 --- a/fs/proc/meminfo.c
47885 +++ b/fs/proc/meminfo.c
47886 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
47887 vmi.used >> 10,
47888 vmi.largest_chunk >> 10
47889 #ifdef CONFIG_MEMORY_FAILURE
47890 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
47891 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
47892 #endif
47893 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
47894 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
47895 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
47896 index b1822dd..df622cb 100644
47897 --- a/fs/proc/nommu.c
47898 +++ b/fs/proc/nommu.c
47899 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
47900 if (len < 1)
47901 len = 1;
47902 seq_printf(m, "%*c", len, ' ');
47903 - seq_path(m, &file->f_path, "");
47904 + seq_path(m, &file->f_path, "\n\\");
47905 }
47906
47907 seq_putc(m, '\n');
47908 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
47909 index 06e1cc1..177cd98 100644
47910 --- a/fs/proc/proc_net.c
47911 +++ b/fs/proc/proc_net.c
47912 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
47913 struct task_struct *task;
47914 struct nsproxy *ns;
47915 struct net *net = NULL;
47916 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47917 + const struct cred *cred = current_cred();
47918 +#endif
47919 +
47920 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47921 + if (cred->fsuid)
47922 + return net;
47923 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47924 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
47925 + return net;
47926 +#endif
47927
47928 rcu_read_lock();
47929 task = pid_task(proc_pid(dir), PIDTYPE_PID);
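get_proc_task_net() above is gated so that under GRKERNSEC_PROC_USER any non-zero fsuid gets a NULL net (and thus no per-task /proc/<pid>/net contents), while GRKERNSEC_PROC_USERGROUP also admits members of CONFIG_GRKERNSEC_PROC_GID. A tiny sketch of that visibility predicate, with the uid and group-membership inputs passed in explicitly instead of read from the credentials:

    #include <stdbool.h>
    #include <stdio.h>

    /* Returns true when the caller may see per-task network info.
     * 'in_proc_gid' stands in for in_group_p(CONFIG_GRKERNSEC_PROC_GID). */
    static bool proc_net_visible(unsigned int fsuid, bool usergroup_mode, bool in_proc_gid)
    {
        if (fsuid == 0)
            return true;                 /* root always sees it */
        if (usergroup_mode && in_proc_gid)
            return true;                 /* PROC_USERGROUP: allowed group */
        return false;                    /* otherwise blocked */
    }

    int main(void)
    {
        printf("%d\n", proc_net_visible(0, false, false));     /* 1 */
        printf("%d\n", proc_net_visible(1000, true, true));    /* 1 */
        printf("%d\n", proc_net_visible(1000, false, false));  /* 0 */
        return 0;
    }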
47930 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
47931 index 21d836f..bebf3ee 100644
47932 --- a/fs/proc/proc_sysctl.c
47933 +++ b/fs/proc/proc_sysctl.c
47934 @@ -12,11 +12,15 @@
47935 #include <linux/module.h>
47936 #include "internal.h"
47937
47938 +extern int gr_handle_chroot_sysctl(const int op);
47939 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
47940 + const int op);
47941 +
47942 static const struct dentry_operations proc_sys_dentry_operations;
47943 static const struct file_operations proc_sys_file_operations;
47944 -static const struct inode_operations proc_sys_inode_operations;
47945 +const struct inode_operations proc_sys_inode_operations;
47946 static const struct file_operations proc_sys_dir_file_operations;
47947 -static const struct inode_operations proc_sys_dir_operations;
47948 +const struct inode_operations proc_sys_dir_operations;
47949
47950 void proc_sys_poll_notify(struct ctl_table_poll *poll)
47951 {
47952 @@ -470,8 +474,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
47953
47954 err = NULL;
47955 d_set_d_op(dentry, &proc_sys_dentry_operations);
47956 +
47957 + gr_handle_proc_create(dentry, inode);
47958 +
47959 d_add(dentry, inode);
47960
47961 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt))
47962 + err = ERR_PTR(-ENOENT);
47963 +
47964 out:
47965 sysctl_head_finish(head);
47966 return err;
47967 @@ -483,18 +493,20 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
47968 struct inode *inode = filp->f_path.dentry->d_inode;
47969 struct ctl_table_header *head = grab_header(inode);
47970 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
47971 + int op = write ? MAY_WRITE : MAY_READ;
47972 ssize_t error;
47973 size_t res;
47974
47975 if (IS_ERR(head))
47976 return PTR_ERR(head);
47977
47978 +
47979 /*
47980 * At this point we know that the sysctl was not unregistered
47981 * and won't be until we finish.
47982 */
47983 error = -EPERM;
47984 - if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
47985 + if (sysctl_perm(head->root, table, op))
47986 goto out;
47987
47988 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
47989 @@ -502,6 +514,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
47990 if (!table->proc_handler)
47991 goto out;
47992
47993 +#ifdef CONFIG_GRKERNSEC
47994 + error = -EPERM;
47995 + if (gr_handle_chroot_sysctl(op))
47996 + goto out;
47997 + dget(filp->f_path.dentry);
47998 + if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
47999 + dput(filp->f_path.dentry);
48000 + goto out;
48001 + }
48002 + dput(filp->f_path.dentry);
48003 + if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
48004 + goto out;
48005 + if (write && !capable(CAP_SYS_ADMIN))
48006 + goto out;
48007 +#endif
48008 +
48009 /* careful: calling conventions are nasty here */
48010 res = count;
48011 error = table->proc_handler(table, write, buf, &res, ppos);
48012 @@ -599,6 +627,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
48013 return -ENOMEM;
48014 } else {
48015 d_set_d_op(child, &proc_sys_dentry_operations);
48016 +
48017 + gr_handle_proc_create(child, inode);
48018 +
48019 d_add(child, inode);
48020 }
48021 } else {
48022 @@ -642,6 +673,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
48023 if ((*pos)++ < file->f_pos)
48024 return 0;
48025
48026 + if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
48027 + return 0;
48028 +
48029 if (unlikely(S_ISLNK(table->mode)))
48030 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
48031 else
48032 @@ -759,6 +793,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
48033 if (IS_ERR(head))
48034 return PTR_ERR(head);
48035
48036 + if (table && !gr_acl_handle_hidden_file(dentry, mnt))
48037 + return -ENOENT;
48038 +
48039 generic_fillattr(inode, stat);
48040 if (table)
48041 stat->mode = (stat->mode & S_IFMT) | table->mode;
48042 @@ -781,13 +818,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
48043 .llseek = generic_file_llseek,
48044 };
48045
48046 -static const struct inode_operations proc_sys_inode_operations = {
48047 +const struct inode_operations proc_sys_inode_operations = {
48048 .permission = proc_sys_permission,
48049 .setattr = proc_sys_setattr,
48050 .getattr = proc_sys_getattr,
48051 };
48052
48053 -static const struct inode_operations proc_sys_dir_operations = {
48054 +const struct inode_operations proc_sys_dir_operations = {
48055 .lookup = proc_sys_lookup,
48056 .permission = proc_sys_permission,
48057 .setattr = proc_sys_setattr,
48058 diff --git a/fs/proc/root.c b/fs/proc/root.c
48059 index eed44bf..abeb499 100644
48060 --- a/fs/proc/root.c
48061 +++ b/fs/proc/root.c
48062 @@ -188,7 +188,15 @@ void __init proc_root_init(void)
48063 #ifdef CONFIG_PROC_DEVICETREE
48064 proc_device_tree_init();
48065 #endif
48066 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48067 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48068 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
48069 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48070 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
48071 +#endif
48072 +#else
48073 proc_mkdir("bus", NULL);
48074 +#endif
48075 proc_sys_init();
48076 }
48077
48078 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
48079 index 7faaf2a..096c28b 100644
48080 --- a/fs/proc/task_mmu.c
48081 +++ b/fs/proc/task_mmu.c
48082 @@ -11,12 +11,19 @@
48083 #include <linux/rmap.h>
48084 #include <linux/swap.h>
48085 #include <linux/swapops.h>
48086 +#include <linux/grsecurity.h>
48087
48088 #include <asm/elf.h>
48089 #include <asm/uaccess.h>
48090 #include <asm/tlbflush.h>
48091 #include "internal.h"
48092
48093 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48094 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48095 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
48096 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48097 +#endif
48098 +
48099 void task_mem(struct seq_file *m, struct mm_struct *mm)
48100 {
48101 unsigned long data, text, lib, swap;
48102 @@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48103 "VmExe:\t%8lu kB\n"
48104 "VmLib:\t%8lu kB\n"
48105 "VmPTE:\t%8lu kB\n"
48106 - "VmSwap:\t%8lu kB\n",
48107 - hiwater_vm << (PAGE_SHIFT-10),
48108 + "VmSwap:\t%8lu kB\n"
48109 +
48110 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48111 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
48112 +#endif
48113 +
48114 + ,hiwater_vm << (PAGE_SHIFT-10),
48115 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
48116 mm->locked_vm << (PAGE_SHIFT-10),
48117 mm->pinned_vm << (PAGE_SHIFT-10),
48118 @@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48119 data << (PAGE_SHIFT-10),
48120 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
48121 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
48122 - swap << (PAGE_SHIFT-10));
48123 + swap << (PAGE_SHIFT-10)
48124 +
48125 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48126 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48127 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
48128 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
48129 +#else
48130 + , mm->context.user_cs_base
48131 + , mm->context.user_cs_limit
48132 +#endif
48133 +#endif
48134 +
48135 + );
48136 }
48137
48138 unsigned long task_vsize(struct mm_struct *mm)
48139 @@ -231,13 +255,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48140 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
48141 }
48142
48143 - /* We don't show the stack guard page in /proc/maps */
48144 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48145 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
48146 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
48147 +#else
48148 start = vma->vm_start;
48149 - if (stack_guard_page_start(vma, start))
48150 - start += PAGE_SIZE;
48151 end = vma->vm_end;
48152 - if (stack_guard_page_end(vma, end))
48153 - end -= PAGE_SIZE;
48154 +#endif
48155
48156 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
48157 start,
48158 @@ -246,7 +270,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48159 flags & VM_WRITE ? 'w' : '-',
48160 flags & VM_EXEC ? 'x' : '-',
48161 flags & VM_MAYSHARE ? 's' : 'p',
48162 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48163 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
48164 +#else
48165 pgoff,
48166 +#endif
48167 MAJOR(dev), MINOR(dev), ino, &len);
48168
48169 /*
48170 @@ -255,7 +283,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48171 */
48172 if (file) {
48173 pad_len_spaces(m, len);
48174 - seq_path(m, &file->f_path, "\n");
48175 + seq_path(m, &file->f_path, "\n\\");
48176 goto done;
48177 }
48178
48179 @@ -281,8 +309,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48180 * Thread stack in /proc/PID/task/TID/maps or
48181 * the main process stack.
48182 */
48183 - if (!is_pid || (vma->vm_start <= mm->start_stack &&
48184 - vma->vm_end >= mm->start_stack)) {
48185 + if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
48186 + (vma->vm_start <= mm->start_stack &&
48187 + vma->vm_end >= mm->start_stack)) {
48188 name = "[stack]";
48189 } else {
48190 /* Thread stack in /proc/PID/maps */
48191 @@ -306,6 +335,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
48192 struct proc_maps_private *priv = m->private;
48193 struct task_struct *task = priv->task;
48194
48195 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48196 + if (current->exec_id != m->exec_id) {
48197 + gr_log_badprocpid("maps");
48198 + return 0;
48199 + }
48200 +#endif
48201 +
48202 show_map_vma(m, vma, is_pid);
48203
48204 if (m->count < m->size) /* vma is copied successfully */
48205 @@ -482,12 +518,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
48206 .private = &mss,
48207 };
48208
48209 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48210 + if (current->exec_id != m->exec_id) {
48211 + gr_log_badprocpid("smaps");
48212 + return 0;
48213 + }
48214 +#endif
48215 memset(&mss, 0, sizeof mss);
48216 - mss.vma = vma;
48217 - /* mmap_sem is held in m_start */
48218 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48219 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48220 -
48221 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48222 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
48223 +#endif
48224 + mss.vma = vma;
48225 + /* mmap_sem is held in m_start */
48226 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48227 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48228 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48229 + }
48230 +#endif
48231 show_map_vma(m, vma, is_pid);
48232
48233 seq_printf(m,
48234 @@ -505,7 +552,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
48235 "KernelPageSize: %8lu kB\n"
48236 "MMUPageSize: %8lu kB\n"
48237 "Locked: %8lu kB\n",
48238 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48239 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
48240 +#else
48241 (vma->vm_end - vma->vm_start) >> 10,
48242 +#endif
48243 mss.resident >> 10,
48244 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
48245 mss.shared_clean >> 10,
48246 @@ -1138,6 +1189,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
48247 int n;
48248 char buffer[50];
48249
48250 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48251 + if (current->exec_id != m->exec_id) {
48252 + gr_log_badprocpid("numa_maps");
48253 + return 0;
48254 + }
48255 +#endif
48256 +
48257 if (!mm)
48258 return 0;
48259
48260 @@ -1155,11 +1213,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
48261 mpol_to_str(buffer, sizeof(buffer), pol, 0);
48262 mpol_cond_put(pol);
48263
48264 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48265 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
48266 +#else
48267 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
48268 +#endif
48269
48270 if (file) {
48271 seq_printf(m, " file=");
48272 - seq_path(m, &file->f_path, "\n\t= ");
48273 + seq_path(m, &file->f_path, "\n\t\\= ");
48274 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48275 seq_printf(m, " heap");
48276 } else {
48277 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
48278 index 74fe164..899e77b 100644
48279 --- a/fs/proc/task_nommu.c
48280 +++ b/fs/proc/task_nommu.c
48281 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48282 else
48283 bytes += kobjsize(mm);
48284
48285 - if (current->fs && current->fs->users > 1)
48286 + if (current->fs && atomic_read(&current->fs->users) > 1)
48287 sbytes += kobjsize(current->fs);
48288 else
48289 bytes += kobjsize(current->fs);
48290 @@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
48291
48292 if (file) {
48293 pad_len_spaces(m, len);
48294 - seq_path(m, &file->f_path, "");
48295 + seq_path(m, &file->f_path, "\n\\");
48296 } else if (mm) {
48297 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
48298
48299 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48300 index d67908b..d13f6a6 100644
48301 --- a/fs/quota/netlink.c
48302 +++ b/fs/quota/netlink.c
48303 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
48304 void quota_send_warning(short type, unsigned int id, dev_t dev,
48305 const char warntype)
48306 {
48307 - static atomic_t seq;
48308 + static atomic_unchecked_t seq;
48309 struct sk_buff *skb;
48310 void *msg_head;
48311 int ret;
48312 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
48313 "VFS: Not enough memory to send quota warning.\n");
48314 return;
48315 }
48316 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
48317 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
48318 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
48319 if (!msg_head) {
48320 printk(KERN_ERR
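Where the pipe and splice changes promote plain ints to atomic_t, the meminfo and quota hunks above (and the reiserfs ones a little further down) go the other way: counters that were already atomic become atomic_unchecked_t with *_unchecked accessors. Under PaX's REFCOUNT hardening, configured elsewhere in this patch, ordinary atomic_t increments are instrumented to catch overflow; the _unchecked variants opt a counter out of that check when wraparound is harmless, as with a netlink sequence number or a filesystem generation counter. A small sketch of the distinction, with a deliberately toy overflow check standing in for the real instrumentation:

    #include <assert.h>
    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Toy stand-in for a REFCOUNT-checked increment: refuse to wrap. */
    static unsigned int inc_return_checked(atomic_uint *v)
    {
        unsigned int old = atomic_fetch_add(v, 1u);
        assert(old != UINT_MAX);         /* the hardened kernel would log and kill here */
        return old + 1u;
    }

    /* Unchecked variant: wraparound is accepted, as for a sequence number. */
    static unsigned int inc_return_unchecked(atomic_uint *v)
    {
        return atomic_fetch_add(v, 1u) + 1u;
    }

    int main(void)
    {
        atomic_uint seq = UINT_MAX;

        printf("%u\n", inc_return_unchecked(&seq));  /* wraps to 0: harmless here */
        /* inc_return_checked(&seq) would trip the assert on the same state */
        return 0;
    }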
48321 diff --git a/fs/readdir.c b/fs/readdir.c
48322 index cc0a822..43cb195 100644
48323 --- a/fs/readdir.c
48324 +++ b/fs/readdir.c
48325 @@ -17,6 +17,7 @@
48326 #include <linux/security.h>
48327 #include <linux/syscalls.h>
48328 #include <linux/unistd.h>
48329 +#include <linux/namei.h>
48330
48331 #include <asm/uaccess.h>
48332
48333 @@ -67,6 +68,7 @@ struct old_linux_dirent {
48334
48335 struct readdir_callback {
48336 struct old_linux_dirent __user * dirent;
48337 + struct file * file;
48338 int result;
48339 };
48340
48341 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
48342 buf->result = -EOVERFLOW;
48343 return -EOVERFLOW;
48344 }
48345 +
48346 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48347 + return 0;
48348 +
48349 buf->result++;
48350 dirent = buf->dirent;
48351 if (!access_ok(VERIFY_WRITE, dirent,
48352 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
48353
48354 buf.result = 0;
48355 buf.dirent = dirent;
48356 + buf.file = file;
48357
48358 error = vfs_readdir(file, fillonedir, &buf);
48359 if (buf.result)
48360 @@ -142,6 +149,7 @@ struct linux_dirent {
48361 struct getdents_callback {
48362 struct linux_dirent __user * current_dir;
48363 struct linux_dirent __user * previous;
48364 + struct file * file;
48365 int count;
48366 int error;
48367 };
48368 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
48369 buf->error = -EOVERFLOW;
48370 return -EOVERFLOW;
48371 }
48372 +
48373 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48374 + return 0;
48375 +
48376 dirent = buf->previous;
48377 if (dirent) {
48378 if (__put_user(offset, &dirent->d_off))
48379 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
48380 buf.previous = NULL;
48381 buf.count = count;
48382 buf.error = 0;
48383 + buf.file = file;
48384
48385 error = vfs_readdir(file, filldir, &buf);
48386 if (error >= 0)
48387 @@ -229,6 +242,7 @@ out:
48388 struct getdents_callback64 {
48389 struct linux_dirent64 __user * current_dir;
48390 struct linux_dirent64 __user * previous;
48391 + struct file *file;
48392 int count;
48393 int error;
48394 };
48395 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
48396 buf->error = -EINVAL; /* only used if we fail.. */
48397 if (reclen > buf->count)
48398 return -EINVAL;
48399 +
48400 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48401 + return 0;
48402 +
48403 dirent = buf->previous;
48404 if (dirent) {
48405 if (__put_user(offset, &dirent->d_off))
48406 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48407
48408 buf.current_dir = dirent;
48409 buf.previous = NULL;
48410 + buf.file = file;
48411 buf.count = count;
48412 buf.error = 0;
48413
48414 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48415 error = buf.error;
48416 lastdirent = buf.previous;
48417 if (lastdirent) {
48418 - typeof(lastdirent->d_off) d_off = file->f_pos;
48419 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48420 if (__put_user(d_off, &lastdirent->d_off))
48421 error = -EFAULT;
48422 else
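The fs/readdir.c hunks give each of the three getdents callback structs a struct file * back-pointer and make every filldir-style callback consult gr_acl_handle_filldir() first, returning 0 so filtered names silently disappear from the listing instead of aborting it. A userspace sketch of the same filter-inside-the-callback shape, with an invented prefix rule in place of the grsecurity ACL check:

    #include <stdio.h>
    #include <string.h>

    struct list_ctx {
        const char *hide_prefix;   /* invented policy: hide names with this prefix */
        int emitted;
    };

    /* Callback in the style of filldir(): return 0 to keep iterating. */
    static int fill_one(void *buf, const char *name, int namlen)
    {
        struct list_ctx *ctx = buf;

        /* Mirrors: if (!gr_acl_handle_filldir(...)) return 0;  -- skip quietly */
        if (strncmp(name, ctx->hide_prefix, strlen(ctx->hide_prefix)) == 0)
            return 0;

        printf("%.*s\n", namlen, name);
        ctx->emitted++;
        return 0;
    }

    int main(void)
    {
        static const char *entries[] = { ".", "..", "secret.key", "readme" };
        struct list_ctx ctx = { .hide_prefix = "secret", .emitted = 0 };

        for (unsigned i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
            fill_one(&ctx, entries[i], (int)strlen(entries[i]));

        return 0;
    }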
48423 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
48424 index 2b7882b..1c5ef48 100644
48425 --- a/fs/reiserfs/do_balan.c
48426 +++ b/fs/reiserfs/do_balan.c
48427 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
48428 return;
48429 }
48430
48431 - atomic_inc(&(fs_generation(tb->tb_sb)));
48432 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
48433 do_balance_starts(tb);
48434
48435 /* balance leaf returns 0 except if combining L R and S into
48436 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
48437 index 2c1ade6..8c59d8d 100644
48438 --- a/fs/reiserfs/procfs.c
48439 +++ b/fs/reiserfs/procfs.c
48440 @@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
48441 "SMALL_TAILS " : "NO_TAILS ",
48442 replay_only(sb) ? "REPLAY_ONLY " : "",
48443 convert_reiserfs(sb) ? "CONV " : "",
48444 - atomic_read(&r->s_generation_counter),
48445 + atomic_read_unchecked(&r->s_generation_counter),
48446 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
48447 SF(s_do_balance), SF(s_unneeded_left_neighbor),
48448 SF(s_good_search_by_key_reada), SF(s_bmaps),
48449 diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
48450 index a59d271..e12d1cf 100644
48451 --- a/fs/reiserfs/reiserfs.h
48452 +++ b/fs/reiserfs/reiserfs.h
48453 @@ -453,7 +453,7 @@ struct reiserfs_sb_info {
48454 /* Comment? -Hans */
48455 wait_queue_head_t s_wait;
48456 /* To be obsoleted soon by per buffer seals.. -Hans */
48457 - atomic_t s_generation_counter; // increased by one every time the
48458 + atomic_unchecked_t s_generation_counter; // increased by one every time the
48459 // tree gets re-balanced
48460 unsigned long s_properties; /* File system properties. Currently holds
48461 on-disk FS format */
48462 @@ -1973,7 +1973,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
48463 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
48464
48465 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
48466 -#define get_generation(s) atomic_read (&fs_generation(s))
48467 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
48468 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
48469 #define __fs_changed(gen,s) (gen != get_generation (s))
48470 #define fs_changed(gen,s) \
48471 diff --git a/fs/select.c b/fs/select.c
48472 index 17d33d0..da0bf5c 100644
48473 --- a/fs/select.c
48474 +++ b/fs/select.c
48475 @@ -20,6 +20,7 @@
48476 #include <linux/export.h>
48477 #include <linux/slab.h>
48478 #include <linux/poll.h>
48479 +#include <linux/security.h>
48480 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
48481 #include <linux/file.h>
48482 #include <linux/fdtable.h>
48483 @@ -833,6 +834,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
48484 struct poll_list *walk = head;
48485 unsigned long todo = nfds;
48486
48487 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
48488 if (nfds > rlimit(RLIMIT_NOFILE))
48489 return -EINVAL;
48490
48491 diff --git a/fs/seq_file.c b/fs/seq_file.c
48492 index 0cbd049..cab1127 100644
48493 --- a/fs/seq_file.c
48494 +++ b/fs/seq_file.c
48495 @@ -9,6 +9,7 @@
48496 #include <linux/export.h>
48497 #include <linux/seq_file.h>
48498 #include <linux/slab.h>
48499 +#include <linux/sched.h>
48500
48501 #include <asm/uaccess.h>
48502 #include <asm/page.h>
48503 @@ -56,6 +57,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
48504 memset(p, 0, sizeof(*p));
48505 mutex_init(&p->lock);
48506 p->op = op;
48507 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48508 + p->exec_id = current->exec_id;
48509 +#endif
48510
48511 /*
48512 * Wrappers around seq_open(e.g. swaps_open) need to be
48513 @@ -567,7 +571,7 @@ static void single_stop(struct seq_file *p, void *v)
48514 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
48515 void *data)
48516 {
48517 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
48518 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
48519 int res = -ENOMEM;
48520
48521 if (op) {
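seq_open() above records current->exec_id (a per-exec counter defined elsewhere in this patch) in the seq_file, and the show routines patched earlier (stat, statm, maps, smaps, numa_maps) compare it against the reader's current exec_id, logging via gr_log_badprocpid() and emitting nothing when they differ. That catches a /proc file descriptor opened before an exec of a more privileged image and read afterwards. A sketch of the capture-at-open, check-at-read idea, with exec_id modelled as a plain global counter:

    #include <stdio.h>

    /* Hypothetical stand-in for current->exec_id: bumped on every execve(). */
    static unsigned long current_exec_id = 1;

    struct seq_ctx {
        unsigned long exec_id;     /* captured when the file is opened */
    };

    static void seq_open_sketch(struct seq_ctx *m)
    {
        m->exec_id = current_exec_id;
    }

    static int seq_show_sketch(const struct seq_ctx *m)
    {
        if (m->exec_id != current_exec_id) {
            fprintf(stderr, "stale fd from before exec, refusing\n");
            return 0;              /* as in the patch: emit nothing, report success */
        }
        printf("sensitive per-task data\n");
        return 0;
    }

    int main(void)
    {
        struct seq_ctx m;

        seq_open_sketch(&m);
        seq_show_sketch(&m);       /* prints the data */

        current_exec_id++;         /* pretend the task exec'd a new image */
        seq_show_sketch(&m);       /* refuses: the fd predates the exec */
        return 0;
    }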
48522 diff --git a/fs/splice.c b/fs/splice.c
48523 index f847684..156619e 100644
48524 --- a/fs/splice.c
48525 +++ b/fs/splice.c
48526 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48527 pipe_lock(pipe);
48528
48529 for (;;) {
48530 - if (!pipe->readers) {
48531 + if (!atomic_read(&pipe->readers)) {
48532 send_sig(SIGPIPE, current, 0);
48533 if (!ret)
48534 ret = -EPIPE;
48535 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48536 do_wakeup = 0;
48537 }
48538
48539 - pipe->waiting_writers++;
48540 + atomic_inc(&pipe->waiting_writers);
48541 pipe_wait(pipe);
48542 - pipe->waiting_writers--;
48543 + atomic_dec(&pipe->waiting_writers);
48544 }
48545
48546 pipe_unlock(pipe);
48547 @@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
48548 old_fs = get_fs();
48549 set_fs(get_ds());
48550 /* The cast to a user pointer is valid due to the set_fs() */
48551 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
48552 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
48553 set_fs(old_fs);
48554
48555 return res;
48556 @@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
48557 old_fs = get_fs();
48558 set_fs(get_ds());
48559 /* The cast to a user pointer is valid due to the set_fs() */
48560 - res = vfs_write(file, (const char __user *)buf, count, &pos);
48561 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
48562 set_fs(old_fs);
48563
48564 return res;
48565 @@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
48566 goto err;
48567
48568 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
48569 - vec[i].iov_base = (void __user *) page_address(page);
48570 + vec[i].iov_base = (void __force_user *) page_address(page);
48571 vec[i].iov_len = this_len;
48572 spd.pages[i] = page;
48573 spd.nr_pages++;
48574 @@ -845,10 +845,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
48575 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
48576 {
48577 while (!pipe->nrbufs) {
48578 - if (!pipe->writers)
48579 + if (!atomic_read(&pipe->writers))
48580 return 0;
48581
48582 - if (!pipe->waiting_writers && sd->num_spliced)
48583 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
48584 return 0;
48585
48586 if (sd->flags & SPLICE_F_NONBLOCK)
48587 @@ -1181,7 +1181,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
48588 * out of the pipe right after the splice_to_pipe(). So set
48589 * PIPE_READERS appropriately.
48590 */
48591 - pipe->readers = 1;
48592 + atomic_set(&pipe->readers, 1);
48593
48594 current->splice_pipe = pipe;
48595 }
48596 @@ -1733,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48597 ret = -ERESTARTSYS;
48598 break;
48599 }
48600 - if (!pipe->writers)
48601 + if (!atomic_read(&pipe->writers))
48602 break;
48603 - if (!pipe->waiting_writers) {
48604 + if (!atomic_read(&pipe->waiting_writers)) {
48605 if (flags & SPLICE_F_NONBLOCK) {
48606 ret = -EAGAIN;
48607 break;
48608 @@ -1767,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48609 pipe_lock(pipe);
48610
48611 while (pipe->nrbufs >= pipe->buffers) {
48612 - if (!pipe->readers) {
48613 + if (!atomic_read(&pipe->readers)) {
48614 send_sig(SIGPIPE, current, 0);
48615 ret = -EPIPE;
48616 break;
48617 @@ -1780,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48618 ret = -ERESTARTSYS;
48619 break;
48620 }
48621 - pipe->waiting_writers++;
48622 + atomic_inc(&pipe->waiting_writers);
48623 pipe_wait(pipe);
48624 - pipe->waiting_writers--;
48625 + atomic_dec(&pipe->waiting_writers);
48626 }
48627
48628 pipe_unlock(pipe);
48629 @@ -1818,14 +1818,14 @@ retry:
48630 pipe_double_lock(ipipe, opipe);
48631
48632 do {
48633 - if (!opipe->readers) {
48634 + if (!atomic_read(&opipe->readers)) {
48635 send_sig(SIGPIPE, current, 0);
48636 if (!ret)
48637 ret = -EPIPE;
48638 break;
48639 }
48640
48641 - if (!ipipe->nrbufs && !ipipe->writers)
48642 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
48643 break;
48644
48645 /*
48646 @@ -1922,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48647 pipe_double_lock(ipipe, opipe);
48648
48649 do {
48650 - if (!opipe->readers) {
48651 + if (!atomic_read(&opipe->readers)) {
48652 send_sig(SIGPIPE, current, 0);
48653 if (!ret)
48654 ret = -EPIPE;
48655 @@ -1967,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48656 * return EAGAIN if we have the potential of some data in the
48657 * future, otherwise just return 0
48658 */
48659 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
48660 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
48661 ret = -EAGAIN;
48662
48663 pipe_unlock(ipipe);
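
Every splice.c hunk above follows one pattern: the pipe's readers, writers and waiting_writers counters (converted to atomic_t elsewhere in this patch) are no longer touched as plain integers but accessed through atomic_read()/atomic_inc()/atomic_dec()/atomic_set(). A rough userspace sketch of the same conversion, using C11 atomics rather than the kernel's atomic_t API and purely illustrative names:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Before the conversion the counters were plain ints, e.g.:
     *     struct pipe { int readers; int waiting_writers; };
     *     pipe->waiting_writers++;
     * After the conversion every access goes through the atomic API. */
    struct pipe_counters {
            atomic_int readers;
            atomic_int waiting_writers;
    };

    static void writer_waits(struct pipe_counters *p)
    {
            atomic_fetch_add(&p->waiting_writers, 1);  /* was: waiting_writers++ */
            /* ... block until a reader drains the pipe ... */
            atomic_fetch_sub(&p->waiting_writers, 1);  /* was: waiting_writers-- */
    }

    int main(void)
    {
            struct pipe_counters p;
            atomic_init(&p.readers, 1);                /* was: pipe->readers = 1 */
            atomic_init(&p.waiting_writers, 0);

            writer_waits(&p);
            if (atomic_load(&p.readers) == 0)          /* was: if (!pipe->readers) */
                    puts("no readers: would raise SIGPIPE");
            else
                    puts("readers present");
            return 0;
    }
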
48664 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
48665 index 35a36d3..23424b2 100644
48666 --- a/fs/sysfs/dir.c
48667 +++ b/fs/sysfs/dir.c
48668 @@ -657,6 +657,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
48669 struct sysfs_dirent *sd;
48670 int rc;
48671
48672 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48673 + const char *parent_name = parent_sd->s_name;
48674 +
48675 + mode = S_IFDIR | S_IRWXU;
48676 +
48677 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
48678 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
48679 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
48680 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
48681 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
48682 +#endif
48683 +
48684 /* allocate */
48685 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
48686 if (!sd)
48687 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
48688 index 00012e3..8392349 100644
48689 --- a/fs/sysfs/file.c
48690 +++ b/fs/sysfs/file.c
48691 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
48692
48693 struct sysfs_open_dirent {
48694 atomic_t refcnt;
48695 - atomic_t event;
48696 + atomic_unchecked_t event;
48697 wait_queue_head_t poll;
48698 struct list_head buffers; /* goes through sysfs_buffer.list */
48699 };
48700 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
48701 if (!sysfs_get_active(attr_sd))
48702 return -ENODEV;
48703
48704 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
48705 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
48706 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
48707
48708 sysfs_put_active(attr_sd);
48709 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
48710 return -ENOMEM;
48711
48712 atomic_set(&new_od->refcnt, 0);
48713 - atomic_set(&new_od->event, 1);
48714 + atomic_set_unchecked(&new_od->event, 1);
48715 init_waitqueue_head(&new_od->poll);
48716 INIT_LIST_HEAD(&new_od->buffers);
48717 goto retry;
48718 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
48719
48720 sysfs_put_active(attr_sd);
48721
48722 - if (buffer->event != atomic_read(&od->event))
48723 + if (buffer->event != atomic_read_unchecked(&od->event))
48724 goto trigger;
48725
48726 return DEFAULT_POLLMASK;
48727 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
48728
48729 od = sd->s_attr.open;
48730 if (od) {
48731 - atomic_inc(&od->event);
48732 + atomic_inc_unchecked(&od->event);
48733 wake_up_interruptible(&od->poll);
48734 }
48735
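
The sysfs hunks above retype the open-dirent event counter as atomic_unchecked_t; presumably, since it is only a monotonically bumped change-notification counter for which wraparound is harmless, it is deliberately exempted from the patch's reference-count overflow checking and accessed through the *_unchecked helpers. The poll logic itself is unchanged: a snapshot of the counter is taken when the attribute is read, and sysfs_poll() reports the file as changed once the live counter has moved past that snapshot. A small self-contained sketch of that snapshot-and-compare pattern (illustrative names only):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Event counter bumped by the producer on every change... */
    static atomic_uint event = 1;

    /* ...and snapshotted by each consumer when it last looked. */
    struct consumer {
            unsigned int seen;
    };

    static void consumer_read(struct consumer *c)
    {
            /* like: buffer->event = atomic_read_unchecked(&od->event) */
            c->seen = atomic_load(&event);
    }

    static void producer_notify(void)
    {
            /* like: atomic_inc_unchecked(&od->event) */
            atomic_fetch_add(&event, 1);
    }

    static int consumer_changed(const struct consumer *c)
    {
            /* like: buffer->event != atomic_read_unchecked(&od->event) */
            return c->seen != atomic_load(&event);
    }

    int main(void)
    {
            struct consumer c;
            consumer_read(&c);
            printf("changed before notify: %d\n", consumer_changed(&c));
            producer_notify();
            printf("changed after notify:  %d\n", consumer_changed(&c));
            return 0;
    }
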
48736 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
48737 index a7ac78f..02158e1 100644
48738 --- a/fs/sysfs/symlink.c
48739 +++ b/fs/sysfs/symlink.c
48740 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48741
48742 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
48743 {
48744 - char *page = nd_get_link(nd);
48745 + const char *page = nd_get_link(nd);
48746 if (!IS_ERR(page))
48747 free_page((unsigned long)page);
48748 }
48749 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
48750 index c175b4d..8f36a16 100644
48751 --- a/fs/udf/misc.c
48752 +++ b/fs/udf/misc.c
48753 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
48754
48755 u8 udf_tag_checksum(const struct tag *t)
48756 {
48757 - u8 *data = (u8 *)t;
48758 + const u8 *data = (const u8 *)t;
48759 u8 checksum = 0;
48760 int i;
48761 for (i = 0; i < sizeof(struct tag); ++i)
48762 diff --git a/fs/utimes.c b/fs/utimes.c
48763 index ba653f3..06ea4b1 100644
48764 --- a/fs/utimes.c
48765 +++ b/fs/utimes.c
48766 @@ -1,6 +1,7 @@
48767 #include <linux/compiler.h>
48768 #include <linux/file.h>
48769 #include <linux/fs.h>
48770 +#include <linux/security.h>
48771 #include <linux/linkage.h>
48772 #include <linux/mount.h>
48773 #include <linux/namei.h>
48774 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
48775 goto mnt_drop_write_and_out;
48776 }
48777 }
48778 +
48779 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
48780 + error = -EACCES;
48781 + goto mnt_drop_write_and_out;
48782 + }
48783 +
48784 mutex_lock(&inode->i_mutex);
48785 error = notify_change(path->dentry, &newattrs);
48786 mutex_unlock(&inode->i_mutex);
48787 diff --git a/fs/xattr.c b/fs/xattr.c
48788 index 3c8c1cc..a83c398 100644
48789 --- a/fs/xattr.c
48790 +++ b/fs/xattr.c
48791 @@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
48792 * Extended attribute SET operations
48793 */
48794 static long
48795 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
48796 +setxattr(struct path *path, const char __user *name, const void __user *value,
48797 size_t size, int flags)
48798 {
48799 int error;
48800 @@ -349,7 +349,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
48801 }
48802 }
48803
48804 - error = vfs_setxattr(d, kname, kvalue, size, flags);
48805 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
48806 + error = -EACCES;
48807 + goto out;
48808 + }
48809 +
48810 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
48811 out:
48812 if (vvalue)
48813 vfree(vvalue);
48814 @@ -370,7 +375,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
48815 return error;
48816 error = mnt_want_write(path.mnt);
48817 if (!error) {
48818 - error = setxattr(path.dentry, name, value, size, flags);
48819 + error = setxattr(&path, name, value, size, flags);
48820 mnt_drop_write(path.mnt);
48821 }
48822 path_put(&path);
48823 @@ -389,7 +394,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
48824 return error;
48825 error = mnt_want_write(path.mnt);
48826 if (!error) {
48827 - error = setxattr(path.dentry, name, value, size, flags);
48828 + error = setxattr(&path, name, value, size, flags);
48829 mnt_drop_write(path.mnt);
48830 }
48831 path_put(&path);
48832 @@ -400,17 +405,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
48833 const void __user *,value, size_t, size, int, flags)
48834 {
48835 struct file *f;
48836 - struct dentry *dentry;
48837 int error = -EBADF;
48838
48839 f = fget(fd);
48840 if (!f)
48841 return error;
48842 - dentry = f->f_path.dentry;
48843 - audit_inode(NULL, dentry);
48844 + audit_inode(NULL, f->f_path.dentry);
48845 error = mnt_want_write_file(f);
48846 if (!error) {
48847 - error = setxattr(dentry, name, value, size, flags);
48848 + error = setxattr(&f->f_path, name, value, size, flags);
48849 mnt_drop_write_file(f);
48850 }
48851 fput(f);
48852 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
48853 index 69d06b0..c0996e5 100644
48854 --- a/fs/xattr_acl.c
48855 +++ b/fs/xattr_acl.c
48856 @@ -17,8 +17,8 @@
48857 struct posix_acl *
48858 posix_acl_from_xattr(const void *value, size_t size)
48859 {
48860 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
48861 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
48862 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
48863 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
48864 int count;
48865 struct posix_acl *acl;
48866 struct posix_acl_entry *acl_e;
48867 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
48868 index 85e7e32..5344e52 100644
48869 --- a/fs/xfs/xfs_bmap.c
48870 +++ b/fs/xfs/xfs_bmap.c
48871 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
48872 int nmap,
48873 int ret_nmap);
48874 #else
48875 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
48876 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
48877 #endif /* DEBUG */
48878
48879 STATIC int
48880 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
48881 index 79d05e8..e3e5861 100644
48882 --- a/fs/xfs/xfs_dir2_sf.c
48883 +++ b/fs/xfs/xfs_dir2_sf.c
48884 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
48885 }
48886
48887 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
48888 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48889 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
48890 + char name[sfep->namelen];
48891 + memcpy(name, sfep->name, sfep->namelen);
48892 + if (filldir(dirent, name, sfep->namelen,
48893 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
48894 + *offset = off & 0x7fffffff;
48895 + return 0;
48896 + }
48897 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48898 off & 0x7fffffff, ino, DT_UNKNOWN)) {
48899 *offset = off & 0x7fffffff;
48900 return 0;
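
The xfs_dir2_sf.c hunk above only changes behaviour when the short-form directory entries live inline inside the inode (if_data == if_inline_data): in that case the entry name is first copied into an on-stack buffer and the copy, rather than a pointer into the inode itself, is handed to filldir(). Presumably this keeps the eventual copy to userspace sourcing from the stack instead of from inside a kernel heap object, in line with the usercopy checking added elsewhere in this patch. The underlying pattern is a plain bounce buffer (names below are illustrative):

    #include <stdio.h>
    #include <string.h>

    typedef int (*filldir_cb)(const char *name, int namelen);

    static int emit_name(filldir_cb cb, const char *src, int namelen)
    {
            /* Bounce the (not NUL-terminated) name through a local buffer so
             * the callback never sees a pointer into the original storage. */
            char name[namelen];        /* VLA, as in the hunk above */

            memcpy(name, src, namelen);
            return cb(name, namelen);
    }

    static int print_entry(const char *name, int namelen)
    {
            printf("%.*s\n", namelen, name);
            return 0;
    }

    int main(void)
    {
            const char inline_area[] = "subdir";
            return emit_name(print_entry, inline_area, 6);
    }
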
48901 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
48902 index 91f8ff5..0ce68f9 100644
48903 --- a/fs/xfs/xfs_ioctl.c
48904 +++ b/fs/xfs/xfs_ioctl.c
48905 @@ -128,7 +128,7 @@ xfs_find_handle(
48906 }
48907
48908 error = -EFAULT;
48909 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
48910 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
48911 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
48912 goto out_put;
48913
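
The xfs_ioctl.c hunk above adds a guard so that copy_to_user() can never source more bytes than the on-stack handle actually holds: if hsize somehow exceeds sizeof(handle), the ioctl now fails with -EFAULT instead of copying adjacent stack contents out to userspace. The defensive shape is simply "validate the caller-influenced length against the real object size before copying"; a hypothetical userspace sketch (struct handle here is a stand-in, not the real XFS handle layout):

    #include <stdio.h>
    #include <string.h>

    struct handle {
            unsigned int fsid[2];
            unsigned char fid[24];
    };

    /* Copy up to out_len bytes of *h to out, but never more than the object
     * actually holds -- mirroring the "hsize > sizeof handle" guard above. */
    static int export_handle(void *out, size_t out_len, const struct handle *h)
    {
            if (out_len > sizeof(*h))
                    return -1;          /* would over-read the source object */
            memcpy(out, h, out_len);
            return 0;
    }

    int main(void)
    {
            struct handle h = { { 1, 2 }, "fid" };
            unsigned char buf[64];

            printf("ok copy: %d\n", export_handle(buf, sizeof(h), &h));
            printf("oversized copy rejected: %d\n",
                   export_handle(buf, sizeof(buf), &h));
            return 0;
    }
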
48914 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
48915 index 3011b87..1ab03e9 100644
48916 --- a/fs/xfs/xfs_iops.c
48917 +++ b/fs/xfs/xfs_iops.c
48918 @@ -397,7 +397,7 @@ xfs_vn_put_link(
48919 struct nameidata *nd,
48920 void *p)
48921 {
48922 - char *s = nd_get_link(nd);
48923 + const char *s = nd_get_link(nd);
48924
48925 if (!IS_ERR(s))
48926 kfree(s);
48927 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
48928 new file mode 100644
48929 index 0000000..2645296
48930 --- /dev/null
48931 +++ b/grsecurity/Kconfig
48932 @@ -0,0 +1,1079 @@
48933 +#
48934 +# grsecurity configuration
48935 +#
48936 +
48937 +menu "Grsecurity"
48938 +
48939 +config GRKERNSEC
48940 + bool "Grsecurity"
48941 + select CRYPTO
48942 + select CRYPTO_SHA256
48943 + help
48944 + If you say Y here, you will be able to configure many features
48945 + that will enhance the security of your system. It is highly
48946 + recommended that you say Y here and read through the help
48947 + for each option so that you fully understand the features and
48948 + can evaluate their usefulness for your machine.
48949 +
48950 +choice
48951 + prompt "Security Level"
48952 + depends on GRKERNSEC
48953 + default GRKERNSEC_CUSTOM
48954 +
48955 +config GRKERNSEC_LOW
48956 + bool "Low"
48957 + select GRKERNSEC_LINK
48958 + select GRKERNSEC_FIFO
48959 + select GRKERNSEC_RANDNET
48960 + select GRKERNSEC_DMESG
48961 + select GRKERNSEC_CHROOT
48962 + select GRKERNSEC_CHROOT_CHDIR
48963 +
48964 + help
48965 + If you choose this option, several of the grsecurity options will
48966 + be enabled that will give you greater protection against a number
48967 + of attacks, while assuring that none of your software will have any
48968 + conflicts with the additional security measures. If you run a lot
48969 + of unusual software, or you are having problems with the higher
48970 + security levels, you should say Y here. With this option, the
48971 + following features are enabled:
48972 +
48973 + - Linking restrictions
48974 + - FIFO restrictions
48975 + - Restricted dmesg
48976 + - Enforced chdir("/") on chroot
48977 + - Runtime module disabling
48978 +
48979 +config GRKERNSEC_MEDIUM
48980 + bool "Medium"
48981 + select PAX
48982 + select PAX_EI_PAX
48983 + select PAX_PT_PAX_FLAGS
48984 + select PAX_HAVE_ACL_FLAGS
48985 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
48986 + select GRKERNSEC_CHROOT
48987 + select GRKERNSEC_CHROOT_SYSCTL
48988 + select GRKERNSEC_LINK
48989 + select GRKERNSEC_FIFO
48990 + select GRKERNSEC_DMESG
48991 + select GRKERNSEC_RANDNET
48992 + select GRKERNSEC_FORKFAIL
48993 + select GRKERNSEC_TIME
48994 + select GRKERNSEC_SIGNAL
48995 + select GRKERNSEC_CHROOT
48996 + select GRKERNSEC_CHROOT_UNIX
48997 + select GRKERNSEC_CHROOT_MOUNT
48998 + select GRKERNSEC_CHROOT_PIVOT
48999 + select GRKERNSEC_CHROOT_DOUBLE
49000 + select GRKERNSEC_CHROOT_CHDIR
49001 + select GRKERNSEC_CHROOT_MKNOD
49002 + select GRKERNSEC_PROC
49003 + select GRKERNSEC_PROC_USERGROUP
49004 + select PAX_RANDUSTACK
49005 + select PAX_ASLR
49006 + select PAX_RANDMMAP
49007 + select PAX_REFCOUNT if (X86 || SPARC64)
49008 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49009 +
49010 + help
49011 + If you say Y here, several features in addition to those included
49012 + in the low additional security level will be enabled. These
49013 + features provide even more security to your system, though in rare
49014 + cases they may be incompatible with very old or poorly written
49015 + software. If you enable this option, make sure that your auth
49016 + service (identd) is running as gid 1001. With this option,
49017 + the following features (in addition to those provided in the
49018 + low additional security level) will be enabled:
49019 +
49020 + - Failed fork logging
49021 + - Time change logging
49022 + - Signal logging
49023 + - Deny mounts in chroot
49024 + - Deny double chrooting
49025 + - Deny sysctl writes in chroot
49026 + - Deny mknod in chroot
49027 + - Deny access to abstract AF_UNIX sockets out of chroot
49028 + - Deny pivot_root in chroot
49029 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
49030 + - /proc restrictions with special GID set to 10 (usually wheel)
49031 + - Address Space Layout Randomization (ASLR)
49032 + - Prevent exploitation of most refcount overflows
49033 + - Bounds checking of copying between the kernel and userland
49034 +
49035 +config GRKERNSEC_HIGH
49036 + bool "High"
49037 + select GRKERNSEC_LINK
49038 + select GRKERNSEC_FIFO
49039 + select GRKERNSEC_DMESG
49040 + select GRKERNSEC_FORKFAIL
49041 + select GRKERNSEC_TIME
49042 + select GRKERNSEC_SIGNAL
49043 + select GRKERNSEC_CHROOT
49044 + select GRKERNSEC_CHROOT_SHMAT
49045 + select GRKERNSEC_CHROOT_UNIX
49046 + select GRKERNSEC_CHROOT_MOUNT
49047 + select GRKERNSEC_CHROOT_FCHDIR
49048 + select GRKERNSEC_CHROOT_PIVOT
49049 + select GRKERNSEC_CHROOT_DOUBLE
49050 + select GRKERNSEC_CHROOT_CHDIR
49051 + select GRKERNSEC_CHROOT_MKNOD
49052 + select GRKERNSEC_CHROOT_CAPS
49053 + select GRKERNSEC_CHROOT_SYSCTL
49054 + select GRKERNSEC_CHROOT_FINDTASK
49055 + select GRKERNSEC_SYSFS_RESTRICT
49056 + select GRKERNSEC_PROC
49057 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49058 + select GRKERNSEC_HIDESYM
49059 + select GRKERNSEC_BRUTE
49060 + select GRKERNSEC_PROC_USERGROUP
49061 + select GRKERNSEC_KMEM
49062 + select GRKERNSEC_RESLOG
49063 + select GRKERNSEC_RANDNET
49064 + select GRKERNSEC_PROC_ADD
49065 + select GRKERNSEC_CHROOT_CHMOD
49066 + select GRKERNSEC_CHROOT_NICE
49067 + select GRKERNSEC_SETXID if (X86 || SPARC64 || PPC || ARM || MIPS)
49068 + select GRKERNSEC_AUDIT_MOUNT
49069 + select GRKERNSEC_MODHARDEN if (MODULES)
49070 + select GRKERNSEC_HARDEN_PTRACE
49071 + select GRKERNSEC_PTRACE_READEXEC
49072 + select GRKERNSEC_VM86 if (X86_32)
49073 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
49074 + select PAX
49075 + select PAX_RANDUSTACK
49076 + select PAX_ASLR
49077 + select PAX_RANDMMAP
49078 + select PAX_NOEXEC
49079 + select PAX_MPROTECT
49080 + select PAX_EI_PAX
49081 + select PAX_PT_PAX_FLAGS
49082 + select PAX_HAVE_ACL_FLAGS
49083 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
49084 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
49085 + select PAX_RANDKSTACK if (X86_TSC && X86)
49086 + select PAX_SEGMEXEC if (X86_32)
49087 + select PAX_PAGEEXEC
49088 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
49089 + select PAX_EMUTRAMP if (PARISC)
49090 + select PAX_EMUSIGRT if (PARISC)
49091 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
49092 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
49093 + select PAX_REFCOUNT if (X86 || SPARC64)
49094 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
49095 + help
49096 + If you say Y here, many of the features of grsecurity will be
49097 + enabled, which will protect you against many kinds of attacks
49098 + against your system. The heightened security comes at a cost
49099 + of an increased chance of incompatibilities with rare software
49100 + on your machine. Since this security level enables PaX, you should
49101 + view <http://pax.grsecurity.net> and read about the PaX
49102 + project. While you are there, download chpax and run it on
49103 + binaries that cause problems with PaX. Also remember that
49104 + since the /proc restrictions are enabled, you must run your
49105 + identd as gid 1001. This security level enables the following
49106 + features in addition to those listed in the low and medium
49107 + security levels:
49108 +
49109 + - Additional /proc restrictions
49110 + - Chmod restrictions in chroot
49111 + - No signals, ptrace, or viewing of processes outside of chroot
49112 + - Capability restrictions in chroot
49113 + - Deny fchdir out of chroot
49114 + - Priority restrictions in chroot
49115 + - Segmentation-based implementation of PaX
49116 + - Mprotect restrictions
49117 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
49118 + - Kernel stack randomization
49119 + - Mount/unmount/remount logging
49120 + - Kernel symbol hiding
49121 + - Hardening of module auto-loading
49122 + - Ptrace restrictions
49123 + - Restricted vm86 mode
49124 + - Restricted sysfs/debugfs
49125 + - Active kernel exploit response
49126 +
49127 +config GRKERNSEC_CUSTOM
49128 + bool "Custom"
49129 + help
49130 + If you say Y here, you will be able to configure every grsecurity
49131 + option, which allows you to enable many more features that aren't
49132 + covered in the basic security levels. These additional features
49133 + include TPE, socket restrictions, and the sysctl system for
49134 + grsecurity. It is advised that you read through the help for
49135 + each option to determine its usefulness in your situation.
49136 +
49137 +endchoice
49138 +
49139 +menu "Memory Protections"
49140 +depends on GRKERNSEC
49141 +
49142 +config GRKERNSEC_KMEM
49143 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
49144 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49145 + help
49146 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49147 + be written to or read from to modify or leak the contents of the running
49148 + kernel. /dev/port will also not be allowed to be opened. If you have module
49149 + support disabled, enabling this will close up four ways that are
49150 + currently used to insert malicious code into the running kernel.
49151 + Even with all these features enabled, we still highly recommend that
49152 + you use the RBAC system, as it is still possible for an attacker to
49153 + modify the running kernel through privileged I/O granted by ioperm/iopl.
49154 + If you are not using XFree86, you may be able to stop this additional
49155 + case by enabling the 'Disable privileged I/O' option. Though nothing
49156 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
49157 + but only to video memory, which is the only writing we allow in this
49158 + case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the mapping
49159 + will not be allowed to be mprotected with PROT_WRITE later.
49160 + It is highly recommended that you say Y here if you meet all the
49161 + conditions above.
49162 +
49163 +config GRKERNSEC_VM86
49164 + bool "Restrict VM86 mode"
49165 + depends on X86_32
49166 +
49167 + help
49168 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
49169 + make use of a special execution mode on 32bit x86 processors called
49170 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
49171 + video cards and will still work with this option enabled. The purpose
49172 + of the option is to prevent exploitation of emulation errors in
49173 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
49174 + Nearly all users should be able to enable this option.
49175 +
49176 +config GRKERNSEC_IO
49177 + bool "Disable privileged I/O"
49178 + depends on X86
49179 + select RTC_CLASS
49180 + select RTC_INTF_DEV
49181 + select RTC_DRV_CMOS
49182 +
49183 + help
49184 + If you say Y here, all ioperm and iopl calls will return an error.
49185 + Ioperm and iopl can be used to modify the running kernel.
49186 + Unfortunately, some programs need this access to operate properly,
49187 + the most notable of which are XFree86 and hwclock. hwclock can be
49188 + remedied by having RTC support in the kernel, so real-time
49189 + clock support is enabled if this option is enabled, to ensure
49190 + that hwclock operates correctly. XFree86 still will not
49191 + operate correctly with this option enabled, so DO NOT CHOOSE Y
49192 + IF YOU USE XFree86. If you use XFree86 and you still want to
49193 + protect your kernel against modification, use the RBAC system.
49194 +
49195 +config GRKERNSEC_PROC_MEMMAP
49196 + bool "Harden ASLR against information leaks and entropy reduction"
49197 + default y if (PAX_NOEXEC || PAX_ASLR)
49198 + depends on PAX_NOEXEC || PAX_ASLR
49199 + help
49200 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
49201 + give no information about the addresses of its mappings if
49202 + PaX features that rely on random addresses are enabled on the task.
49203 + In addition to sanitizing this information and disabling other
49204 + dangerous sources of information, this option causes reads of sensitive
49205 + /proc/<pid> entries to be denied where the file descriptor was opened in
49206 + a different task than the one performing the read. Such attempts are logged.
49207 + This option also limits argv/env strings for suid/sgid binaries
49208 + to 512KB to prevent a complete exhaustion of the stack entropy provided
49209 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
49210 + binaries to prevent alternative mmap layouts from being abused.
49211 +
49212 + If you use PaX it is essential that you say Y here as it closes up
49213 + several holes that make full ASLR useless locally.
49214 +
49215 +config GRKERNSEC_BRUTE
49216 + bool "Deter exploit bruteforcing"
49217 + help
49218 + If you say Y here, attempts to bruteforce exploits against forking
49219 + daemons such as apache or sshd, as well as against suid/sgid binaries
49220 + will be deterred. When a child of a forking daemon is killed by PaX
49221 + or crashes due to an illegal instruction or other suspicious signal,
49222 + the parent process will be delayed 30 seconds upon every subsequent
49223 + fork until the administrator is able to assess the situation and
49224 + restart the daemon.
49225 + In the suid/sgid case, the attempt is logged, the user has all their
49226 + processes terminated, and they are prevented from executing any further
49227 + processes for 15 minutes.
49228 + It is recommended that you also enable signal logging in the auditing
49229 + section so that logs are generated when a process triggers a suspicious
49230 + signal.
49231 + If the sysctl option is enabled, a sysctl option with name
49232 + "deter_bruteforce" is created.
49233 +
49234 +
49235 +config GRKERNSEC_MODHARDEN
49236 + bool "Harden module auto-loading"
49237 + depends on MODULES
49238 + help
49239 + If you say Y here, module auto-loading in response to use of some
49240 + feature implemented by an unloaded module will be restricted to
49241 + root users. Enabling this option helps defend against attacks
49242 + by unprivileged users who abuse the auto-loading behavior to
49243 + cause a vulnerable module to load that is then exploited.
49244 +
49245 + If this option prevents a legitimate use of auto-loading for a
49246 + non-root user, the administrator can execute modprobe manually
49247 + with the exact name of the module mentioned in the alert log.
49248 + Alternatively, the administrator can add the module to the list
49249 + of modules loaded at boot by modifying init scripts.
49250 +
49251 + Modification of init scripts will most likely be needed on
49252 + Ubuntu servers with encrypted home directory support enabled,
49253 + as the first non-root user logging in will cause the ecb(aes),
49254 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
49255 +
49256 +config GRKERNSEC_HIDESYM
49257 + bool "Hide kernel symbols"
49258 + help
49259 + If you say Y here, getting information on loaded modules, and
49260 + displaying all kernel symbols through a syscall will be restricted
49261 + to users with CAP_SYS_MODULE. For software compatibility reasons,
49262 + /proc/kallsyms will be restricted to the root user. The RBAC
49263 + system can hide that entry even from root.
49264 +
49265 + This option also prevents leaking of kernel addresses through
49266 + several /proc entries.
49267 +
49268 + Note that this option is only effective provided the following
49269 + conditions are met:
49270 + 1) The kernel using grsecurity is not precompiled by some distribution
49271 + 2) You have also enabled GRKERNSEC_DMESG
49272 + 3) You are using the RBAC system and hiding other files such as your
49273 + kernel image and System.map. Alternatively, enabling this option
49274 + causes the permissions on /boot, /lib/modules, and the kernel
49275 + source directory to change at compile time to prevent
49276 + reading by non-root users.
49277 + If the above conditions are met, this option will aid in providing a
49278 + useful protection against local kernel exploitation of overflows
49279 + and arbitrary read/write vulnerabilities.
49280 +
49281 +config GRKERNSEC_KERN_LOCKOUT
49282 + bool "Active kernel exploit response"
49283 + depends on X86 || ARM || PPC || SPARC
49284 + help
49285 + If you say Y here, when a PaX alert is triggered due to suspicious
49286 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
49287 + or an OOPs occurs due to bad memory accesses, instead of just
49288 + terminating the offending process (and potentially allowing
49289 + a subsequent exploit from the same user), we will take one of two
49290 + actions:
49291 + If the user was root, we will panic the system
49292 + If the user was non-root, we will log the attempt, terminate
49293 + all processes owned by the user, then prevent them from creating
49294 + any new processes until the system is restarted
49295 + This deters repeated kernel exploitation/bruteforcing attempts
49296 + and is useful for later forensics.
49297 +
49298 +endmenu
49299 +menu "Role Based Access Control Options"
49300 +depends on GRKERNSEC
49301 +
49302 +config GRKERNSEC_RBAC_DEBUG
49303 + bool
49304 +
49305 +config GRKERNSEC_NO_RBAC
49306 + bool "Disable RBAC system"
49307 + help
49308 + If you say Y here, the /dev/grsec device will be removed from the kernel,
49309 + preventing the RBAC system from being enabled. You should only say Y
49310 + here if you have no intention of using the RBAC system, so as to prevent
49311 + an attacker with root access from misusing the RBAC system to hide files
49312 + and processes when loadable module support and /dev/[k]mem have been
49313 + locked down.
49314 +
49315 +config GRKERNSEC_ACL_HIDEKERN
49316 + bool "Hide kernel processes"
49317 + help
49318 + If you say Y here, all kernel threads will be hidden to all
49319 + processes but those whose subject has the "view hidden processes"
49320 + flag.
49321 +
49322 +config GRKERNSEC_ACL_MAXTRIES
49323 + int "Maximum tries before password lockout"
49324 + default 3
49325 + help
49326 + This option enforces the maximum number of times a user can attempt
49327 + to authorize themselves with the grsecurity RBAC system before being
49328 + denied the ability to attempt authorization again for a specified time.
49329 + The lower the number, the harder it will be to brute-force a password.
49330 +
49331 +config GRKERNSEC_ACL_TIMEOUT
49332 + int "Time to wait after max password tries, in seconds"
49333 + default 30
49334 + help
49335 + This option specifies the time the user must wait after attempting to
49336 + authorize to the RBAC system with the maximum number of invalid
49337 + passwords. The higher the number, the harder it will be to brute-force
49338 + a password.
49339 +
49340 +endmenu
49341 +menu "Filesystem Protections"
49342 +depends on GRKERNSEC
49343 +
49344 +config GRKERNSEC_PROC
49345 + bool "Proc restrictions"
49346 + help
49347 + If you say Y here, the permissions of the /proc filesystem
49348 + will be altered to enhance system security and privacy. You MUST
49349 + choose either a user only restriction or a user and group restriction.
49350 + Depending upon the option you choose, you can either restrict users to
49351 + see only the processes they themselves run (the "restrict to user only"
49352 + option), or choose a group whose members can view all processes and
49353 + files normally restricted to root. NOTE: If you're running identd or
49354 + ntpd as a non-root user, you will have to run it as the group you
49355 + specify here.
49356 +
49357 +config GRKERNSEC_PROC_USER
49358 + bool "Restrict /proc to user only"
49359 + depends on GRKERNSEC_PROC
49360 + help
49361 + If you say Y here, non-root users will only be able to view their own
49362 + processes, and will be restricted from viewing network-related information
49363 + and kernel symbol and module information.
49364 +
49365 +config GRKERNSEC_PROC_USERGROUP
49366 + bool "Allow special group"
49367 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
49368 + help
49369 + If you say Y here, you will be able to select a group that will be
49370 + able to view all processes and network-related information. If you've
49371 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
49372 + remain hidden. This option is useful if you want to run identd as
49373 + a non-root user.
49374 +
49375 +config GRKERNSEC_PROC_GID
49376 + int "GID for special group"
49377 + depends on GRKERNSEC_PROC_USERGROUP
49378 + default 1001
49379 +
49380 +config GRKERNSEC_PROC_ADD
49381 + bool "Additional restrictions"
49382 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
49383 + help
49384 + If you say Y here, additional restrictions will be placed on
49385 + /proc that keep normal users from viewing device information and
49386 + slabinfo information that could be useful for exploits.
49387 +
49388 +config GRKERNSEC_LINK
49389 + bool "Linking restrictions"
49390 + help
49391 + If you say Y here, /tmp race exploits will be prevented, since users
49392 + will no longer be able to follow symlinks owned by other users in
49393 + world-writable +t directories (e.g. /tmp), unless the owner of the
49394 + symlink is the owner of the directory. Users will also not be
49395 + able to hardlink to files they do not own. If the sysctl option is
49396 + enabled, a sysctl option with name "linking_restrictions" is created.
49397 +
49398 +config GRKERNSEC_FIFO
49399 + bool "FIFO restrictions"
49400 + help
49401 + If you say Y here, users will not be able to write to FIFOs they don't
49402 + own in world-writable +t directories (e.g. /tmp), unless the owner of
49403 + the FIFO is the same as the owner of the directory it's held in. If the
49404 + option is enabled, a sysctl option with name "fifo_restrictions" is
49405 + created.
49406 +
49407 +config GRKERNSEC_SYSFS_RESTRICT
49408 + bool "Sysfs/debugfs restriction"
49409 + depends on SYSFS
49410 + help
49411 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
49412 + any filesystem normally mounted under it (e.g. debugfs) will be
49413 + mostly accessible only by root. These filesystems generally provide access
49414 + to hardware and debug information that isn't appropriate for unprivileged
49415 + users of the system. Sysfs and debugfs have also become a large source
49416 + of new vulnerabilities, ranging from infoleaks to local compromise.
49417 + There has been very little oversight with an eye toward security involved
49418 + in adding new exporters of information to these filesystems, so their
49419 + use is discouraged.
49420 + For reasons of compatibility, a few directories have been whitelisted
49421 + for access by non-root users:
49422 + /sys/fs/selinux
49423 + /sys/fs/fuse
49424 + /sys/devices/system/cpu
49425 +
49426 +config GRKERNSEC_ROFS
49427 + bool "Runtime read-only mount protection"
49428 + help
49429 + If you say Y here, a sysctl option with name "romount_protect" will
49430 + be created. By setting this option to 1 at runtime, filesystems
49431 + will be protected in the following ways:
49432 + * No new writable mounts will be allowed
49433 + * Existing read-only mounts won't be able to be remounted read/write
49434 + * Write operations will be denied on all block devices
49435 + This option acts independently of grsec_lock: once it is set to 1,
49436 + it cannot be turned off. Therefore, please be mindful of the resulting
49437 + behavior if this option is enabled in an init script on a read-only
49438 + filesystem. This feature is mainly intended for secure embedded systems.
49439 +
49440 +config GRKERNSEC_CHROOT
49441 + bool "Chroot jail restrictions"
49442 + help
49443 + If you say Y here, you will be able to choose several options that will
49444 + make breaking out of a chrooted jail much more difficult. If you
49445 + encounter no software incompatibilities with the following options, it
49446 + is recommended that you enable each one.
49447 +
49448 +config GRKERNSEC_CHROOT_MOUNT
49449 + bool "Deny mounts"
49450 + depends on GRKERNSEC_CHROOT
49451 + help
49452 + If you say Y here, processes inside a chroot will not be able to
49453 + mount or remount filesystems. If the sysctl option is enabled, a
49454 + sysctl option with name "chroot_deny_mount" is created.
49455 +
49456 +config GRKERNSEC_CHROOT_DOUBLE
49457 + bool "Deny double-chroots"
49458 + depends on GRKERNSEC_CHROOT
49459 + help
49460 + If you say Y here, processes inside a chroot will not be able to chroot
49461 + again outside the chroot. This is a widely used method of breaking
49462 + out of a chroot jail and should not be allowed. If the sysctl
49463 + option is enabled, a sysctl option with name
49464 + "chroot_deny_chroot" is created.
49465 +
49466 +config GRKERNSEC_CHROOT_PIVOT
49467 + bool "Deny pivot_root in chroot"
49468 + depends on GRKERNSEC_CHROOT
49469 + help
49470 + If you say Y here, processes inside a chroot will not be able to use
49471 + a function called pivot_root() that was introduced in Linux 2.3.41. It
49472 + works similarly to chroot in that it changes the root filesystem. This
49473 + function could be misused in a chrooted process to attempt to break out
49474 + of the chroot, and therefore should not be allowed. If the sysctl
49475 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
49476 + created.
49477 +
49478 +config GRKERNSEC_CHROOT_CHDIR
49479 + bool "Enforce chdir(\"/\") on all chroots"
49480 + depends on GRKERNSEC_CHROOT
49481 + help
49482 + If you say Y here, the current working directory of all newly-chrooted
49483 + applications will be set to the root directory of the chroot.
49484 + The man page on chroot(2) states:
49485 + Note that this call does not change the current working
49486 + directory, so that `.' can be outside the tree rooted at
49487 + `/'. In particular, the super-user can escape from a
49488 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
49489 +
49490 + It is recommended that you say Y here, since it's not known to break
49491 + any software. If the sysctl option is enabled, a sysctl option with
49492 + name "chroot_enforce_chdir" is created.
49493 +
49494 +config GRKERNSEC_CHROOT_CHMOD
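
The chroot(2) excerpt quoted in the help text above is the whole motivation for this option: chroot() does not change the working directory, so a process that skips the follow-up chdir() keeps a handle on the tree outside the jail. What GRKERNSEC_CHROOT_CHDIR enforces in the kernel is the same step a careful chrooting program already performs for itself, sketched below as a plain, hypothetical userspace helper that must be run with sufficient privilege:

    #include <stdio.h>
    #include <unistd.h>

    /* Enter a chroot jail the careful way: chroot() and then immediately
     * chdir("/") so the working directory cannot remain outside the jail. */
    static int enter_jail(const char *jail)
    {
            if (chroot(jail) != 0) {
                    perror("chroot");
                    return -1;
            }
            if (chdir("/") != 0) {
                    perror("chdir");
                    return -1;
            }
            return 0;
    }

    int main(int argc, char **argv)
    {
            if (argc != 2) {
                    fprintf(stderr, "usage: %s <jail-directory>\n", argv[0]);
                    return 1;
            }
            return enter_jail(argv[1]) == 0 ? 0 : 1;
    }
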
49495 + bool "Deny (f)chmod +s"
49496 + depends on GRKERNSEC_CHROOT
49497 + help
49498 + If you say Y here, processes inside a chroot will not be able to chmod
49499 + or fchmod files to make them have suid or sgid bits. This protects
49500 + against another published method of breaking a chroot. If the sysctl
49501 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
49502 + created.
49503 +
49504 +config GRKERNSEC_CHROOT_FCHDIR
49505 + bool "Deny fchdir out of chroot"
49506 + depends on GRKERNSEC_CHROOT
49507 + help
49508 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
49509 + to a file descriptor of the chrooting process that points to a directory
49510 + outside the filesystem will be stopped. If the sysctl option
49511 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
49512 +
49513 +config GRKERNSEC_CHROOT_MKNOD
49514 + bool "Deny mknod"
49515 + depends on GRKERNSEC_CHROOT
49516 + help
49517 + If you say Y here, processes inside a chroot will not be allowed to
49518 + mknod. The problem with using mknod inside a chroot is that it
49519 + would allow an attacker to create a device entry that is the same
49520 + as one on the physical root of your system, which could range from
49521 + anything from the console device to a device for your harddrive (which
49522 + they could then use to wipe the drive or steal data). It is recommended
49523 + that you say Y here, unless you run into software incompatibilities.
49524 + If the sysctl option is enabled, a sysctl option with name
49525 + "chroot_deny_mknod" is created.
49526 +
49527 +config GRKERNSEC_CHROOT_SHMAT
49528 + bool "Deny shmat() out of chroot"
49529 + depends on GRKERNSEC_CHROOT
49530 + help
49531 + If you say Y here, processes inside a chroot will not be able to attach
49532 + to shared memory segments that were created outside of the chroot jail.
49533 + It is recommended that you say Y here. If the sysctl option is enabled,
49534 + a sysctl option with name "chroot_deny_shmat" is created.
49535 +
49536 +config GRKERNSEC_CHROOT_UNIX
49537 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
49538 + depends on GRKERNSEC_CHROOT
49539 + help
49540 + If you say Y here, processes inside a chroot will not be able to
49541 + connect to abstract (meaning not belonging to a filesystem) Unix
49542 + domain sockets that were bound outside of a chroot. It is recommended
49543 + that you say Y here. If the sysctl option is enabled, a sysctl option
49544 + with name "chroot_deny_unix" is created.
49545 +
49546 +config GRKERNSEC_CHROOT_FINDTASK
49547 + bool "Protect outside processes"
49548 + depends on GRKERNSEC_CHROOT
49549 + help
49550 + If you say Y here, processes inside a chroot will not be able to
49551 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
49552 + getsid, or view any process outside of the chroot. If the sysctl
49553 + option is enabled, a sysctl option with name "chroot_findtask" is
49554 + created.
49555 +
49556 +config GRKERNSEC_CHROOT_NICE
49557 + bool "Restrict priority changes"
49558 + depends on GRKERNSEC_CHROOT
49559 + help
49560 + If you say Y here, processes inside a chroot will not be able to raise
49561 + the priority of processes in the chroot, or alter the priority of
49562 + processes outside the chroot. This provides more security than simply
49563 + removing CAP_SYS_NICE from the process' capability set. If the
49564 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
49565 + is created.
49566 +
49567 +config GRKERNSEC_CHROOT_SYSCTL
49568 + bool "Deny sysctl writes"
49569 + depends on GRKERNSEC_CHROOT
49570 + help
49571 + If you say Y here, an attacker in a chroot will not be able to
49572 + write to sysctl entries, either by sysctl(2) or through a /proc
49573 + interface. It is strongly recommended that you say Y here. If the
49574 + sysctl option is enabled, a sysctl option with name
49575 + "chroot_deny_sysctl" is created.
49576 +
49577 +config GRKERNSEC_CHROOT_CAPS
49578 + bool "Capability restrictions"
49579 + depends on GRKERNSEC_CHROOT
49580 + help
49581 + If you say Y here, the capabilities on all processes within a
49582 + chroot jail will be lowered to stop module insertion, raw i/o,
49583 + system and net admin tasks, rebooting the system, modifying immutable
49584 + files, modifying IPC owned by another, and changing the system time.
49585 + This is left an option because it can break some apps. Disable this
49586 + if your chrooted apps are having problems performing those kinds of
49587 + tasks. If the sysctl option is enabled, a sysctl option with
49588 + name "chroot_caps" is created.
49589 +
49590 +endmenu
49591 +menu "Kernel Auditing"
49592 +depends on GRKERNSEC
49593 +
49594 +config GRKERNSEC_AUDIT_GROUP
49595 + bool "Single group for auditing"
49596 + help
49597 + If you say Y here, the exec, chdir, and (un)mount logging features
49598 + will only operate on a group you specify. This option is recommended
49599 + if you only want to watch certain users instead of having a large
49600 + amount of logs from the entire system. If the sysctl option is enabled,
49601 + a sysctl option with name "audit_group" is created.
49602 +
49603 +config GRKERNSEC_AUDIT_GID
49604 + int "GID for auditing"
49605 + depends on GRKERNSEC_AUDIT_GROUP
49606 + default 1007
49607 +
49608 +config GRKERNSEC_EXECLOG
49609 + bool "Exec logging"
49610 + help
49611 + If you say Y here, all execve() calls will be logged (since the
49612 + other exec*() calls are frontends to execve(), all execution
49613 + will be logged). Useful for shell-servers that like to keep track
49614 + of their users. If the sysctl option is enabled, a sysctl option with
49615 + name "exec_logging" is created.
49616 + WARNING: This option when enabled will produce a LOT of logs, especially
49617 + on an active system.
49618 +
49619 +config GRKERNSEC_RESLOG
49620 + bool "Resource logging"
49621 + help
49622 + If you say Y here, all attempts to overstep resource limits will
49623 + be logged with the resource name, the requested size, and the current
49624 + limit. It is highly recommended that you say Y here. If the sysctl
49625 + option is enabled, a sysctl option with name "resource_logging" is
49626 + created. If the RBAC system is enabled, the sysctl value is ignored.
49627 +
49628 +config GRKERNSEC_CHROOT_EXECLOG
49629 + bool "Log execs within chroot"
49630 + help
49631 + If you say Y here, all executions inside a chroot jail will be logged
49632 + to syslog. This can cause a large amount of logs if certain
49633 + applications (e.g. djb's daemontools) are installed on the system, and
49634 + is therefore left as an option. If the sysctl option is enabled, a
49635 + sysctl option with name "chroot_execlog" is created.
49636 +
49637 +config GRKERNSEC_AUDIT_PTRACE
49638 + bool "Ptrace logging"
49639 + help
49640 + If you say Y here, all attempts to attach to a process via ptrace
49641 + will be logged. If the sysctl option is enabled, a sysctl option
49642 + with name "audit_ptrace" is created.
49643 +
49644 +config GRKERNSEC_AUDIT_CHDIR
49645 + bool "Chdir logging"
49646 + help
49647 + If you say Y here, all chdir() calls will be logged. If the sysctl
49648 + option is enabled, a sysctl option with name "audit_chdir" is created.
49649 +
49650 +config GRKERNSEC_AUDIT_MOUNT
49651 + bool "(Un)Mount logging"
49652 + help
49653 + If you say Y here, all mounts and unmounts will be logged. If the
49654 + sysctl option is enabled, a sysctl option with name "audit_mount" is
49655 + created.
49656 +
49657 +config GRKERNSEC_SIGNAL
49658 + bool "Signal logging"
49659 + help
49660 + If you say Y here, certain important signals will be logged, such as
49661 + SIGSEGV, which will as a result inform you when an error in a program
49662 + occurred, which in some cases could mean a possible exploit attempt.
49663 + If the sysctl option is enabled, a sysctl option with name
49664 + "signal_logging" is created.
49665 +
49666 +config GRKERNSEC_FORKFAIL
49667 + bool "Fork failure logging"
49668 + help
49669 + If you say Y here, all failed fork() attempts will be logged.
49670 + This could suggest a fork bomb, or someone attempting to overstep
49671 + their process limit. If the sysctl option is enabled, a sysctl option
49672 + with name "forkfail_logging" is created.
49673 +
49674 +config GRKERNSEC_TIME
49675 + bool "Time change logging"
49676 + help
49677 + If you say Y here, any changes of the system clock will be logged.
49678 + If the sysctl option is enabled, a sysctl option with name
49679 + "timechange_logging" is created.
49680 +
49681 +config GRKERNSEC_PROC_IPADDR
49682 + bool "/proc/<pid>/ipaddr support"
49683 + help
49684 + If you say Y here, a new entry will be added to each /proc/<pid>
49685 + directory that contains the IP address of the person using the task.
49686 + The IP is carried across local TCP and AF_UNIX stream sockets.
49687 + This information can be useful for IDS/IPSes to perform remote response
49688 + to a local attack. The entry is readable by only the owner of the
49689 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
49690 + the RBAC system), and thus does not create privacy concerns.
49691 +
49692 +config GRKERNSEC_RWXMAP_LOG
49693 + bool 'Denied RWX mmap/mprotect logging'
49694 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
49695 + help
49696 + If you say Y here, calls to mmap() and mprotect() with explicit
49697 + usage of PROT_WRITE and PROT_EXEC together will be logged when
49698 + denied by the PAX_MPROTECT feature. If the sysctl option is
49699 + enabled, a sysctl option with name "rwxmap_logging" is created.
49700 +
49701 +config GRKERNSEC_AUDIT_TEXTREL
49702 + bool 'ELF text relocations logging (READ HELP)'
49703 + depends on PAX_MPROTECT
49704 + help
49705 + If you say Y here, text relocations will be logged with the filename
49706 + of the offending library or binary. The purpose of the feature is
49707 + to help Linux distribution developers get rid of libraries and
49708 + binaries that need text relocations which hinder the future progress
49709 + of PaX. Only Linux distribution developers should say Y here, and
49710 + never on a production machine, as this option creates an information
49711 + leak that could aid an attacker in defeating the randomization of
49712 + a single memory region. If the sysctl option is enabled, a sysctl
49713 + option with name "audit_textrel" is created.
49714 +
49715 +endmenu
49716 +
49717 +menu "Executable Protections"
49718 +depends on GRKERNSEC
49719 +
49720 +config GRKERNSEC_DMESG
49721 + bool "Dmesg(8) restriction"
49722 + help
49723 + If you say Y here, non-root users will not be able to use dmesg(8)
49724 + to view up to the last 4kb of messages in the kernel's log buffer.
49725 + The kernel's log buffer often contains kernel addresses and other
49726 + identifying information useful to an attacker in fingerprinting a
49727 + system for a targeted exploit.
49728 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
49729 + created.
49730 +
49731 +config GRKERNSEC_HARDEN_PTRACE
49732 + bool "Deter ptrace-based process snooping"
49733 + help
49734 + If you say Y here, TTY sniffers and other malicious monitoring
49735 + programs implemented through ptrace will be defeated. If you
49736 + have been using the RBAC system, this option has already been
49737 + enabled for several years for all users, with the ability to make
49738 + fine-grained exceptions.
49739 +
49740 + This option only affects the ability of non-root users to ptrace
49741 + processes that are not a descendant of the ptracing process.
49742 + This means that strace ./binary and gdb ./binary will still work,
49743 + but attaching to arbitrary processes will not. If the sysctl
49744 + option is enabled, a sysctl option with name "harden_ptrace" is
49745 + created.
49746 +
49747 +config GRKERNSEC_PTRACE_READEXEC
49748 + bool "Require read access to ptrace sensitive binaries"
49749 + help
49750 + If you say Y here, unprivileged users will not be able to ptrace unreadable
49751 + binaries. This option is useful in environments that
49752 + remove the read bits (e.g. file mode 4711) from suid binaries to
49753 + prevent infoleaking of their contents. This option adds
49754 + consistency to the use of that file mode, as the binary could normally
49755 + be read out when run without privileges while ptracing.
49756 +
49757 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
49758 + is created.
49759 +
49760 +config GRKERNSEC_SETXID
49761 + bool "Enforce consistent multithreaded privileges"
49762 + depends on (X86 || SPARC64 || PPC || ARM || MIPS)
49763 + help
49764 + If you say Y here, a change from a root uid to a non-root uid
49765 + in a multithreaded application will cause the resulting uids,
49766 + gids, supplementary groups, and capabilities in that thread
49767 + to be propagated to the other threads of the process. In most
49768 + cases this is unnecessary, as glibc will emulate this behavior
49769 + on behalf of the application. Other libcs do not act in the
49770 + same way, allowing the other threads of the process to continue
49771 + running with root privileges. If the sysctl option is enabled,
49772 + a sysctl option with name "consistent_setxid" is created.
49773 +
49774 +config GRKERNSEC_TPE
49775 + bool "Trusted Path Execution (TPE)"
49776 + help
49777 + If you say Y here, you will be able to choose a gid to add to the
49778 + supplementary groups of users you want to mark as "untrusted."
49779 + These users will not be able to execute any files that are not in
49780 + root-owned directories writable only by root. If the sysctl option
49781 + is enabled, a sysctl option with name "tpe" is created.
49782 +
49783 +config GRKERNSEC_TPE_ALL
49784 + bool "Partially restrict all non-root users"
49785 + depends on GRKERNSEC_TPE
49786 + help
49787 + If you say Y here, all non-root users will be covered under
49788 + a weaker TPE restriction. This is separate from, and in addition to,
49789 + the main TPE options that you have selected elsewhere. Thus, if a
49790 + "trusted" GID is chosen, this restriction applies to even that GID.
49791 + Under this restriction, all non-root users will only be allowed to
49792 + execute files in directories they own that are not group or
49793 + world-writable, or in directories owned by root and writable only by
49794 + root. If the sysctl option is enabled, a sysctl option with name
49795 + "tpe_restrict_all" is created.
49796 +
49797 +config GRKERNSEC_TPE_INVERT
49798 + bool "Invert GID option"
49799 + depends on GRKERNSEC_TPE
49800 + help
49801 + If you say Y here, the group you specify in the TPE configuration will
49802 + decide what group TPE restrictions will be *disabled* for. This
49803 + option is useful if you want TPE restrictions to be applied to most
49804 + users on the system. If the sysctl option is enabled, a sysctl option
49805 + with name "tpe_invert" is created. Unlike other sysctl options, this
49806 + entry will default to on for backward-compatibility.
49807 +
49808 +config GRKERNSEC_TPE_GID
49809 + int "GID for untrusted users"
49810 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
49811 + default 1005
49812 + help
49813 + Setting this GID determines what group TPE restrictions will be
49814 + *enabled* for. If the sysctl option is enabled, a sysctl option
49815 + with name "tpe_gid" is created.
49816 +
49817 +config GRKERNSEC_TPE_GID
49818 + int "GID for trusted users"
49819 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
49820 + default 1005
49821 + help
49822 + Setting this GID determines what group TPE restrictions will be
49823 + *disabled* for. If the sysctl option is enabled, a sysctl option
49824 + with name "tpe_gid" is created.
49825 +
49826 +endmenu
49827 +menu "Network Protections"
49828 +depends on GRKERNSEC
49829 +
49830 +config GRKERNSEC_RANDNET
49831 + bool "Larger entropy pools"
49832 + help
49833 + If you say Y here, the entropy pools used for many features of Linux
49834 + and grsecurity will be doubled in size. Since several grsecurity
49835 + features use additional randomness, it is recommended that you say Y
49836 + here. Saying Y here has a similar effect as modifying
49837 + /proc/sys/kernel/random/poolsize.
49838 +
49839 +config GRKERNSEC_BLACKHOLE
49840 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
49841 + depends on NET
49842 + help
49843 + If you say Y here, neither TCP resets nor ICMP
49844 + destination-unreachable packets will be sent in response to packets
49845 + sent to ports for which no associated listening process exists.
49846 + This feature supports both IPV4 and IPV6 and exempts the
49847 + loopback interface from blackholing. Enabling this feature
49848 + makes a host more resilient to DoS attacks and reduces network
49849 + visibility against scanners.
49850 +
49851 + The blackhole feature as-implemented is equivalent to the FreeBSD
49852 + blackhole feature, as it prevents RST responses to all packets, not
49853 + just SYNs. Under most application behavior this causes no
49854 + problems, but applications (like haproxy) may not close certain
49855 + connections in a way that cleanly terminates them on the remote
49856 + end, leaving the remote host in LAST_ACK state. Because of this
49857 + side-effect and to prevent intentional LAST_ACK DoSes, this
49858 + feature also adds automatic mitigation against such attacks.
49859 + The mitigation drastically reduces the amount of time a socket
49860 + can spend in LAST_ACK state. If you're using haproxy and not
49861 + all servers it connects to have this option enabled, consider
49862 + disabling this feature on the haproxy host.
49863 +
49864 + If the sysctl option is enabled, two sysctl options with names
49865 + "ip_blackhole" and "lastack_retries" will be created.
49866 + While "ip_blackhole" takes the standard zero/non-zero on/off
49867 + toggle, "lastack_retries" uses the same kinds of values as
49868 + "tcp_retries1" and "tcp_retries2". The default value of 4
49869 + prevents a socket from lasting more than 45 seconds in LAST_ACK
49870 + state.
49871 +
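As an illustration of the two entries named above, a minimal userspace sketch (assuming the /proc/sys/kernel/grsecurity/ location described under "Sysctl support" later in this menu, and that CONFIG_GRKERNSEC_SYSCTL is enabled) might toggle them like this:

    /* Illustrative sketch only: enable blackholing and keep the default
     * LAST_ACK retry count.  The paths are assumptions; see the lead-in. */
    #include <stdio.h>

    int main(void)
    {
        FILE *f;

        f = fopen("/proc/sys/kernel/grsecurity/ip_blackhole", "w");
        if (f) { fputs("1\n", f); fclose(f); }    /* non-zero turns blackholing on */

        f = fopen("/proc/sys/kernel/grsecurity/lastack_retries", "w");
        if (f) { fputs("4\n", f); fclose(f); }    /* same scale as tcp_retries1/2 */
        return 0;
    }
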
49872 +config GRKERNSEC_SOCKET
49873 + bool "Socket restrictions"
49874 + depends on NET
49875 + help
49876 + If you say Y here, you will be able to choose from several options.
49877 + If you assign a GID on your system and add it to the supplementary
49878 + groups of users you want to restrict socket access to, this patch
49879 + will perform up to three things, based on the option(s) you choose.
49880 +
49881 +config GRKERNSEC_SOCKET_ALL
49882 + bool "Deny any sockets to group"
49883 + depends on GRKERNSEC_SOCKET
49884 + help
49885 + If you say Y here, you will be able to choose a GID whose users will
49886 + be unable to connect to other hosts from your machine or run server
49887 + applications from your machine. If the sysctl option is enabled, a
49888 + sysctl option with name "socket_all" is created.
49889 +
49890 +config GRKERNSEC_SOCKET_ALL_GID
49891 + int "GID to deny all sockets for"
49892 + depends on GRKERNSEC_SOCKET_ALL
49893 + default 1004
49894 + help
49895 + Here you can choose the GID to disable socket access for. Remember to
49896 + add the users you want socket access disabled for to the GID
49897 + specified here. If the sysctl option is enabled, a sysctl option
49898 + with name "socket_all_gid" is created.
49899 +
49900 +config GRKERNSEC_SOCKET_CLIENT
49901 + bool "Deny client sockets to group"
49902 + depends on GRKERNSEC_SOCKET
49903 + help
49904 + If you say Y here, you will be able to choose a GID whose users will
49905 + be unable to connect to other hosts from your machine, but will be
49906 + able to run servers. If this option is enabled, all users in the group
49907 + you specify will have to use passive mode when initiating ftp transfers
49908 + from the shell on your machine. If the sysctl option is enabled, a
49909 + sysctl option with name "socket_client" is created.
49910 +
49911 +config GRKERNSEC_SOCKET_CLIENT_GID
49912 + int "GID to deny client sockets for"
49913 + depends on GRKERNSEC_SOCKET_CLIENT
49914 + default 1003
49915 + help
49916 + Here you can choose the GID to disable client socket access for.
49917 + Remember to add the users you want client socket access disabled for to
49918 + the GID specified here. If the sysctl option is enabled, a sysctl
49919 + option with name "socket_client_gid" is created.
49920 +
49921 +config GRKERNSEC_SOCKET_SERVER
49922 + bool "Deny server sockets to group"
49923 + depends on GRKERNSEC_SOCKET
49924 + help
49925 + If you say Y here, you will be able to choose a GID whose users will
49926 + be unable to run server applications from your machine. If the sysctl
49927 + option is enabled, a sysctl option with name "socket_server" is created.
49928 +
49929 +config GRKERNSEC_SOCKET_SERVER_GID
49930 + int "GID to deny server sockets for"
49931 + depends on GRKERNSEC_SOCKET_SERVER
49932 + default 1002
49933 + help
49934 + Here you can choose the GID to disable server socket access for.
49935 + Remember to add the users you want server socket access disabled for to
49936 + the GID specified here. If the sysctl option is enabled, a sysctl
49937 + option with name "socket_server_gid" is created.
49938 +
49939 +endmenu
49940 +menu "Sysctl support"
49941 +depends on GRKERNSEC && SYSCTL
49942 +
49943 +config GRKERNSEC_SYSCTL
49944 + bool "Sysctl support"
49945 + help
49946 + If you say Y here, you will be able to change the options that
49947 + grsecurity runs with at bootup, without having to recompile your
49948 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
49949 + to enable (1) or disable (0) various features. All the sysctl entries
49950 + are mutable until the "grsec_lock" entry is set to a non-zero value.
49951 + All features enabled in the kernel configuration are disabled at boot
49952 + if you do not say Y to the "Turn on features by default" option.
49953 + All options should be set at startup, and the grsec_lock entry should
49954 + be set to a non-zero value after all the options are set.
49955 + *THIS IS EXTREMELY IMPORTANT*
49956 +
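The boot-time sequence described above (set every entry first, write grsec_lock last) can be illustrated with a small userspace sketch; the entry names below are taken from the help texts in this menu, and the directory comes from the paragraph above:

    /* Illustrative sketch: write the desired grsecurity sysctls, then set
     * grsec_lock last, after which the entries become immutable. */
    #include <stdio.h>

    static void set_grsec(const char *name, int val)
    {
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
        f = fopen(path, "w");
        if (!f)
            return;
        fprintf(f, "%d\n", val);
        fclose(f);
    }

    int main(void)
    {
        set_grsec("tpe_restrict_all", 1);   /* names from the help texts above */
        set_grsec("socket_all", 1);
        set_grsec("grsec_lock", 1);         /* must be written last */
        return 0;
    }
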
49957 +config GRKERNSEC_SYSCTL_DISTRO
49958 + bool "Extra sysctl support for distro makers (READ HELP)"
49959 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
49960 + help
49961 + If you say Y here, additional sysctl options will be created
49962 + for features that affect processes running as root. Therefore,
49963 + it is critical when using this option that the grsec_lock entry be
49964 + enabled after boot. Only distros that ship prebuilt kernel packages
49965 + with this option enabled and that can ensure grsec_lock is enabled
49966 + after boot should use this option.
49967 + *Failure to set grsec_lock after boot makes all grsec features
49968 + this option covers useless*
49969 +
49970 + Currently this option creates the following sysctl entries:
49971 + "Disable Privileged I/O": "disable_priv_io"
49972 +
49973 +config GRKERNSEC_SYSCTL_ON
49974 + bool "Turn on features by default"
49975 + depends on GRKERNSEC_SYSCTL
49976 + help
49977 + If you say Y here, the features enabled in the kernel configuration
49978 + will be enabled at boot time rather than starting out disabled.
49979 + It is recommended you say Y here unless
49980 + there is some reason you would want all sysctl-tunable features to
49981 + be disabled by default. As mentioned elsewhere, it is important
49982 + to enable the grsec_lock entry once you have finished modifying
49983 + the sysctl entries.
49984 +
49985 +endmenu
49986 +menu "Logging Options"
49987 +depends on GRKERNSEC
49988 +
49989 +config GRKERNSEC_FLOODTIME
49990 + int "Seconds in between log messages (minimum)"
49991 + default 10
49992 + help
49993 + This option allows you to enforce a minimum number of seconds between
49994 + grsecurity log messages. The default should be suitable for most
49995 + people; however, if you choose to change it, choose a value small enough
49996 + to allow informative logs to be produced, but large enough to
49997 + prevent flooding.
49998 +
49999 +config GRKERNSEC_FLOODBURST
50000 + int "Number of messages in a burst (maximum)"
50001 + default 6
50002 + help
50003 + This option allows you to choose the maximum number of messages allowed
50004 + within the flood time interval you chose in a separate option. The
50005 + default should be suitable for most people; however, if you find that
50006 + many of your logs are being interpreted as flooding, you may want to
50007 + raise this value.
50008 +
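Together, the two options above describe a simple rate limiter: at most FLOODBURST messages are accepted within any FLOODTIME-second window. A standalone sketch of that semantics (names and time source are illustrative, not the patch's actual implementation):

    /* Sketch of the flood-limiting behaviour described above. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static bool grlog_allowed(int floodtime, int burst)
    {
        static time_t window_start;
        static int count;
        time_t now = time(NULL);

        if (now - window_start >= floodtime) {
            window_start = now;    /* open a new window */
            count = 0;
        }
        if (count >= burst)
            return false;          /* would be counted as flooding */
        count++;
        return true;
    }

    int main(void)
    {
        int i;

        /* with the defaults (10 seconds, burst of 6) only 6 of these pass */
        for (i = 0; i < 10; i++)
            printf("message %d: %s\n", i, grlog_allowed(10, 6) ? "logged" : "suppressed");
        return 0;
    }
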
50009 +endmenu
50010 +
50011 +endmenu
50012 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
50013 new file mode 100644
50014 index 0000000..1b9afa9
50015 --- /dev/null
50016 +++ b/grsecurity/Makefile
50017 @@ -0,0 +1,38 @@
50018 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50019 +# during 2001-2009 it was completely redesigned by Brad Spengler
50020 +# into an RBAC system
50021 +#
50022 +# All code in this directory and various hooks inserted throughout the kernel
50023 +# are copyright Brad Spengler - Open Source Security, Inc., and released
50024 +# under the GPL v2 or higher
50025 +
50026 +KBUILD_CFLAGS += -Werror
50027 +
50028 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50029 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
50030 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50031 +
50032 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50033 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50034 + gracl_learn.o grsec_log.o
50035 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50036 +
50037 +ifdef CONFIG_NET
50038 +obj-y += grsec_sock.o
50039 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50040 +endif
50041 +
50042 +ifndef CONFIG_GRKERNSEC
50043 +obj-y += grsec_disabled.o
50044 +endif
50045 +
50046 +ifdef CONFIG_GRKERNSEC_HIDESYM
50047 +extra-y := grsec_hidesym.o
50048 +$(obj)/grsec_hidesym.o:
50049 + @-chmod -f 500 /boot
50050 + @-chmod -f 500 /lib/modules
50051 + @-chmod -f 500 /lib64/modules
50052 + @-chmod -f 500 /lib32/modules
50053 + @-chmod -f 700 .
50054 + @echo ' grsec: protected kernel image paths'
50055 +endif
50056 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
50057 new file mode 100644
50058 index 0000000..00b6c54
50059 --- /dev/null
50060 +++ b/grsecurity/gracl.c
50061 @@ -0,0 +1,4012 @@
50062 +#include <linux/kernel.h>
50063 +#include <linux/module.h>
50064 +#include <linux/sched.h>
50065 +#include <linux/mm.h>
50066 +#include <linux/file.h>
50067 +#include <linux/fs.h>
50068 +#include <linux/namei.h>
50069 +#include <linux/mount.h>
50070 +#include <linux/tty.h>
50071 +#include <linux/proc_fs.h>
50072 +#include <linux/lglock.h>
50073 +#include <linux/slab.h>
50074 +#include <linux/vmalloc.h>
50075 +#include <linux/types.h>
50076 +#include <linux/sysctl.h>
50077 +#include <linux/netdevice.h>
50078 +#include <linux/ptrace.h>
50079 +#include <linux/gracl.h>
50080 +#include <linux/gralloc.h>
50081 +#include <linux/security.h>
50082 +#include <linux/grinternal.h>
50083 +#include <linux/pid_namespace.h>
50084 +#include <linux/fdtable.h>
50085 +#include <linux/percpu.h>
50086 +#include "../fs/mount.h"
50087 +
50088 +#include <asm/uaccess.h>
50089 +#include <asm/errno.h>
50090 +#include <asm/mman.h>
50091 +
50092 +static struct acl_role_db acl_role_set;
50093 +static struct name_db name_set;
50094 +static struct inodev_db inodev_set;
50095 +
50096 +/* for keeping track of userspace pointers used for subjects, so we
50097 + can share references in the kernel as well
50098 +*/
50099 +
50100 +static struct path real_root;
50101 +
50102 +static struct acl_subj_map_db subj_map_set;
50103 +
50104 +static struct acl_role_label *default_role;
50105 +
50106 +static struct acl_role_label *role_list;
50107 +
50108 +static u16 acl_sp_role_value;
50109 +
50110 +extern char *gr_shared_page[4];
50111 +static DEFINE_MUTEX(gr_dev_mutex);
50112 +DEFINE_RWLOCK(gr_inode_lock);
50113 +
50114 +struct gr_arg *gr_usermode;
50115 +
50116 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
50117 +
50118 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
50119 +extern void gr_clear_learn_entries(void);
50120 +
50121 +#ifdef CONFIG_GRKERNSEC_RESLOG
50122 +extern void gr_log_resource(const struct task_struct *task,
50123 + const int res, const unsigned long wanted, const int gt);
50124 +#endif
50125 +
50126 +unsigned char *gr_system_salt;
50127 +unsigned char *gr_system_sum;
50128 +
50129 +static struct sprole_pw **acl_special_roles = NULL;
50130 +static __u16 num_sprole_pws = 0;
50131 +
50132 +static struct acl_role_label *kernel_role = NULL;
50133 +
50134 +static unsigned int gr_auth_attempts = 0;
50135 +static unsigned long gr_auth_expires = 0UL;
50136 +
50137 +#ifdef CONFIG_NET
50138 +extern struct vfsmount *sock_mnt;
50139 +#endif
50140 +
50141 +extern struct vfsmount *pipe_mnt;
50142 +extern struct vfsmount *shm_mnt;
50143 +#ifdef CONFIG_HUGETLBFS
50144 +extern struct vfsmount *hugetlbfs_vfsmount;
50145 +#endif
50146 +
50147 +static struct acl_object_label *fakefs_obj_rw;
50148 +static struct acl_object_label *fakefs_obj_rwx;
50149 +
50150 +extern int gr_init_uidset(void);
50151 +extern void gr_free_uidset(void);
50152 +extern void gr_remove_uid(uid_t uid);
50153 +extern int gr_find_uid(uid_t uid);
50154 +
50155 +DECLARE_BRLOCK(vfsmount_lock);
50156 +
50157 +__inline__ int
50158 +gr_acl_is_enabled(void)
50159 +{
50160 + return (gr_status & GR_READY);
50161 +}
50162 +
50163 +#ifdef CONFIG_BTRFS_FS
50164 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50165 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50166 +#endif
50167 +
50168 +static inline dev_t __get_dev(const struct dentry *dentry)
50169 +{
50170 +#ifdef CONFIG_BTRFS_FS
50171 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50172 + return get_btrfs_dev_from_inode(dentry->d_inode);
50173 + else
50174 +#endif
50175 + return dentry->d_inode->i_sb->s_dev;
50176 +}
50177 +
50178 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50179 +{
50180 + return __get_dev(dentry);
50181 +}
50182 +
50183 +static char gr_task_roletype_to_char(struct task_struct *task)
50184 +{
50185 + switch (task->role->roletype &
50186 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
50187 + GR_ROLE_SPECIAL)) {
50188 + case GR_ROLE_DEFAULT:
50189 + return 'D';
50190 + case GR_ROLE_USER:
50191 + return 'U';
50192 + case GR_ROLE_GROUP:
50193 + return 'G';
50194 + case GR_ROLE_SPECIAL:
50195 + return 'S';
50196 + }
50197 +
50198 + return 'X';
50199 +}
50200 +
50201 +char gr_roletype_to_char(void)
50202 +{
50203 + return gr_task_roletype_to_char(current);
50204 +}
50205 +
50206 +__inline__ int
50207 +gr_acl_tpe_check(void)
50208 +{
50209 + if (unlikely(!(gr_status & GR_READY)))
50210 + return 0;
50211 + if (current->role->roletype & GR_ROLE_TPE)
50212 + return 1;
50213 + else
50214 + return 0;
50215 +}
50216 +
50217 +int
50218 +gr_handle_rawio(const struct inode *inode)
50219 +{
50220 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50221 + if (inode && S_ISBLK(inode->i_mode) &&
50222 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50223 + !capable(CAP_SYS_RAWIO))
50224 + return 1;
50225 +#endif
50226 + return 0;
50227 +}
50228 +
50229 +static int
50230 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50231 +{
50232 + if (likely(lena != lenb))
50233 + return 0;
50234 +
50235 + return !memcmp(a, b, lena);
50236 +}
50237 +
50238 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
50239 +{
50240 + *buflen -= namelen;
50241 + if (*buflen < 0)
50242 + return -ENAMETOOLONG;
50243 + *buffer -= namelen;
50244 + memcpy(*buffer, str, namelen);
50245 + return 0;
50246 +}
50247 +
50248 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
50249 +{
50250 + return prepend(buffer, buflen, name->name, name->len);
50251 +}
50252 +
50253 +static int prepend_path(const struct path *path, struct path *root,
50254 + char **buffer, int *buflen)
50255 +{
50256 + struct dentry *dentry = path->dentry;
50257 + struct vfsmount *vfsmnt = path->mnt;
50258 + struct mount *mnt = real_mount(vfsmnt);
50259 + bool slash = false;
50260 + int error = 0;
50261 +
50262 + while (dentry != root->dentry || vfsmnt != root->mnt) {
50263 + struct dentry * parent;
50264 +
50265 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50266 + /* Global root? */
50267 + if (!mnt_has_parent(mnt)) {
50268 + goto out;
50269 + }
50270 + dentry = mnt->mnt_mountpoint;
50271 + mnt = mnt->mnt_parent;
50272 + vfsmnt = &mnt->mnt;
50273 + continue;
50274 + }
50275 + parent = dentry->d_parent;
50276 + prefetch(parent);
50277 + spin_lock(&dentry->d_lock);
50278 + error = prepend_name(buffer, buflen, &dentry->d_name);
50279 + spin_unlock(&dentry->d_lock);
50280 + if (!error)
50281 + error = prepend(buffer, buflen, "/", 1);
50282 + if (error)
50283 + break;
50284 +
50285 + slash = true;
50286 + dentry = parent;
50287 + }
50288 +
50289 +out:
50290 + if (!error && !slash)
50291 + error = prepend(buffer, buflen, "/", 1);
50292 +
50293 + return error;
50294 +}
50295 +
50296 +/* this must be called with vfsmount_lock and rename_lock held */
50297 +
50298 +static char *__our_d_path(const struct path *path, struct path *root,
50299 + char *buf, int buflen)
50300 +{
50301 + char *res = buf + buflen;
50302 + int error;
50303 +
50304 + prepend(&res, &buflen, "\0", 1);
50305 + error = prepend_path(path, root, &res, &buflen);
50306 + if (error)
50307 + return ERR_PTR(error);
50308 +
50309 + return res;
50310 +}
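prepend(), prepend_name() and __our_d_path() above assemble the path right to left: writing starts at the end of the caller's buffer and each component is copied in front of what is already there, so the finished string ends up at the returned pointer. A standalone sketch of the same buffer technique (simplified, with hard-coded components):

    /* Demonstration of the right-to-left path assembly used above. */
    #include <stdio.h>
    #include <string.h>

    static int prepend_str(char **buffer, int *buflen, const char *str, int namelen)
    {
        *buflen -= namelen;
        if (*buflen < 0)
            return -1;             /* the kernel code returns -ENAMETOOLONG */
        *buffer -= namelen;
        memcpy(*buffer, str, namelen);
        return 0;
    }

    int main(void)
    {
        char buf[64];
        char *res = buf + sizeof(buf);
        int buflen = sizeof(buf);
        /* components in the order a dentry walk would visit them: leaf first */
        const char *names[] = { "passwd", "etc" };
        int i;

        prepend_str(&res, &buflen, "\0", 1);
        for (i = 0; i < 2; i++) {
            prepend_str(&res, &buflen, names[i], (int)strlen(names[i]));
            prepend_str(&res, &buflen, "/", 1);
        }
        printf("%s\n", res);       /* prints "/etc/passwd" */
        return 0;
    }
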
50311 +
50312 +static char *
50313 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
50314 +{
50315 + char *retval;
50316 +
50317 + retval = __our_d_path(path, root, buf, buflen);
50318 + if (unlikely(IS_ERR(retval)))
50319 + retval = strcpy(buf, "<path too long>");
50320 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50321 + retval[1] = '\0';
50322 +
50323 + return retval;
50324 +}
50325 +
50326 +static char *
50327 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50328 + char *buf, int buflen)
50329 +{
50330 + struct path path;
50331 + char *res;
50332 +
50333 + path.dentry = (struct dentry *)dentry;
50334 + path.mnt = (struct vfsmount *)vfsmnt;
50335 +
50336 + /* we can use real_root.dentry, real_root.mnt, because this is only called
50337 + by the RBAC system */
50338 + res = gen_full_path(&path, &real_root, buf, buflen);
50339 +
50340 + return res;
50341 +}
50342 +
50343 +static char *
50344 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50345 + char *buf, int buflen)
50346 +{
50347 + char *res;
50348 + struct path path;
50349 + struct path root;
50350 + struct task_struct *reaper = init_pid_ns.child_reaper;
50351 +
50352 + path.dentry = (struct dentry *)dentry;
50353 + path.mnt = (struct vfsmount *)vfsmnt;
50354 +
50355 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
50356 + get_fs_root(reaper->fs, &root);
50357 +
50358 + write_seqlock(&rename_lock);
50359 + br_read_lock(vfsmount_lock);
50360 + res = gen_full_path(&path, &root, buf, buflen);
50361 + br_read_unlock(vfsmount_lock);
50362 + write_sequnlock(&rename_lock);
50363 +
50364 + path_put(&root);
50365 + return res;
50366 +}
50367 +
50368 +static char *
50369 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50370 +{
50371 + char *ret;
50372 + write_seqlock(&rename_lock);
50373 + br_read_lock(vfsmount_lock);
50374 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50375 + PAGE_SIZE);
50376 + br_read_unlock(vfsmount_lock);
50377 + write_sequnlock(&rename_lock);
50378 + return ret;
50379 +}
50380 +
50381 +static char *
50382 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50383 +{
50384 + char *ret;
50385 + char *buf;
50386 + int buflen;
50387 +
50388 + write_seqlock(&rename_lock);
50389 + br_read_lock(vfsmount_lock);
50390 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50391 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
50392 + buflen = (int)(ret - buf);
50393 + if (buflen >= 5)
50394 + prepend(&ret, &buflen, "/proc", 5);
50395 + else
50396 + ret = strcpy(buf, "<path too long>");
50397 + br_read_unlock(vfsmount_lock);
50398 + write_sequnlock(&rename_lock);
50399 + return ret;
50400 +}
50401 +
50402 +char *
50403 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
50404 +{
50405 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50406 + PAGE_SIZE);
50407 +}
50408 +
50409 +char *
50410 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
50411 +{
50412 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50413 + PAGE_SIZE);
50414 +}
50415 +
50416 +char *
50417 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
50418 +{
50419 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
50420 + PAGE_SIZE);
50421 +}
50422 +
50423 +char *
50424 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
50425 +{
50426 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
50427 + PAGE_SIZE);
50428 +}
50429 +
50430 +char *
50431 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
50432 +{
50433 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
50434 + PAGE_SIZE);
50435 +}
50436 +
50437 +__inline__ __u32
50438 +to_gr_audit(const __u32 reqmode)
50439 +{
50440 + /* masks off auditable permission flags, then shifts them to create
50441 + auditing flags, and adds the special case of append auditing if
50442 + we're requesting write */
50443 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
50444 +}
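to_gr_audit() works because each audit flag is laid out exactly 10 bit positions above the permission flag it corresponds to, so a single shift converts a request mask into the matching audit mask. The constants below are hypothetical placeholders (the real GR_* values live in the gracl headers), but they show the idea:

    /* Hypothetical bit layout illustrating the shift in to_gr_audit(). */
    #include <assert.h>

    #define X_GR_READ        0x00000002u
    #define X_GR_WRITE       0x00000008u
    #define X_GR_AUDIT_READ  (X_GR_READ  << 10)
    #define X_GR_AUDIT_WRITE (X_GR_WRITE << 10)

    int main(void)
    {
        unsigned int reqmode = X_GR_READ | X_GR_WRITE;

        assert((reqmode << 10) == (X_GR_AUDIT_READ | X_GR_AUDIT_WRITE));
        return 0;
    }
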
50445 +
50446 +struct acl_subject_label *
50447 +lookup_subject_map(const struct acl_subject_label *userp)
50448 +{
50449 + unsigned int index = shash(userp, subj_map_set.s_size);
50450 + struct subject_map *match;
50451 +
50452 + match = subj_map_set.s_hash[index];
50453 +
50454 + while (match && match->user != userp)
50455 + match = match->next;
50456 +
50457 + if (match != NULL)
50458 + return match->kernel;
50459 + else
50460 + return NULL;
50461 +}
50462 +
50463 +static void
50464 +insert_subj_map_entry(struct subject_map *subjmap)
50465 +{
50466 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
50467 + struct subject_map **curr;
50468 +
50469 + subjmap->prev = NULL;
50470 +
50471 + curr = &subj_map_set.s_hash[index];
50472 + if (*curr != NULL)
50473 + (*curr)->prev = subjmap;
50474 +
50475 + subjmap->next = *curr;
50476 + *curr = subjmap;
50477 +
50478 + return;
50479 +}
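insert_subj_map_entry() and the other insert_* helpers in this file all use the same head insertion into a doubly linked bucket chain: the new entry's prev is cleared, the old head (if any) is pointed back at the new entry, and the new entry becomes the head. The pattern, reduced to a generic node type, is:

    /* Generic form of the bucket head-insertion used by the insert_* helpers. */
    #include <stddef.h>

    struct node {
        unsigned long key;
        struct node *prev;
        struct node *next;
    };

    static void bucket_insert(struct node **bucket, struct node *n)
    {
        n->prev = NULL;
        if (*bucket != NULL)
            (*bucket)->prev = n;   /* old head now points back at the new entry */
        n->next = *bucket;
        *bucket = n;               /* new entry becomes the head */
    }

    int main(void)
    {
        struct node a = { .key = 1 }, b = { .key = 2 };
        struct node *bucket = NULL;

        bucket_insert(&bucket, &a);
        bucket_insert(&bucket, &b);    /* bucket -> b -> a */
        return (bucket == &b && bucket->next == &a) ? 0 : 1;
    }
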
50480 +
50481 +static struct acl_role_label *
50482 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
50483 + const gid_t gid)
50484 +{
50485 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
50486 + struct acl_role_label *match;
50487 + struct role_allowed_ip *ipp;
50488 + unsigned int x;
50489 + u32 curr_ip = task->signal->curr_ip;
50490 +
50491 + task->signal->saved_ip = curr_ip;
50492 +
50493 + match = acl_role_set.r_hash[index];
50494 +
50495 + while (match) {
50496 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
50497 + for (x = 0; x < match->domain_child_num; x++) {
50498 + if (match->domain_children[x] == uid)
50499 + goto found;
50500 + }
50501 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
50502 + break;
50503 + match = match->next;
50504 + }
50505 +found:
50506 + if (match == NULL) {
50507 + try_group:
50508 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
50509 + match = acl_role_set.r_hash[index];
50510 +
50511 + while (match) {
50512 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
50513 + for (x = 0; x < match->domain_child_num; x++) {
50514 + if (match->domain_children[x] == gid)
50515 + goto found2;
50516 + }
50517 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
50518 + break;
50519 + match = match->next;
50520 + }
50521 +found2:
50522 + if (match == NULL)
50523 + match = default_role;
50524 + if (match->allowed_ips == NULL)
50525 + return match;
50526 + else {
50527 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50528 + if (likely
50529 + ((ntohl(curr_ip) & ipp->netmask) ==
50530 + (ntohl(ipp->addr) & ipp->netmask)))
50531 + return match;
50532 + }
50533 + match = default_role;
50534 + }
50535 + } else if (match->allowed_ips == NULL) {
50536 + return match;
50537 + } else {
50538 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50539 + if (likely
50540 + ((ntohl(curr_ip) & ipp->netmask) ==
50541 + (ntohl(ipp->addr) & ipp->netmask)))
50542 + return match;
50543 + }
50544 + goto try_group;
50545 + }
50546 +
50547 + return match;
50548 +}
50549 +
50550 +struct acl_subject_label *
50551 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
50552 + const struct acl_role_label *role)
50553 +{
50554 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
50555 + struct acl_subject_label *match;
50556 +
50557 + match = role->subj_hash[index];
50558 +
50559 + while (match && (match->inode != ino || match->device != dev ||
50560 + (match->mode & GR_DELETED))) {
50561 + match = match->next;
50562 + }
50563 +
50564 + if (match && !(match->mode & GR_DELETED))
50565 + return match;
50566 + else
50567 + return NULL;
50568 +}
50569 +
50570 +struct acl_subject_label *
50571 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
50572 + const struct acl_role_label *role)
50573 +{
50574 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
50575 + struct acl_subject_label *match;
50576 +
50577 + match = role->subj_hash[index];
50578 +
50579 + while (match && (match->inode != ino || match->device != dev ||
50580 + !(match->mode & GR_DELETED))) {
50581 + match = match->next;
50582 + }
50583 +
50584 + if (match && (match->mode & GR_DELETED))
50585 + return match;
50586 + else
50587 + return NULL;
50588 +}
50589 +
50590 +static struct acl_object_label *
50591 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
50592 + const struct acl_subject_label *subj)
50593 +{
50594 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50595 + struct acl_object_label *match;
50596 +
50597 + match = subj->obj_hash[index];
50598 +
50599 + while (match && (match->inode != ino || match->device != dev ||
50600 + (match->mode & GR_DELETED))) {
50601 + match = match->next;
50602 + }
50603 +
50604 + if (match && !(match->mode & GR_DELETED))
50605 + return match;
50606 + else
50607 + return NULL;
50608 +}
50609 +
50610 +static struct acl_object_label *
50611 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
50612 + const struct acl_subject_label *subj)
50613 +{
50614 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50615 + struct acl_object_label *match;
50616 +
50617 + match = subj->obj_hash[index];
50618 +
50619 + while (match && (match->inode != ino || match->device != dev ||
50620 + !(match->mode & GR_DELETED))) {
50621 + match = match->next;
50622 + }
50623 +
50624 + if (match && (match->mode & GR_DELETED))
50625 + return match;
50626 +
50627 + match = subj->obj_hash[index];
50628 +
50629 + while (match && (match->inode != ino || match->device != dev ||
50630 + (match->mode & GR_DELETED))) {
50631 + match = match->next;
50632 + }
50633 +
50634 + if (match && !(match->mode & GR_DELETED))
50635 + return match;
50636 + else
50637 + return NULL;
50638 +}
50639 +
50640 +static struct name_entry *
50641 +lookup_name_entry(const char *name)
50642 +{
50643 + unsigned int len = strlen(name);
50644 + unsigned int key = full_name_hash(name, len);
50645 + unsigned int index = key % name_set.n_size;
50646 + struct name_entry *match;
50647 +
50648 + match = name_set.n_hash[index];
50649 +
50650 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
50651 + match = match->next;
50652 +
50653 + return match;
50654 +}
50655 +
50656 +static struct name_entry *
50657 +lookup_name_entry_create(const char *name)
50658 +{
50659 + unsigned int len = strlen(name);
50660 + unsigned int key = full_name_hash(name, len);
50661 + unsigned int index = key % name_set.n_size;
50662 + struct name_entry *match;
50663 +
50664 + match = name_set.n_hash[index];
50665 +
50666 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50667 + !match->deleted))
50668 + match = match->next;
50669 +
50670 + if (match && match->deleted)
50671 + return match;
50672 +
50673 + match = name_set.n_hash[index];
50674 +
50675 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50676 + match->deleted))
50677 + match = match->next;
50678 +
50679 + if (match && !match->deleted)
50680 + return match;
50681 + else
50682 + return NULL;
50683 +}
50684 +
50685 +static struct inodev_entry *
50686 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
50687 +{
50688 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
50689 + struct inodev_entry *match;
50690 +
50691 + match = inodev_set.i_hash[index];
50692 +
50693 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
50694 + match = match->next;
50695 +
50696 + return match;
50697 +}
50698 +
50699 +static void
50700 +insert_inodev_entry(struct inodev_entry *entry)
50701 +{
50702 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
50703 + inodev_set.i_size);
50704 + struct inodev_entry **curr;
50705 +
50706 + entry->prev = NULL;
50707 +
50708 + curr = &inodev_set.i_hash[index];
50709 + if (*curr != NULL)
50710 + (*curr)->prev = entry;
50711 +
50712 + entry->next = *curr;
50713 + *curr = entry;
50714 +
50715 + return;
50716 +}
50717 +
50718 +static void
50719 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
50720 +{
50721 + unsigned int index =
50722 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
50723 + struct acl_role_label **curr;
50724 + struct acl_role_label *tmp, *tmp2;
50725 +
50726 + curr = &acl_role_set.r_hash[index];
50727 +
50728 + /* simple case, slot is empty, just set it to our role */
50729 + if (*curr == NULL) {
50730 + *curr = role;
50731 + } else {
50732 + /* example:
50733 + 1 -> 2 -> 3 (adding 2 -> 3 to here)
50734 + 2 -> 3
50735 + */
50736 + /* first check to see if we can already be reached via this slot */
50737 + tmp = *curr;
50738 + while (tmp && tmp != role)
50739 + tmp = tmp->next;
50740 + if (tmp == role) {
50741 + /* we don't need to add ourselves to this slot's chain */
50742 + return;
50743 + }
50744 + /* we need to add ourselves to this chain, two cases */
50745 + if (role->next == NULL) {
50746 + /* simple case, append the current chain to our role */
50747 + role->next = *curr;
50748 + *curr = role;
50749 + } else {
50750 + /* 1 -> 2 -> 3 -> 4
50751 + 2 -> 3 -> 4
50752 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
50753 + */
50754 + /* trickier case: walk our role's chain until we find
50755 + the role for the start of the current slot's chain */
50756 + tmp = role;
50757 + tmp2 = *curr;
50758 + while (tmp->next && tmp->next != tmp2)
50759 + tmp = tmp->next;
50760 + if (tmp->next == tmp2) {
50761 + /* from example above, we found 3, so just
50762 + replace this slot's chain with ours */
50763 + *curr = role;
50764 + } else {
50765 + /* we didn't find a subset of our role's chain
50766 + in the current slot's chain, so append their
50767 + chain to ours, and set us as the first role in
50768 + the slot's chain
50769 +
50770 + we could fold this case with the case above,
50771 + but making it explicit for clarity
50772 + */
50773 + tmp->next = tmp2;
50774 + *curr = role;
50775 + }
50776 + }
50777 + }
50778 +
50779 + return;
50780 +}
50781 +
50782 +static void
50783 +insert_acl_role_label(struct acl_role_label *role)
50784 +{
50785 + int i;
50786 +
50787 + if (role_list == NULL) {
50788 + role_list = role;
50789 + role->prev = NULL;
50790 + } else {
50791 + role->prev = role_list;
50792 + role_list = role;
50793 + }
50794 +
50795 + /* used for hash chains */
50796 + role->next = NULL;
50797 +
50798 + if (role->roletype & GR_ROLE_DOMAIN) {
50799 + for (i = 0; i < role->domain_child_num; i++)
50800 + __insert_acl_role_label(role, role->domain_children[i]);
50801 + } else
50802 + __insert_acl_role_label(role, role->uidgid);
50803 +}
50804 +
50805 +static int
50806 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
50807 +{
50808 + struct name_entry **curr, *nentry;
50809 + struct inodev_entry *ientry;
50810 + unsigned int len = strlen(name);
50811 + unsigned int key = full_name_hash(name, len);
50812 + unsigned int index = key % name_set.n_size;
50813 +
50814 + curr = &name_set.n_hash[index];
50815 +
50816 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
50817 + curr = &((*curr)->next);
50818 +
50819 + if (*curr != NULL)
50820 + return 1;
50821 +
50822 + nentry = acl_alloc(sizeof (struct name_entry));
50823 + if (nentry == NULL)
50824 + return 0;
50825 + ientry = acl_alloc(sizeof (struct inodev_entry));
50826 + if (ientry == NULL)
50827 + return 0;
50828 + ientry->nentry = nentry;
50829 +
50830 + nentry->key = key;
50831 + nentry->name = name;
50832 + nentry->inode = inode;
50833 + nentry->device = device;
50834 + nentry->len = len;
50835 + nentry->deleted = deleted;
50836 +
50837 + nentry->prev = NULL;
50838 + curr = &name_set.n_hash[index];
50839 + if (*curr != NULL)
50840 + (*curr)->prev = nentry;
50841 + nentry->next = *curr;
50842 + *curr = nentry;
50843 +
50844 + /* insert us into the table searchable by inode/dev */
50845 + insert_inodev_entry(ientry);
50846 +
50847 + return 1;
50848 +}
50849 +
50850 +static void
50851 +insert_acl_obj_label(struct acl_object_label *obj,
50852 + struct acl_subject_label *subj)
50853 +{
50854 + unsigned int index =
50855 + fhash(obj->inode, obj->device, subj->obj_hash_size);
50856 + struct acl_object_label **curr;
50857 +
50858 +
50859 + obj->prev = NULL;
50860 +
50861 + curr = &subj->obj_hash[index];
50862 + if (*curr != NULL)
50863 + (*curr)->prev = obj;
50864 +
50865 + obj->next = *curr;
50866 + *curr = obj;
50867 +
50868 + return;
50869 +}
50870 +
50871 +static void
50872 +insert_acl_subj_label(struct acl_subject_label *obj,
50873 + struct acl_role_label *role)
50874 +{
50875 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
50876 + struct acl_subject_label **curr;
50877 +
50878 + obj->prev = NULL;
50879 +
50880 + curr = &role->subj_hash[index];
50881 + if (*curr != NULL)
50882 + (*curr)->prev = obj;
50883 +
50884 + obj->next = *curr;
50885 + *curr = obj;
50886 +
50887 + return;
50888 +}
50889 +
50890 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
50891 +
50892 +static void *
50893 +create_table(__u32 * len, int elementsize)
50894 +{
50895 + unsigned int table_sizes[] = {
50896 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
50897 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
50898 + 4194301, 8388593, 16777213, 33554393, 67108859
50899 + };
50900 + void *newtable = NULL;
50901 + unsigned int pwr = 0;
50902 +
50903 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
50904 + table_sizes[pwr] <= *len)
50905 + pwr++;
50906 +
50907 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
50908 + return newtable;
50909 +
50910 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
50911 + newtable =
50912 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
50913 + else
50914 + newtable = vmalloc(table_sizes[pwr] * elementsize);
50915 +
50916 + *len = table_sizes[pwr];
50917 +
50918 + return newtable;
50919 +}
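As the comment above notes, the prime sizes keep the chained-hash load factor (entries per bucket) close to 1: create_table() rounds the requested count up to the next prime that is strictly larger. For example, a caller asking for 1000 slots gets 1021 buckets, so 1000 entries give an expected chain length of about 1000/1021, roughly 0.98:

    /* Usage fragment (create_table() is the static helper defined above). */
    __u32 len = 1000;
    void *table = create_table(&len, sizeof(void *));
    /* on success, len has been updated to 1021 */
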
50920 +
50921 +static int
50922 +init_variables(const struct gr_arg *arg)
50923 +{
50924 + struct task_struct *reaper = init_pid_ns.child_reaper;
50925 + unsigned int stacksize;
50926 +
50927 + subj_map_set.s_size = arg->role_db.num_subjects;
50928 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
50929 + name_set.n_size = arg->role_db.num_objects;
50930 + inodev_set.i_size = arg->role_db.num_objects;
50931 +
50932 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
50933 + !name_set.n_size || !inodev_set.i_size)
50934 + return 1;
50935 +
50936 + if (!gr_init_uidset())
50937 + return 1;
50938 +
50939 + /* set up the stack that holds allocation info */
50940 +
50941 + stacksize = arg->role_db.num_pointers + 5;
50942 +
50943 + if (!acl_alloc_stack_init(stacksize))
50944 + return 1;
50945 +
50946 + /* grab reference for the real root dentry and vfsmount */
50947 + get_fs_root(reaper->fs, &real_root);
50948 +
50949 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50950 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
50951 +#endif
50952 +
50953 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
50954 + if (fakefs_obj_rw == NULL)
50955 + return 1;
50956 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
50957 +
50958 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
50959 + if (fakefs_obj_rwx == NULL)
50960 + return 1;
50961 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
50962 +
50963 + subj_map_set.s_hash =
50964 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
50965 + acl_role_set.r_hash =
50966 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
50967 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
50968 + inodev_set.i_hash =
50969 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
50970 +
50971 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
50972 + !name_set.n_hash || !inodev_set.i_hash)
50973 + return 1;
50974 +
50975 + memset(subj_map_set.s_hash, 0,
50976 + sizeof(struct subject_map *) * subj_map_set.s_size);
50977 + memset(acl_role_set.r_hash, 0,
50978 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
50979 + memset(name_set.n_hash, 0,
50980 + sizeof (struct name_entry *) * name_set.n_size);
50981 + memset(inodev_set.i_hash, 0,
50982 + sizeof (struct inodev_entry *) * inodev_set.i_size);
50983 +
50984 + return 0;
50985 +}
50986 +
50987 +/* free information not needed after startup
50988 + currently contains user->kernel pointer mappings for subjects
50989 +*/
50990 +
50991 +static void
50992 +free_init_variables(void)
50993 +{
50994 + __u32 i;
50995 +
50996 + if (subj_map_set.s_hash) {
50997 + for (i = 0; i < subj_map_set.s_size; i++) {
50998 + if (subj_map_set.s_hash[i]) {
50999 + kfree(subj_map_set.s_hash[i]);
51000 + subj_map_set.s_hash[i] = NULL;
51001 + }
51002 + }
51003 +
51004 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51005 + PAGE_SIZE)
51006 + kfree(subj_map_set.s_hash);
51007 + else
51008 + vfree(subj_map_set.s_hash);
51009 + }
51010 +
51011 + return;
51012 +}
51013 +
51014 +static void
51015 +free_variables(void)
51016 +{
51017 + struct acl_subject_label *s;
51018 + struct acl_role_label *r;
51019 + struct task_struct *task, *task2;
51020 + unsigned int x;
51021 +
51022 + gr_clear_learn_entries();
51023 +
51024 + read_lock(&tasklist_lock);
51025 + do_each_thread(task2, task) {
51026 + task->acl_sp_role = 0;
51027 + task->acl_role_id = 0;
51028 + task->acl = NULL;
51029 + task->role = NULL;
51030 + } while_each_thread(task2, task);
51031 + read_unlock(&tasklist_lock);
51032 +
51033 + /* release the reference to the real root dentry and vfsmount */
51034 + path_put(&real_root);
51035 + memset(&real_root, 0, sizeof(real_root));
51036 +
51037 + /* free all object hash tables */
51038 +
51039 + FOR_EACH_ROLE_START(r)
51040 + if (r->subj_hash == NULL)
51041 + goto next_role;
51042 + FOR_EACH_SUBJECT_START(r, s, x)
51043 + if (s->obj_hash == NULL)
51044 + break;
51045 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51046 + kfree(s->obj_hash);
51047 + else
51048 + vfree(s->obj_hash);
51049 + FOR_EACH_SUBJECT_END(s, x)
51050 + FOR_EACH_NESTED_SUBJECT_START(r, s)
51051 + if (s->obj_hash == NULL)
51052 + break;
51053 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51054 + kfree(s->obj_hash);
51055 + else
51056 + vfree(s->obj_hash);
51057 + FOR_EACH_NESTED_SUBJECT_END(s)
51058 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
51059 + kfree(r->subj_hash);
51060 + else
51061 + vfree(r->subj_hash);
51062 + r->subj_hash = NULL;
51063 +next_role:
51064 + FOR_EACH_ROLE_END(r)
51065 +
51066 + acl_free_all();
51067 +
51068 + if (acl_role_set.r_hash) {
51069 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
51070 + PAGE_SIZE)
51071 + kfree(acl_role_set.r_hash);
51072 + else
51073 + vfree(acl_role_set.r_hash);
51074 + }
51075 + if (name_set.n_hash) {
51076 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
51077 + PAGE_SIZE)
51078 + kfree(name_set.n_hash);
51079 + else
51080 + vfree(name_set.n_hash);
51081 + }
51082 +
51083 + if (inodev_set.i_hash) {
51084 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
51085 + PAGE_SIZE)
51086 + kfree(inodev_set.i_hash);
51087 + else
51088 + vfree(inodev_set.i_hash);
51089 + }
51090 +
51091 + gr_free_uidset();
51092 +
51093 + memset(&name_set, 0, sizeof (struct name_db));
51094 + memset(&inodev_set, 0, sizeof (struct inodev_db));
51095 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
51096 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
51097 +
51098 + default_role = NULL;
51099 + kernel_role = NULL;
51100 + role_list = NULL;
51101 +
51102 + return;
51103 +}
51104 +
51105 +static __u32
51106 +count_user_objs(struct acl_object_label *userp)
51107 +{
51108 + struct acl_object_label o_tmp;
51109 + __u32 num = 0;
51110 +
51111 + while (userp) {
51112 + if (copy_from_user(&o_tmp, userp,
51113 + sizeof (struct acl_object_label)))
51114 + break;
51115 +
51116 + userp = o_tmp.prev;
51117 + num++;
51118 + }
51119 +
51120 + return num;
51121 +}
51122 +
51123 +static struct acl_subject_label *
51124 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
51125 +
51126 +static int
51127 +copy_user_glob(struct acl_object_label *obj)
51128 +{
51129 + struct acl_object_label *g_tmp, **guser;
51130 + unsigned int len;
51131 + char *tmp;
51132 +
51133 + if (obj->globbed == NULL)
51134 + return 0;
51135 +
51136 + guser = &obj->globbed;
51137 + while (*guser) {
51138 + g_tmp = (struct acl_object_label *)
51139 + acl_alloc(sizeof (struct acl_object_label));
51140 + if (g_tmp == NULL)
51141 + return -ENOMEM;
51142 +
51143 + if (copy_from_user(g_tmp, *guser,
51144 + sizeof (struct acl_object_label)))
51145 + return -EFAULT;
51146 +
51147 + len = strnlen_user(g_tmp->filename, PATH_MAX);
51148 +
51149 + if (!len || len >= PATH_MAX)
51150 + return -EINVAL;
51151 +
51152 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51153 + return -ENOMEM;
51154 +
51155 + if (copy_from_user(tmp, g_tmp->filename, len))
51156 + return -EFAULT;
51157 + tmp[len-1] = '\0';
51158 + g_tmp->filename = tmp;
51159 +
51160 + *guser = g_tmp;
51161 + guser = &(g_tmp->next);
51162 + }
51163 +
51164 + return 0;
51165 +}
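copy_user_glob() above also shows the pattern this file repeats for every userland string: measure it with strnlen_user() (which counts the terminating NUL and returns 0 on a fault), reject empty or over-long results, copy it with copy_from_user() into a fresh allocation, and force NUL-termination in case userspace raced. Pulled out on its own, with kmalloc() standing in for the file's pooled acl_alloc() and assuming the usual <linux/uaccess.h>, <linux/slab.h> and <linux/err.h> includes, the pattern looks like:

    /* Sketch of the user-string copy pattern used throughout this file. */
    static char *copy_user_string(const char __user *ustr, long maxlen)
    {
        long len;
        char *tmp;

        len = strnlen_user(ustr, maxlen);   /* length including the NUL */
        if (!len || len >= maxlen)
            return ERR_PTR(-EINVAL);

        tmp = kmalloc(len, GFP_KERNEL);
        if (tmp == NULL)
            return ERR_PTR(-ENOMEM);

        if (copy_from_user(tmp, ustr, len)) {
            kfree(tmp);
            return ERR_PTR(-EFAULT);
        }
        tmp[len - 1] = '\0';                /* guard against a racing writer */
        return tmp;
    }
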
51166 +
51167 +static int
51168 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
51169 + struct acl_role_label *role)
51170 +{
51171 + struct acl_object_label *o_tmp;
51172 + unsigned int len;
51173 + int ret;
51174 + char *tmp;
51175 +
51176 + while (userp) {
51177 + if ((o_tmp = (struct acl_object_label *)
51178 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
51179 + return -ENOMEM;
51180 +
51181 + if (copy_from_user(o_tmp, userp,
51182 + sizeof (struct acl_object_label)))
51183 + return -EFAULT;
51184 +
51185 + userp = o_tmp->prev;
51186 +
51187 + len = strnlen_user(o_tmp->filename, PATH_MAX);
51188 +
51189 + if (!len || len >= PATH_MAX)
51190 + return -EINVAL;
51191 +
51192 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51193 + return -ENOMEM;
51194 +
51195 + if (copy_from_user(tmp, o_tmp->filename, len))
51196 + return -EFAULT;
51197 + tmp[len-1] = '\0';
51198 + o_tmp->filename = tmp;
51199 +
51200 + insert_acl_obj_label(o_tmp, subj);
51201 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51202 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51203 + return -ENOMEM;
51204 +
51205 + ret = copy_user_glob(o_tmp);
51206 + if (ret)
51207 + return ret;
51208 +
51209 + if (o_tmp->nested) {
51210 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
51211 + if (IS_ERR(o_tmp->nested))
51212 + return PTR_ERR(o_tmp->nested);
51213 +
51214 + /* insert into nested subject list */
51215 + o_tmp->nested->next = role->hash->first;
51216 + role->hash->first = o_tmp->nested;
51217 + }
51218 + }
51219 +
51220 + return 0;
51221 +}
51222 +
51223 +static __u32
51224 +count_user_subjs(struct acl_subject_label *userp)
51225 +{
51226 + struct acl_subject_label s_tmp;
51227 + __u32 num = 0;
51228 +
51229 + while (userp) {
51230 + if (copy_from_user(&s_tmp, userp,
51231 + sizeof (struct acl_subject_label)))
51232 + break;
51233 +
51234 + userp = s_tmp.prev;
51235 + /* do not count nested subjects against this count, since
51236 + they are not included in the hash table, but are
51237 + attached to objects. We have already counted
51238 + the subjects in userspace for the allocation
51239 + stack
51240 + */
51241 + if (!(s_tmp.mode & GR_NESTED))
51242 + num++;
51243 + }
51244 +
51245 + return num;
51246 +}
51247 +
51248 +static int
51249 +copy_user_allowedips(struct acl_role_label *rolep)
51250 +{
51251 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
51252 +
51253 + ruserip = rolep->allowed_ips;
51254 +
51255 + while (ruserip) {
51256 + rlast = rtmp;
51257 +
51258 + if ((rtmp = (struct role_allowed_ip *)
51259 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
51260 + return -ENOMEM;
51261 +
51262 + if (copy_from_user(rtmp, ruserip,
51263 + sizeof (struct role_allowed_ip)))
51264 + return -EFAULT;
51265 +
51266 + ruserip = rtmp->prev;
51267 +
51268 + if (!rlast) {
51269 + rtmp->prev = NULL;
51270 + rolep->allowed_ips = rtmp;
51271 + } else {
51272 + rlast->next = rtmp;
51273 + rtmp->prev = rlast;
51274 + }
51275 +
51276 + if (!ruserip)
51277 + rtmp->next = NULL;
51278 + }
51279 +
51280 + return 0;
51281 +}
51282 +
51283 +static int
51284 +copy_user_transitions(struct acl_role_label *rolep)
51285 +{
51286 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
51287 +
51288 + unsigned int len;
51289 + char *tmp;
51290 +
51291 + rusertp = rolep->transitions;
51292 +
51293 + while (rusertp) {
51294 + rlast = rtmp;
51295 +
51296 + if ((rtmp = (struct role_transition *)
51297 + acl_alloc(sizeof (struct role_transition))) == NULL)
51298 + return -ENOMEM;
51299 +
51300 + if (copy_from_user(rtmp, rusertp,
51301 + sizeof (struct role_transition)))
51302 + return -EFAULT;
51303 +
51304 + rusertp = rtmp->prev;
51305 +
51306 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51307 +
51308 + if (!len || len >= GR_SPROLE_LEN)
51309 + return -EINVAL;
51310 +
51311 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51312 + return -ENOMEM;
51313 +
51314 + if (copy_from_user(tmp, rtmp->rolename, len))
51315 + return -EFAULT;
51316 + tmp[len-1] = '\0';
51317 + rtmp->rolename = tmp;
51318 +
51319 + if (!rlast) {
51320 + rtmp->prev = NULL;
51321 + rolep->transitions = rtmp;
51322 + } else {
51323 + rlast->next = rtmp;
51324 + rtmp->prev = rlast;
51325 + }
51326 +
51327 + if (!rusertp)
51328 + rtmp->next = NULL;
51329 + }
51330 +
51331 + return 0;
51332 +}
51333 +
51334 +static struct acl_subject_label *
51335 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51336 +{
51337 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51338 + unsigned int len;
51339 + char *tmp;
51340 + __u32 num_objs;
51341 + struct acl_ip_label **i_tmp, *i_utmp2;
51342 + struct gr_hash_struct ghash;
51343 + struct subject_map *subjmap;
51344 + unsigned int i_num;
51345 + int err;
51346 +
51347 + s_tmp = lookup_subject_map(userp);
51348 +
51349 + /* we've already copied this subject into the kernel, just return
51350 + the reference to it, and don't copy it over again
51351 + */
51352 + if (s_tmp)
51353 + return(s_tmp);
51354 +
51355 + if ((s_tmp = (struct acl_subject_label *)
51356 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51357 + return ERR_PTR(-ENOMEM);
51358 +
51359 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51360 + if (subjmap == NULL)
51361 + return ERR_PTR(-ENOMEM);
51362 +
51363 + subjmap->user = userp;
51364 + subjmap->kernel = s_tmp;
51365 + insert_subj_map_entry(subjmap);
51366 +
51367 + if (copy_from_user(s_tmp, userp,
51368 + sizeof (struct acl_subject_label)))
51369 + return ERR_PTR(-EFAULT);
51370 +
51371 + len = strnlen_user(s_tmp->filename, PATH_MAX);
51372 +
51373 + if (!len || len >= PATH_MAX)
51374 + return ERR_PTR(-EINVAL);
51375 +
51376 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51377 + return ERR_PTR(-ENOMEM);
51378 +
51379 + if (copy_from_user(tmp, s_tmp->filename, len))
51380 + return ERR_PTR(-EFAULT);
51381 + tmp[len-1] = '\0';
51382 + s_tmp->filename = tmp;
51383 +
51384 + if (!strcmp(s_tmp->filename, "/"))
51385 + role->root_label = s_tmp;
51386 +
51387 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
51388 + return ERR_PTR(-EFAULT);
51389 +
51390 + /* copy user and group transition tables */
51391 +
51392 + if (s_tmp->user_trans_num) {
51393 + uid_t *uidlist;
51394 +
51395 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
51396 + if (uidlist == NULL)
51397 + return ERR_PTR(-ENOMEM);
51398 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
51399 + return ERR_PTR(-EFAULT);
51400 +
51401 + s_tmp->user_transitions = uidlist;
51402 + }
51403 +
51404 + if (s_tmp->group_trans_num) {
51405 + gid_t *gidlist;
51406 +
51407 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
51408 + if (gidlist == NULL)
51409 + return ERR_PTR(-ENOMEM);
51410 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
51411 + return ERR_PTR(-EFAULT);
51412 +
51413 + s_tmp->group_transitions = gidlist;
51414 + }
51415 +
51416 + /* set up object hash table */
51417 + num_objs = count_user_objs(ghash.first);
51418 +
51419 + s_tmp->obj_hash_size = num_objs;
51420 + s_tmp->obj_hash =
51421 + (struct acl_object_label **)
51422 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
51423 +
51424 + if (!s_tmp->obj_hash)
51425 + return ERR_PTR(-ENOMEM);
51426 +
51427 + memset(s_tmp->obj_hash, 0,
51428 + s_tmp->obj_hash_size *
51429 + sizeof (struct acl_object_label *));
51430 +
51431 + /* add in objects */
51432 + err = copy_user_objs(ghash.first, s_tmp, role);
51433 +
51434 + if (err)
51435 + return ERR_PTR(err);
51436 +
51437 + /* set pointer for parent subject */
51438 + if (s_tmp->parent_subject) {
51439 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
51440 +
51441 + if (IS_ERR(s_tmp2))
51442 + return s_tmp2;
51443 +
51444 + s_tmp->parent_subject = s_tmp2;
51445 + }
51446 +
51447 + /* add in ip acls */
51448 +
51449 + if (!s_tmp->ip_num) {
51450 + s_tmp->ips = NULL;
51451 + goto insert;
51452 + }
51453 +
51454 + i_tmp =
51455 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
51456 + sizeof (struct acl_ip_label *));
51457 +
51458 + if (!i_tmp)
51459 + return ERR_PTR(-ENOMEM);
51460 +
51461 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
51462 + *(i_tmp + i_num) =
51463 + (struct acl_ip_label *)
51464 + acl_alloc(sizeof (struct acl_ip_label));
51465 + if (!*(i_tmp + i_num))
51466 + return ERR_PTR(-ENOMEM);
51467 +
51468 + if (copy_from_user
51469 + (&i_utmp2, s_tmp->ips + i_num,
51470 + sizeof (struct acl_ip_label *)))
51471 + return ERR_PTR(-EFAULT);
51472 +
51473 + if (copy_from_user
51474 + (*(i_tmp + i_num), i_utmp2,
51475 + sizeof (struct acl_ip_label)))
51476 + return ERR_PTR(-EFAULT);
51477 +
51478 + if ((*(i_tmp + i_num))->iface == NULL)
51479 + continue;
51480 +
51481 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
51482 + if (!len || len >= IFNAMSIZ)
51483 + return ERR_PTR(-EINVAL);
51484 + tmp = acl_alloc(len);
51485 + if (tmp == NULL)
51486 + return ERR_PTR(-ENOMEM);
51487 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
51488 + return ERR_PTR(-EFAULT);
51489 + (*(i_tmp + i_num))->iface = tmp;
51490 + }
51491 +
51492 + s_tmp->ips = i_tmp;
51493 +
51494 +insert:
51495 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
51496 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
51497 + return ERR_PTR(-ENOMEM);
51498 +
51499 + return s_tmp;
51500 +}
51501 +
51502 +static int
51503 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
51504 +{
51505 + struct acl_subject_label s_pre;
51506 + struct acl_subject_label * ret;
51507 + int err;
51508 +
51509 + while (userp) {
51510 + if (copy_from_user(&s_pre, userp,
51511 + sizeof (struct acl_subject_label)))
51512 + return -EFAULT;
51513 +
51514 + /* do not add nested subjects here, add
51515 + while parsing objects
51516 + */
51517 +
51518 + if (s_pre.mode & GR_NESTED) {
51519 + userp = s_pre.prev;
51520 + continue;
51521 + }
51522 +
51523 + ret = do_copy_user_subj(userp, role);
51524 +
51525 + err = PTR_ERR(ret);
51526 + if (IS_ERR(ret))
51527 + return err;
51528 +
51529 + insert_acl_subj_label(ret, role);
51530 +
51531 + userp = s_pre.prev;
51532 + }
51533 +
51534 + return 0;
51535 +}
51536 +
51537 +static int
51538 +copy_user_acl(struct gr_arg *arg)
51539 +{
51540 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
51541 + struct sprole_pw *sptmp;
51542 + struct gr_hash_struct *ghash;
51543 + uid_t *domainlist;
51544 + unsigned int r_num;
51545 + unsigned int len;
51546 + char *tmp;
51547 + int err = 0;
51548 + __u16 i;
51549 + __u32 num_subjs;
51550 +
51551 + /* we need a default and kernel role */
51552 + if (arg->role_db.num_roles < 2)
51553 + return -EINVAL;
51554 +
51555 + /* copy special role authentication info from userspace */
51556 +
51557 + num_sprole_pws = arg->num_sprole_pws;
51558 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
51559 +
51560 + if (!acl_special_roles && num_sprole_pws)
51561 + return -ENOMEM;
51562 +
51563 + for (i = 0; i < num_sprole_pws; i++) {
51564 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
51565 + if (!sptmp)
51566 + return -ENOMEM;
51567 + if (copy_from_user(sptmp, arg->sprole_pws + i,
51568 + sizeof (struct sprole_pw)))
51569 + return -EFAULT;
51570 +
51571 + len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
51572 +
51573 + if (!len || len >= GR_SPROLE_LEN)
51574 + return -EINVAL;
51575 +
51576 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51577 + return -ENOMEM;
51578 +
51579 + if (copy_from_user(tmp, sptmp->rolename, len))
51580 + return -EFAULT;
51581 +
51582 + tmp[len-1] = '\0';
51583 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51584 + printk(KERN_ALERT "Copying special role %s\n", tmp);
51585 +#endif
51586 + sptmp->rolename = tmp;
51587 + acl_special_roles[i] = sptmp;
51588 + }
51589 +
51590 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
51591 +
51592 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
51593 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
51594 +
51595 + if (!r_tmp)
51596 + return -ENOMEM;
51597 +
51598 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
51599 + sizeof (struct acl_role_label *)))
51600 + return -EFAULT;
51601 +
51602 + if (copy_from_user(r_tmp, r_utmp2,
51603 + sizeof (struct acl_role_label)))
51604 + return -EFAULT;
51605 +
51606 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
51607 +
51608 + if (!len || len >= PATH_MAX)
51609 + return -EINVAL;
51610 +
51611 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51612 + return -ENOMEM;
51613 +
51614 + if (copy_from_user(tmp, r_tmp->rolename, len))
51615 + return -EFAULT;
51616 +
51617 + tmp[len-1] = '\0';
51618 + r_tmp->rolename = tmp;
51619 +
51620 + if (!strcmp(r_tmp->rolename, "default")
51621 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
51622 + default_role = r_tmp;
51623 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
51624 + kernel_role = r_tmp;
51625 + }
51626 +
51627 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
51628 + return -ENOMEM;
51629 +
51630 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
51631 + return -EFAULT;
51632 +
51633 + r_tmp->hash = ghash;
51634 +
51635 + num_subjs = count_user_subjs(r_tmp->hash->first);
51636 +
51637 + r_tmp->subj_hash_size = num_subjs;
51638 + r_tmp->subj_hash =
51639 + (struct acl_subject_label **)
51640 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
51641 +
51642 + if (!r_tmp->subj_hash)
51643 + return -ENOMEM;
51644 +
51645 + err = copy_user_allowedips(r_tmp);
51646 + if (err)
51647 + return err;
51648 +
51649 + /* copy domain info */
51650 + if (r_tmp->domain_children != NULL) {
51651 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
51652 + if (domainlist == NULL)
51653 + return -ENOMEM;
51654 +
51655 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
51656 + return -EFAULT;
51657 +
51658 + r_tmp->domain_children = domainlist;
51659 + }
51660 +
51661 + err = copy_user_transitions(r_tmp);
51662 + if (err)
51663 + return err;
51664 +
51665 + memset(r_tmp->subj_hash, 0,
51666 + r_tmp->subj_hash_size *
51667 + sizeof (struct acl_subject_label *));
51668 +
51669 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
51670 +
51671 + if (err)
51672 + return err;
51673 +
51674 + /* set nested subject list to null */
51675 + r_tmp->hash->first = NULL;
51676 +
51677 + insert_acl_role_label(r_tmp);
51678 + }
51679 +
51680 + if (default_role == NULL || kernel_role == NULL)
51681 + return -EINVAL;
51682 +
51683 + return err;
51684 +}
51685 +
51686 +static int
51687 +gracl_init(struct gr_arg *args)
51688 +{
51689 + int error = 0;
51690 +
51691 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
51692 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
51693 +
51694 + if (init_variables(args)) {
51695 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
51696 + error = -ENOMEM;
51697 + free_variables();
51698 + goto out;
51699 + }
51700 +
51701 + error = copy_user_acl(args);
51702 + free_init_variables();
51703 + if (error) {
51704 + free_variables();
51705 + goto out;
51706 + }
51707 +
51708 + if ((error = gr_set_acls(0))) {
51709 + free_variables();
51710 + goto out;
51711 + }
51712 +
51713 + pax_open_kernel();
51714 + gr_status |= GR_READY;
51715 + pax_close_kernel();
51716 +
51717 + out:
51718 + return error;
51719 +}
51720 +
51721 +/* derived from glibc fnmatch(); 0: match, 1: no match */
51722 +
51723 +static int
51724 +glob_match(const char *p, const char *n)
51725 +{
51726 + char c;
51727 +
51728 + while ((c = *p++) != '\0') {
51729 + switch (c) {
51730 + case '?':
51731 + if (*n == '\0')
51732 + return 1;
51733 + else if (*n == '/')
51734 + return 1;
51735 + break;
51736 + case '\\':
51737 + if (*n != c)
51738 + return 1;
51739 + break;
51740 + case '*':
51741 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
51742 + if (*n == '/')
51743 + return 1;
51744 + else if (c == '?') {
51745 + if (*n == '\0')
51746 + return 1;
51747 + else
51748 + ++n;
51749 + }
51750 + }
51751 + if (c == '\0') {
51752 + return 0;
51753 + } else {
51754 + const char *endp;
51755 +
51756 + if ((endp = strchr(n, '/')) == NULL)
51757 + endp = n + strlen(n);
51758 +
51759 + if (c == '[') {
51760 + for (--p; n < endp; ++n)
51761 + if (!glob_match(p, n))
51762 + return 0;
51763 + } else if (c == '/') {
51764 + while (*n != '\0' && *n != '/')
51765 + ++n;
51766 + if (*n == '/' && !glob_match(p, n + 1))
51767 + return 0;
51768 + } else {
51769 + for (--p; n < endp; ++n)
51770 + if (*n == c && !glob_match(p, n))
51771 + return 0;
51772 + }
51773 +
51774 + return 1;
51775 + }
51776 + case '[':
51777 + {
51778 + int not;
51779 + char cold;
51780 +
51781 + if (*n == '\0' || *n == '/')
51782 + return 1;
51783 +
51784 + not = (*p == '!' || *p == '^');
51785 + if (not)
51786 + ++p;
51787 +
51788 + c = *p++;
51789 + for (;;) {
51790 + unsigned char fn = (unsigned char)*n;
51791 +
51792 + if (c == '\0')
51793 + return 1;
51794 + else {
51795 + if (c == fn)
51796 + goto matched;
51797 + cold = c;
51798 + c = *p++;
51799 +
51800 + if (c == '-' && *p != ']') {
51801 + unsigned char cend = *p++;
51802 +
51803 + if (cend == '\0')
51804 + return 1;
51805 +
51806 + if (cold <= fn && fn <= cend)
51807 + goto matched;
51808 +
51809 + c = *p++;
51810 + }
51811 + }
51812 +
51813 + if (c == ']')
51814 + break;
51815 + }
51816 + if (!not)
51817 + return 1;
51818 + break;
51819 + matched:
51820 + while (c != ']') {
51821 + if (c == '\0')
51822 + return 1;
51823 +
51824 + c = *p++;
51825 + }
51826 + if (not)
51827 + return 1;
51828 + }
51829 + break;
51830 + default:
51831 + if (c != *n)
51832 + return 1;
51833 + }
51834 +
51835 + ++n;
51836 + }
51837 +
51838 + if (*n == '\0')
51839 + return 0;
51840 +
51841 + if (*n == '/')
51842 + return 0;
51843 +
51844 + return 1;
51845 +}
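
The matcher keeps glibc fnmatch()'s return convention (0 on match, nonzero otherwise) while treating '/' as a component boundary for '?' and for '*' used mid-pattern. A minimal userspace harness, assuming the function above is lifted out of kernel context with the same prototype, could exercise it like this:

#include <assert.h>
#include <stdio.h>

/* assumed prototype; 0 = match, nonzero = no match, as in the code above */
int glob_match(const char *pattern, const char *name);

int main(void)
{
	assert(glob_match("/bin/ba?h", "/bin/bash") == 0);           /* '?' matches one non-'/' char */
	assert(glob_match("/bin/*sh", "/bin/dash") == 0);            /* '*' within a path component */
	assert(glob_match("/bin/ba[sz]h", "/bin/bash") == 0);        /* character class */
	assert(glob_match("/home/*/.ssh", "/home/alice/.ssh") == 0); /* '*' stops at the next '/' */
	assert(glob_match("/bin/ba?h", "/bin/bas") != 0);            /* too short: no match */
	puts("glob_match behaved as expected");
	return 0;
}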
51846 +
51847 +static struct acl_object_label *
51848 +chk_glob_label(struct acl_object_label *globbed,
51849 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
51850 +{
51851 + struct acl_object_label *tmp;
51852 +
51853 + if (*path == NULL)
51854 + *path = gr_to_filename_nolock(dentry, mnt);
51855 +
51856 + tmp = globbed;
51857 +
51858 + while (tmp) {
51859 + if (!glob_match(tmp->filename, *path))
51860 + return tmp;
51861 + tmp = tmp->next;
51862 + }
51863 +
51864 + return NULL;
51865 +}
51866 +
51867 +static struct acl_object_label *
51868 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51869 + const ino_t curr_ino, const dev_t curr_dev,
51870 + const struct acl_subject_label *subj, char **path, const int checkglob)
51871 +{
51872 + struct acl_subject_label *tmpsubj;
51873 + struct acl_object_label *retval;
51874 + struct acl_object_label *retval2;
51875 +
51876 + tmpsubj = (struct acl_subject_label *) subj;
51877 + read_lock(&gr_inode_lock);
51878 + do {
51879 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
51880 + if (retval) {
51881 + if (checkglob && retval->globbed) {
51882 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
51883 + if (retval2)
51884 + retval = retval2;
51885 + }
51886 + break;
51887 + }
51888 + } while ((tmpsubj = tmpsubj->parent_subject));
51889 + read_unlock(&gr_inode_lock);
51890 +
51891 + return retval;
51892 +}
51893 +
51894 +static __inline__ struct acl_object_label *
51895 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51896 + struct dentry *curr_dentry,
51897 + const struct acl_subject_label *subj, char **path, const int checkglob)
51898 +{
51899 + int newglob = checkglob;
51900 + ino_t inode;
51901 + dev_t device;
51902 +
51903 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
51904 + as we don't want a / * rule to match instead of the / object
51905 + don't do this for create lookups that call this function though, since they're looking up
51906 + on the parent and thus need globbing checks on all paths
51907 + */
51908 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
51909 + newglob = GR_NO_GLOB;
51910 +
51911 + spin_lock(&curr_dentry->d_lock);
51912 + inode = curr_dentry->d_inode->i_ino;
51913 + device = __get_dev(curr_dentry);
51914 + spin_unlock(&curr_dentry->d_lock);
51915 +
51916 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
51917 +}
51918 +
51919 +static struct acl_object_label *
51920 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51921 + const struct acl_subject_label *subj, char *path, const int checkglob)
51922 +{
51923 + struct dentry *dentry = (struct dentry *) l_dentry;
51924 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
51925 + struct mount *real_mnt = real_mount(mnt);
51926 + struct acl_object_label *retval;
51927 + struct dentry *parent;
51928 +
51929 + write_seqlock(&rename_lock);
51930 + br_read_lock(vfsmount_lock);
51931 +
51932 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
51933 +#ifdef CONFIG_NET
51934 + mnt == sock_mnt ||
51935 +#endif
51936 +#ifdef CONFIG_HUGETLBFS
51937 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
51938 +#endif
51939 + /* ignore Eric Biederman */
51940 + IS_PRIVATE(l_dentry->d_inode))) {
51941 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
51942 + goto out;
51943 + }
51944 +
51945 + for (;;) {
51946 + if (dentry == real_root.dentry && mnt == real_root.mnt)
51947 + break;
51948 +
51949 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
51950 + if (!mnt_has_parent(real_mnt))
51951 + break;
51952 +
51953 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51954 + if (retval != NULL)
51955 + goto out;
51956 +
51957 + dentry = real_mnt->mnt_mountpoint;
51958 + real_mnt = real_mnt->mnt_parent;
51959 + mnt = &real_mnt->mnt;
51960 + continue;
51961 + }
51962 +
51963 + parent = dentry->d_parent;
51964 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51965 + if (retval != NULL)
51966 + goto out;
51967 +
51968 + dentry = parent;
51969 + }
51970 +
51971 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51972 +
51973 + /* real_root is pinned so we don't have to hold a reference */
51974 + if (retval == NULL)
51975 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
51976 +out:
51977 + br_read_unlock(vfsmount_lock);
51978 + write_sequnlock(&rename_lock);
51979 +
51980 + BUG_ON(retval == NULL);
51981 +
51982 + return retval;
51983 +}
51984 +
51985 +static __inline__ struct acl_object_label *
51986 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51987 + const struct acl_subject_label *subj)
51988 +{
51989 + char *path = NULL;
51990 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
51991 +}
51992 +
51993 +static __inline__ struct acl_object_label *
51994 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51995 + const struct acl_subject_label *subj)
51996 +{
51997 + char *path = NULL;
51998 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
51999 +}
52000 +
52001 +static __inline__ struct acl_object_label *
52002 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52003 + const struct acl_subject_label *subj, char *path)
52004 +{
52005 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52006 +}
52007 +
52008 +static struct acl_subject_label *
52009 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52010 + const struct acl_role_label *role)
52011 +{
52012 + struct dentry *dentry = (struct dentry *) l_dentry;
52013 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52014 + struct mount *real_mnt = real_mount(mnt);
52015 + struct acl_subject_label *retval;
52016 + struct dentry *parent;
52017 +
52018 + write_seqlock(&rename_lock);
52019 + br_read_lock(vfsmount_lock);
52020 +
52021 + for (;;) {
52022 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52023 + break;
52024 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52025 + if (!mnt_has_parent(real_mnt))
52026 + break;
52027 +
52028 + spin_lock(&dentry->d_lock);
52029 + read_lock(&gr_inode_lock);
52030 + retval =
52031 + lookup_acl_subj_label(dentry->d_inode->i_ino,
52032 + __get_dev(dentry), role);
52033 + read_unlock(&gr_inode_lock);
52034 + spin_unlock(&dentry->d_lock);
52035 + if (retval != NULL)
52036 + goto out;
52037 +
52038 + dentry = real_mnt->mnt_mountpoint;
52039 + real_mnt = real_mnt->mnt_parent;
52040 + mnt = &real_mnt->mnt;
52041 + continue;
52042 + }
52043 +
52044 + spin_lock(&dentry->d_lock);
52045 + read_lock(&gr_inode_lock);
52046 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52047 + __get_dev(dentry), role);
52048 + read_unlock(&gr_inode_lock);
52049 + parent = dentry->d_parent;
52050 + spin_unlock(&dentry->d_lock);
52051 +
52052 + if (retval != NULL)
52053 + goto out;
52054 +
52055 + dentry = parent;
52056 + }
52057 +
52058 + spin_lock(&dentry->d_lock);
52059 + read_lock(&gr_inode_lock);
52060 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52061 + __get_dev(dentry), role);
52062 + read_unlock(&gr_inode_lock);
52063 + spin_unlock(&dentry->d_lock);
52064 +
52065 + if (unlikely(retval == NULL)) {
52066 + /* real_root is pinned, we don't need to hold a reference */
52067 + read_lock(&gr_inode_lock);
52068 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
52069 + __get_dev(real_root.dentry), role);
52070 + read_unlock(&gr_inode_lock);
52071 + }
52072 +out:
52073 + br_read_unlock(vfsmount_lock);
52074 + write_sequnlock(&rename_lock);
52075 +
52076 + BUG_ON(retval == NULL);
52077 +
52078 + return retval;
52079 +}
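
Both __chk_obj_label() and chk_subj_label() resolve a file by walking from the object up through its parents (and across mount points) until an inode/device pair with a label is found, falling back to the pinned real root. A rough userspace analogue of that closest-labeled-ancestor search, using path strings instead of dentries and a hypothetical is_labeled() predicate, is:

#include <stdio.h>
#include <string.h>

/* hypothetical predicate: does the policy carry a label for this exact path? */
static int is_labeled(const char *path)
{
	static const char *labeled[] = { "/", "/home", "/home/alice/.ssh", NULL };
	for (int i = 0; labeled[i]; i++)
		if (!strcmp(labeled[i], path))
			return 1;
	return 0;
}

/* find the closest labeled ancestor of 'path', mirroring the walk toward real_root */
static void closest_label(const char *path, char *out, size_t outlen)
{
	char buf[256];

	snprintf(buf, sizeof(buf), "%s", path);
	for (;;) {
		if (is_labeled(buf)) {
			snprintf(out, outlen, "%s", buf);
			return;
		}
		char *slash = strrchr(buf, '/');
		if (!slash || slash == buf) {
			snprintf(out, outlen, "/");   /* fall back to "/", as the code falls back to real_root */
			return;
		}
		*slash = '\0';                        /* step up to the parent directory */
	}
}

int main(void)
{
	char label[256];
	closest_label("/home/alice/.ssh/id_rsa", label, sizeof(label));
	printf("label used: %s\n", label);            /* prints /home/alice/.ssh */
	return 0;
}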
52080 +
52081 +static void
52082 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
52083 +{
52084 + struct task_struct *task = current;
52085 + const struct cred *cred = current_cred();
52086 +
52087 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52088 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52089 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52090 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
52091 +
52092 + return;
52093 +}
52094 +
52095 +static void
52096 +gr_log_learn_id_change(const char type, const unsigned int real,
52097 + const unsigned int effective, const unsigned int fs)
52098 +{
52099 + struct task_struct *task = current;
52100 + const struct cred *cred = current_cred();
52101 +
52102 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
52103 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52104 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52105 + type, real, effective, fs, &task->signal->saved_ip);
52106 +
52107 + return;
52108 +}
52109 +
52110 +__u32
52111 +gr_search_file(const struct dentry * dentry, const __u32 mode,
52112 + const struct vfsmount * mnt)
52113 +{
52114 + __u32 retval = mode;
52115 + struct acl_subject_label *curracl;
52116 + struct acl_object_label *currobj;
52117 +
52118 + if (unlikely(!(gr_status & GR_READY)))
52119 + return (mode & ~GR_AUDITS);
52120 +
52121 + curracl = current->acl;
52122 +
52123 + currobj = chk_obj_label(dentry, mnt, curracl);
52124 + retval = currobj->mode & mode;
52125 +
52126 + /* if we're opening a specified transfer file for writing
52127 + (e.g. /dev/initctl), then transfer our role to init
52128 + */
52129 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
52130 + current->role->roletype & GR_ROLE_PERSIST)) {
52131 + struct task_struct *task = init_pid_ns.child_reaper;
52132 +
52133 + if (task->role != current->role) {
52134 + task->acl_sp_role = 0;
52135 + task->acl_role_id = current->acl_role_id;
52136 + task->role = current->role;
52137 + rcu_read_lock();
52138 + read_lock(&grsec_exec_file_lock);
52139 + gr_apply_subject_to_task(task);
52140 + read_unlock(&grsec_exec_file_lock);
52141 + rcu_read_unlock();
52142 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
52143 + }
52144 + }
52145 +
52146 + if (unlikely
52147 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52148 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52149 + __u32 new_mode = mode;
52150 +
52151 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52152 +
52153 + retval = new_mode;
52154 +
52155 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52156 + new_mode |= GR_INHERIT;
52157 +
52158 + if (!(mode & GR_NOLEARN))
52159 + gr_log_learn(dentry, mnt, new_mode);
52160 + }
52161 +
52162 + return retval;
52163 +}
52164 +
52165 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52166 + const struct dentry *parent,
52167 + const struct vfsmount *mnt)
52168 +{
52169 + struct name_entry *match;
52170 + struct acl_object_label *matchpo;
52171 + struct acl_subject_label *curracl;
52172 + char *path;
52173 +
52174 + if (unlikely(!(gr_status & GR_READY)))
52175 + return NULL;
52176 +
52177 + preempt_disable();
52178 + path = gr_to_filename_rbac(new_dentry, mnt);
52179 + match = lookup_name_entry_create(path);
52180 +
52181 + curracl = current->acl;
52182 +
52183 + if (match) {
52184 + read_lock(&gr_inode_lock);
52185 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
52186 + read_unlock(&gr_inode_lock);
52187 +
52188 + if (matchpo) {
52189 + preempt_enable();
52190 + return matchpo;
52191 + }
52192 + }
52193 +
52194 + // lookup parent
52195 +
52196 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
52197 +
52198 + preempt_enable();
52199 + return matchpo;
52200 +}
52201 +
52202 +__u32
52203 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
52204 + const struct vfsmount * mnt, const __u32 mode)
52205 +{
52206 + struct acl_object_label *matchpo;
52207 + __u32 retval;
52208 +
52209 + if (unlikely(!(gr_status & GR_READY)))
52210 + return (mode & ~GR_AUDITS);
52211 +
52212 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
52213 +
52214 + retval = matchpo->mode & mode;
52215 +
52216 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
52217 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52218 + __u32 new_mode = mode;
52219 +
52220 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52221 +
52222 + gr_log_learn(new_dentry, mnt, new_mode);
52223 + return new_mode;
52224 + }
52225 +
52226 + return retval;
52227 +}
52228 +
52229 +__u32
52230 +gr_check_link(const struct dentry * new_dentry,
52231 + const struct dentry * parent_dentry,
52232 + const struct vfsmount * parent_mnt,
52233 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52234 +{
52235 + struct acl_object_label *obj;
52236 + __u32 oldmode, newmode;
52237 + __u32 needmode;
52238 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52239 + GR_DELETE | GR_INHERIT;
52240 +
52241 + if (unlikely(!(gr_status & GR_READY)))
52242 + return (GR_CREATE | GR_LINK);
52243 +
52244 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52245 + oldmode = obj->mode;
52246 +
52247 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52248 + newmode = obj->mode;
52249 +
52250 + needmode = newmode & checkmodes;
52251 +
52252 + // old name for hardlink must have at least the permissions of the new name
52253 + if ((oldmode & needmode) != needmode)
52254 + goto bad;
52255 +
52256 + // if old name had restrictions/auditing, make sure the new name does as well
52257 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52258 +
52259 + // don't allow hardlinking of suid/sgid files without permission
52260 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52261 + needmode |= GR_SETID;
52262 +
52263 + if ((newmode & needmode) != needmode)
52264 + goto bad;
52265 +
52266 + // enforce minimum permissions
52267 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52268 + return newmode;
52269 +bad:
52270 + needmode = oldmode;
52271 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52272 + needmode |= GR_SETID;
52273 +
52274 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52275 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52276 + return (GR_CREATE | GR_LINK);
52277 + } else if (newmode & GR_SUPPRESS)
52278 + return GR_SUPPRESS;
52279 + else
52280 + return 0;
52281 +}
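
The hardlink decision reduces to two bitmask inclusion tests: the existing name must already grant every checked permission that the new name would grant, and every restriction/audit bit carried by the old name (plus GR_SETID for setuid/setgid inodes) must also be present on the new name. A schematic sketch of those two gates, with hypothetical parameter names:

/* true if 'granted' contains every bit set in 'required' */
static inline int covers(unsigned int granted, unsigned int required)
{
	return (granted & required) == required;
}

/* schematic form of the two checks performed by gr_check_link() above */
static int link_masks_ok(unsigned int oldmode, unsigned int newmode,
			 unsigned int checkmodes, unsigned int carrymodes)
{
	return covers(oldmode, newmode & checkmodes) &&  /* old name grants what the new name grants */
	       covers(newmode, oldmode & carrymodes);    /* new name keeps old restrictions/audits */
}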
52282 +
52283 +int
52284 +gr_check_hidden_task(const struct task_struct *task)
52285 +{
52286 + if (unlikely(!(gr_status & GR_READY)))
52287 + return 0;
52288 +
52289 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52290 + return 1;
52291 +
52292 + return 0;
52293 +}
52294 +
52295 +int
52296 +gr_check_protected_task(const struct task_struct *task)
52297 +{
52298 + if (unlikely(!(gr_status & GR_READY) || !task))
52299 + return 0;
52300 +
52301 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52302 + task->acl != current->acl)
52303 + return 1;
52304 +
52305 + return 0;
52306 +}
52307 +
52308 +int
52309 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52310 +{
52311 + struct task_struct *p;
52312 + int ret = 0;
52313 +
52314 + if (unlikely(!(gr_status & GR_READY) || !pid))
52315 + return ret;
52316 +
52317 + read_lock(&tasklist_lock);
52318 + do_each_pid_task(pid, type, p) {
52319 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52320 + p->acl != current->acl) {
52321 + ret = 1;
52322 + goto out;
52323 + }
52324 + } while_each_pid_task(pid, type, p);
52325 +out:
52326 + read_unlock(&tasklist_lock);
52327 +
52328 + return ret;
52329 +}
52330 +
52331 +void
52332 +gr_copy_label(struct task_struct *tsk)
52333 +{
52334 + /* plain copying of fields is already done by dup_task_struct */
52335 + tsk->signal->used_accept = 0;
52336 + tsk->acl_sp_role = 0;
52337 + //tsk->acl_role_id = current->acl_role_id;
52338 + //tsk->acl = current->acl;
52339 + //tsk->role = current->role;
52340 + tsk->signal->curr_ip = current->signal->curr_ip;
52341 + tsk->signal->saved_ip = current->signal->saved_ip;
52342 + if (current->exec_file)
52343 + get_file(current->exec_file);
52344 + //tsk->exec_file = current->exec_file;
52345 + //tsk->is_writable = current->is_writable;
52346 + if (unlikely(current->signal->used_accept)) {
52347 + current->signal->curr_ip = 0;
52348 + current->signal->saved_ip = 0;
52349 + }
52350 +
52351 + return;
52352 +}
52353 +
52354 +static void
52355 +gr_set_proc_res(struct task_struct *task)
52356 +{
52357 + struct acl_subject_label *proc;
52358 + unsigned short i;
52359 +
52360 + proc = task->acl;
52361 +
52362 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
52363 + return;
52364 +
52365 + for (i = 0; i < RLIM_NLIMITS; i++) {
52366 + if (!(proc->resmask & (1 << i)))
52367 + continue;
52368 +
52369 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
52370 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
52371 + }
52372 +
52373 + return;
52374 +}
52375 +
52376 +extern int __gr_process_user_ban(struct user_struct *user);
52377 +
52378 +int
52379 +gr_check_user_change(int real, int effective, int fs)
52380 +{
52381 + unsigned int i;
52382 + __u16 num;
52383 + uid_t *uidlist;
52384 + int curuid;
52385 + int realok = 0;
52386 + int effectiveok = 0;
52387 + int fsok = 0;
52388 +
52389 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52390 + struct user_struct *user;
52391 +
52392 + if (real == -1)
52393 + goto skipit;
52394 +
52395 + user = find_user(real);
52396 + if (user == NULL)
52397 + goto skipit;
52398 +
52399 + if (__gr_process_user_ban(user)) {
52400 + /* for find_user */
52401 + free_uid(user);
52402 + return 1;
52403 + }
52404 +
52405 + /* for find_user */
52406 + free_uid(user);
52407 +
52408 +skipit:
52409 +#endif
52410 +
52411 + if (unlikely(!(gr_status & GR_READY)))
52412 + return 0;
52413 +
52414 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52415 + gr_log_learn_id_change('u', real, effective, fs);
52416 +
52417 + num = current->acl->user_trans_num;
52418 + uidlist = current->acl->user_transitions;
52419 +
52420 + if (uidlist == NULL)
52421 + return 0;
52422 +
52423 + if (real == -1)
52424 + realok = 1;
52425 + if (effective == -1)
52426 + effectiveok = 1;
52427 + if (fs == -1)
52428 + fsok = 1;
52429 +
52430 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
52431 + for (i = 0; i < num; i++) {
52432 + curuid = (int)uidlist[i];
52433 + if (real == curuid)
52434 + realok = 1;
52435 + if (effective == curuid)
52436 + effectiveok = 1;
52437 + if (fs == curuid)
52438 + fsok = 1;
52439 + }
52440 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
52441 + for (i = 0; i < num; i++) {
52442 + curuid = (int)uidlist[i];
52443 + if (real == curuid)
52444 + break;
52445 + if (effective == curuid)
52446 + break;
52447 + if (fs == curuid)
52448 + break;
52449 + }
52450 + /* not in deny list */
52451 + if (i == num) {
52452 + realok = 1;
52453 + effectiveok = 1;
52454 + fsok = 1;
52455 + }
52456 + }
52457 +
52458 + if (realok && effectiveok && fsok)
52459 + return 0;
52460 + else {
52461 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52462 + return 1;
52463 + }
52464 +}
52465 +
52466 +int
52467 +gr_check_group_change(int real, int effective, int fs)
52468 +{
52469 + unsigned int i;
52470 + __u16 num;
52471 + gid_t *gidlist;
52472 + int curgid;
52473 + int realok = 0;
52474 + int effectiveok = 0;
52475 + int fsok = 0;
52476 +
52477 + if (unlikely(!(gr_status & GR_READY)))
52478 + return 0;
52479 +
52480 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52481 + gr_log_learn_id_change('g', real, effective, fs);
52482 +
52483 + num = current->acl->group_trans_num;
52484 + gidlist = current->acl->group_transitions;
52485 +
52486 + if (gidlist == NULL)
52487 + return 0;
52488 +
52489 + if (real == -1)
52490 + realok = 1;
52491 + if (effective == -1)
52492 + effectiveok = 1;
52493 + if (fs == -1)
52494 + fsok = 1;
52495 +
52496 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
52497 + for (i = 0; i < num; i++) {
52498 + curgid = (int)gidlist[i];
52499 + if (real == curgid)
52500 + realok = 1;
52501 + if (effective == curgid)
52502 + effectiveok = 1;
52503 + if (fs == curgid)
52504 + fsok = 1;
52505 + }
52506 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
52507 + for (i = 0; i < num; i++) {
52508 + curgid = (int)gidlist[i];
52509 + if (real == curgid)
52510 + break;
52511 + if (effective == curgid)
52512 + break;
52513 + if (fs == curgid)
52514 + break;
52515 + }
52516 + /* not in deny list */
52517 + if (i == num) {
52518 + realok = 1;
52519 + effectiveok = 1;
52520 + fsok = 1;
52521 + }
52522 + }
52523 +
52524 + if (realok && effectiveok && fsok)
52525 + return 0;
52526 + else {
52527 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52528 + return 1;
52529 + }
52530 +}
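
gr_check_user_change() and gr_check_group_change() apply the same rule: an id of -1 means "unchanged" and is always acceptable; with an allow list every changed id must appear in the list, and with a deny list any listed id rejects the change. A condensed userspace sketch of that decision, with hypothetical names:

#include <stdbool.h>
#include <stddef.h>

enum trans_type { TRANS_ALLOW, TRANS_DENY };

/* returns true if a change to the (real, effective, fs) ids is permitted;
 * an id of -1 means "leave unchanged" and is always acceptable */
static bool id_change_ok(enum trans_type type, const int *list, size_t n,
			 int real, int effective, int fs)
{
	bool realok = (real == -1), effok = (effective == -1), fsok = (fs == -1);

	for (size_t i = 0; i < n; i++) {
		if (type == TRANS_DENY) {
			if (list[i] == real || list[i] == effective || list[i] == fs)
				return false;        /* deny list: any listed id rejects */
			continue;
		}
		if (list[i] == real)      realok = true;
		if (list[i] == effective) effok  = true;
		if (list[i] == fs)        fsok   = true;
	}
	if (type == TRANS_DENY)
		return true;                         /* nothing on the deny list matched */
	return realok && effok && fsok;              /* allow list: every changed id was listed */
}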
52531 +
52532 +extern int gr_acl_is_capable(const int cap);
52533 +
52534 +void
52535 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
52536 +{
52537 + struct acl_role_label *role = task->role;
52538 + struct acl_subject_label *subj = NULL;
52539 + struct acl_object_label *obj;
52540 + struct file *filp;
52541 +
52542 + if (unlikely(!(gr_status & GR_READY)))
52543 + return;
52544 +
52545 + filp = task->exec_file;
52546 +
52547 + /* kernel process, we'll give them the kernel role */
52548 + if (unlikely(!filp)) {
52549 + task->role = kernel_role;
52550 + task->acl = kernel_role->root_label;
52551 + return;
52552 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
52553 + role = lookup_acl_role_label(task, uid, gid);
52554 +
52555 + /* don't change the role if we're not a privileged process */
52556 + if (role && task->role != role &&
52557 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
52558 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
52559 + return;
52560 +
52561 + /* perform subject lookup in possibly new role
52562 + we can use this result below in the case where role == task->role
52563 + */
52564 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
52565 +
52566 +	/* if we changed uid/gid but ended up with the same role
52567 +	   and are using inheritance, don't lose the inherited subject:
52568 +	   if the current subject differs from what a normal lookup
52569 +	   would produce, we arrived at it via inheritance, so keep
52570 +	   that subject
52571 +	 */
52572 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
52573 + (subj == task->acl)))
52574 + task->acl = subj;
52575 +
52576 + task->role = role;
52577 +
52578 + task->is_writable = 0;
52579 +
52580 + /* ignore additional mmap checks for processes that are writable
52581 + by the default ACL */
52582 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52583 + if (unlikely(obj->mode & GR_WRITE))
52584 + task->is_writable = 1;
52585 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
52586 + if (unlikely(obj->mode & GR_WRITE))
52587 + task->is_writable = 1;
52588 +
52589 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52590 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52591 +#endif
52592 +
52593 + gr_set_proc_res(task);
52594 +
52595 + return;
52596 +}
52597 +
52598 +int
52599 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
52600 + const int unsafe_flags)
52601 +{
52602 + struct task_struct *task = current;
52603 + struct acl_subject_label *newacl;
52604 + struct acl_object_label *obj;
52605 + __u32 retmode;
52606 +
52607 + if (unlikely(!(gr_status & GR_READY)))
52608 + return 0;
52609 +
52610 + newacl = chk_subj_label(dentry, mnt, task->role);
52611 +
52612 + /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
52613 + did an exec
52614 + */
52615 + rcu_read_lock();
52616 + read_lock(&tasklist_lock);
52617 + if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
52618 + (task->parent->acl->mode & GR_POVERRIDE))) {
52619 + read_unlock(&tasklist_lock);
52620 + rcu_read_unlock();
52621 + goto skip_check;
52622 + }
52623 + read_unlock(&tasklist_lock);
52624 + rcu_read_unlock();
52625 +
52626 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
52627 + !(task->role->roletype & GR_ROLE_GOD) &&
52628 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
52629 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52630 + if (unsafe_flags & LSM_UNSAFE_SHARE)
52631 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
52632 + else
52633 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
52634 + return -EACCES;
52635 + }
52636 +
52637 +skip_check:
52638 +
52639 + obj = chk_obj_label(dentry, mnt, task->acl);
52640 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
52641 +
52642 + if (!(task->acl->mode & GR_INHERITLEARN) &&
52643 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
52644 + if (obj->nested)
52645 + task->acl = obj->nested;
52646 + else
52647 + task->acl = newacl;
52648 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
52649 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
52650 +
52651 + task->is_writable = 0;
52652 +
52653 + /* ignore additional mmap checks for processes that are writable
52654 + by the default ACL */
52655 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
52656 + if (unlikely(obj->mode & GR_WRITE))
52657 + task->is_writable = 1;
52658 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
52659 + if (unlikely(obj->mode & GR_WRITE))
52660 + task->is_writable = 1;
52661 +
52662 + gr_set_proc_res(task);
52663 +
52664 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52665 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52666 +#endif
52667 + return 0;
52668 +}
52669 +
52670 +/* always called with valid inodev ptr */
52671 +static void
52672 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
52673 +{
52674 + struct acl_object_label *matchpo;
52675 + struct acl_subject_label *matchps;
52676 + struct acl_subject_label *subj;
52677 + struct acl_role_label *role;
52678 + unsigned int x;
52679 +
52680 + FOR_EACH_ROLE_START(role)
52681 + FOR_EACH_SUBJECT_START(role, subj, x)
52682 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
52683 + matchpo->mode |= GR_DELETED;
52684 + FOR_EACH_SUBJECT_END(subj,x)
52685 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
52686 + if (subj->inode == ino && subj->device == dev)
52687 + subj->mode |= GR_DELETED;
52688 + FOR_EACH_NESTED_SUBJECT_END(subj)
52689 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
52690 + matchps->mode |= GR_DELETED;
52691 + FOR_EACH_ROLE_END(role)
52692 +
52693 + inodev->nentry->deleted = 1;
52694 +
52695 + return;
52696 +}
52697 +
52698 +void
52699 +gr_handle_delete(const ino_t ino, const dev_t dev)
52700 +{
52701 + struct inodev_entry *inodev;
52702 +
52703 + if (unlikely(!(gr_status & GR_READY)))
52704 + return;
52705 +
52706 + write_lock(&gr_inode_lock);
52707 + inodev = lookup_inodev_entry(ino, dev);
52708 + if (inodev != NULL)
52709 + do_handle_delete(inodev, ino, dev);
52710 + write_unlock(&gr_inode_lock);
52711 +
52712 + return;
52713 +}
52714 +
52715 +static void
52716 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
52717 + const ino_t newinode, const dev_t newdevice,
52718 + struct acl_subject_label *subj)
52719 +{
52720 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
52721 + struct acl_object_label *match;
52722 +
52723 + match = subj->obj_hash[index];
52724 +
52725 + while (match && (match->inode != oldinode ||
52726 + match->device != olddevice ||
52727 + !(match->mode & GR_DELETED)))
52728 + match = match->next;
52729 +
52730 + if (match && (match->inode == oldinode)
52731 + && (match->device == olddevice)
52732 + && (match->mode & GR_DELETED)) {
52733 + if (match->prev == NULL) {
52734 + subj->obj_hash[index] = match->next;
52735 + if (match->next != NULL)
52736 + match->next->prev = NULL;
52737 + } else {
52738 + match->prev->next = match->next;
52739 + if (match->next != NULL)
52740 + match->next->prev = match->prev;
52741 + }
52742 + match->prev = NULL;
52743 + match->next = NULL;
52744 + match->inode = newinode;
52745 + match->device = newdevice;
52746 + match->mode &= ~GR_DELETED;
52747 +
52748 + insert_acl_obj_label(match, subj);
52749 + }
52750 +
52751 + return;
52752 +}
52753 +
52754 +static void
52755 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
52756 + const ino_t newinode, const dev_t newdevice,
52757 + struct acl_role_label *role)
52758 +{
52759 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
52760 + struct acl_subject_label *match;
52761 +
52762 + match = role->subj_hash[index];
52763 +
52764 + while (match && (match->inode != oldinode ||
52765 + match->device != olddevice ||
52766 + !(match->mode & GR_DELETED)))
52767 + match = match->next;
52768 +
52769 + if (match && (match->inode == oldinode)
52770 + && (match->device == olddevice)
52771 + && (match->mode & GR_DELETED)) {
52772 + if (match->prev == NULL) {
52773 + role->subj_hash[index] = match->next;
52774 + if (match->next != NULL)
52775 + match->next->prev = NULL;
52776 + } else {
52777 + match->prev->next = match->next;
52778 + if (match->next != NULL)
52779 + match->next->prev = match->prev;
52780 + }
52781 + match->prev = NULL;
52782 + match->next = NULL;
52783 + match->inode = newinode;
52784 + match->device = newdevice;
52785 + match->mode &= ~GR_DELETED;
52786 +
52787 + insert_acl_subj_label(match, role);
52788 + }
52789 +
52790 + return;
52791 +}
52792 +
52793 +static void
52794 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
52795 + const ino_t newinode, const dev_t newdevice)
52796 +{
52797 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
52798 + struct inodev_entry *match;
52799 +
52800 + match = inodev_set.i_hash[index];
52801 +
52802 + while (match && (match->nentry->inode != oldinode ||
52803 + match->nentry->device != olddevice || !match->nentry->deleted))
52804 + match = match->next;
52805 +
52806 + if (match && (match->nentry->inode == oldinode)
52807 + && (match->nentry->device == olddevice) &&
52808 + match->nentry->deleted) {
52809 + if (match->prev == NULL) {
52810 + inodev_set.i_hash[index] = match->next;
52811 + if (match->next != NULL)
52812 + match->next->prev = NULL;
52813 + } else {
52814 + match->prev->next = match->next;
52815 + if (match->next != NULL)
52816 + match->next->prev = match->prev;
52817 + }
52818 + match->prev = NULL;
52819 + match->next = NULL;
52820 + match->nentry->inode = newinode;
52821 + match->nentry->device = newdevice;
52822 + match->nentry->deleted = 0;
52823 +
52824 + insert_inodev_entry(match);
52825 + }
52826 +
52827 + return;
52828 +}
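
update_acl_obj_label(), update_acl_subj_label() and update_inodev_entry() all perform the same operation on a chained hash table: find the entry still marked deleted under its old (inode, device) key, unlink it from its bucket, rewrite the key, clear the deleted flag, and re-insert it so it hashes into the new bucket. A generic sketch of that move-to-new-bucket step, with a hypothetical node type and a trivial hash:

#include <stdio.h>

struct node {
	unsigned long key;
	int deleted;
	struct node *prev, *next;
};

#define NBUCKETS 64
static struct node *buckets[NBUCKETS];

static unsigned int hash(unsigned long key) { return key % NBUCKETS; }

static void insert(struct node *n)
{
	unsigned int b = hash(n->key);

	n->prev = NULL;
	n->next = buckets[b];
	if (buckets[b])
		buckets[b]->prev = n;
	buckets[b] = n;
}

/* re-key an entry that was marked deleted under oldkey, as the update_* helpers do */
static void rekey(unsigned long oldkey, unsigned long newkey)
{
	unsigned int b = hash(oldkey);
	struct node *m = buckets[b];

	while (m && (m->key != oldkey || !m->deleted))
		m = m->next;
	if (!m)
		return;

	/* unlink from the old bucket's doubly linked chain */
	if (m->prev)
		m->prev->next = m->next;
	else
		buckets[b] = m->next;
	if (m->next)
		m->next->prev = m->prev;

	m->prev = m->next = NULL;
	m->key = newkey;
	m->deleted = 0;
	insert(m);                    /* lands in the bucket for the new key */
}

int main(void)
{
	static struct node n = { .key = 17, .deleted = 1 };

	insert(&n);
	rekey(17, 170);               /* e.g. a deleted path recreated under a new inode number */
	printf("node now in bucket %u, deleted=%d\n", hash(n.key), n.deleted);
	return 0;
}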
52829 +
52830 +static void
52831 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
52832 +{
52833 + struct acl_subject_label *subj;
52834 + struct acl_role_label *role;
52835 + unsigned int x;
52836 +
52837 + FOR_EACH_ROLE_START(role)
52838 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
52839 +
52840 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
52841 + if ((subj->inode == ino) && (subj->device == dev)) {
52842 + subj->inode = ino;
52843 + subj->device = dev;
52844 + }
52845 + FOR_EACH_NESTED_SUBJECT_END(subj)
52846 + FOR_EACH_SUBJECT_START(role, subj, x)
52847 + update_acl_obj_label(matchn->inode, matchn->device,
52848 + ino, dev, subj);
52849 + FOR_EACH_SUBJECT_END(subj,x)
52850 + FOR_EACH_ROLE_END(role)
52851 +
52852 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
52853 +
52854 + return;
52855 +}
52856 +
52857 +static void
52858 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
52859 + const struct vfsmount *mnt)
52860 +{
52861 + ino_t ino = dentry->d_inode->i_ino;
52862 + dev_t dev = __get_dev(dentry);
52863 +
52864 + __do_handle_create(matchn, ino, dev);
52865 +
52866 + return;
52867 +}
52868 +
52869 +void
52870 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
52871 +{
52872 + struct name_entry *matchn;
52873 +
52874 + if (unlikely(!(gr_status & GR_READY)))
52875 + return;
52876 +
52877 + preempt_disable();
52878 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
52879 +
52880 + if (unlikely((unsigned long)matchn)) {
52881 + write_lock(&gr_inode_lock);
52882 + do_handle_create(matchn, dentry, mnt);
52883 + write_unlock(&gr_inode_lock);
52884 + }
52885 + preempt_enable();
52886 +
52887 + return;
52888 +}
52889 +
52890 +void
52891 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
52892 +{
52893 + struct name_entry *matchn;
52894 +
52895 + if (unlikely(!(gr_status & GR_READY)))
52896 + return;
52897 +
52898 + preempt_disable();
52899 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
52900 +
52901 + if (unlikely((unsigned long)matchn)) {
52902 + write_lock(&gr_inode_lock);
52903 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
52904 + write_unlock(&gr_inode_lock);
52905 + }
52906 + preempt_enable();
52907 +
52908 + return;
52909 +}
52910 +
52911 +void
52912 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
52913 + struct dentry *old_dentry,
52914 + struct dentry *new_dentry,
52915 + struct vfsmount *mnt, const __u8 replace)
52916 +{
52917 + struct name_entry *matchn;
52918 + struct inodev_entry *inodev;
52919 + struct inode *inode = new_dentry->d_inode;
52920 + ino_t old_ino = old_dentry->d_inode->i_ino;
52921 + dev_t old_dev = __get_dev(old_dentry);
52922 +
52923 + /* vfs_rename swaps the name and parent link for old_dentry and
52924 + new_dentry
52925 + at this point, old_dentry has the new name, parent link, and inode
52926 + for the renamed file
52927 + if a file is being replaced by a rename, new_dentry has the inode
52928 + and name for the replaced file
52929 + */
52930 +
52931 + if (unlikely(!(gr_status & GR_READY)))
52932 + return;
52933 +
52934 + preempt_disable();
52935 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
52936 +
52937 + /* we wouldn't have to check d_inode if it weren't for
52938 + NFS silly-renaming
52939 + */
52940 +
52941 + write_lock(&gr_inode_lock);
52942 + if (unlikely(replace && inode)) {
52943 + ino_t new_ino = inode->i_ino;
52944 + dev_t new_dev = __get_dev(new_dentry);
52945 +
52946 + inodev = lookup_inodev_entry(new_ino, new_dev);
52947 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
52948 + do_handle_delete(inodev, new_ino, new_dev);
52949 + }
52950 +
52951 + inodev = lookup_inodev_entry(old_ino, old_dev);
52952 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
52953 + do_handle_delete(inodev, old_ino, old_dev);
52954 +
52955 + if (unlikely((unsigned long)matchn))
52956 + do_handle_create(matchn, old_dentry, mnt);
52957 +
52958 + write_unlock(&gr_inode_lock);
52959 + preempt_enable();
52960 +
52961 + return;
52962 +}
52963 +
52964 +static int
52965 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
52966 + unsigned char **sum)
52967 +{
52968 + struct acl_role_label *r;
52969 + struct role_allowed_ip *ipp;
52970 + struct role_transition *trans;
52971 + unsigned int i;
52972 + int found = 0;
52973 + u32 curr_ip = current->signal->curr_ip;
52974 +
52975 + current->signal->saved_ip = curr_ip;
52976 +
52977 + /* check transition table */
52978 +
52979 + for (trans = current->role->transitions; trans; trans = trans->next) {
52980 + if (!strcmp(rolename, trans->rolename)) {
52981 + found = 1;
52982 + break;
52983 + }
52984 + }
52985 +
52986 + if (!found)
52987 + return 0;
52988 +
52989 + /* handle special roles that do not require authentication
52990 + and check ip */
52991 +
52992 + FOR_EACH_ROLE_START(r)
52993 + if (!strcmp(rolename, r->rolename) &&
52994 + (r->roletype & GR_ROLE_SPECIAL)) {
52995 + found = 0;
52996 + if (r->allowed_ips != NULL) {
52997 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
52998 + if ((ntohl(curr_ip) & ipp->netmask) ==
52999 + (ntohl(ipp->addr) & ipp->netmask))
53000 + found = 1;
53001 + }
53002 + } else
53003 + found = 2;
53004 + if (!found)
53005 + return 0;
53006 +
53007 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
53008 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
53009 + *salt = NULL;
53010 + *sum = NULL;
53011 + return 1;
53012 + }
53013 + }
53014 + FOR_EACH_ROLE_END(r)
53015 +
53016 + for (i = 0; i < num_sprole_pws; i++) {
53017 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
53018 + *salt = acl_special_roles[i]->salt;
53019 + *sum = acl_special_roles[i]->sum;
53020 + return 1;
53021 + }
53022 + }
53023 +
53024 + return 0;
53025 +}
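
The allowed-ip test above compares the connecting address against each allowed network under its mask, i.e. membership is (addr & netmask) == (net & netmask) with both addresses converted to host byte order. A standalone sketch:

#include <arpa/inet.h>
#include <stdio.h>

/* does the host-order address 'ip' fall inside net/netmask (also host order)? */
static int ip_in_net(unsigned int ip, unsigned int net, unsigned int netmask)
{
	return (ip & netmask) == (net & netmask);
}

int main(void)
{
	struct in_addr ip, net;

	inet_pton(AF_INET, "192.168.1.42", &ip);
	inet_pton(AF_INET, "192.168.1.0", &net);

	/* a /24 corresponds to mask 0xffffff00 in host order */
	printf("%d\n", ip_in_net(ntohl(ip.s_addr), ntohl(net.s_addr), 0xffffff00u));
	return 0;
}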
53026 +
53027 +static void
53028 +assign_special_role(char *rolename)
53029 +{
53030 + struct acl_object_label *obj;
53031 + struct acl_role_label *r;
53032 + struct acl_role_label *assigned = NULL;
53033 + struct task_struct *tsk;
53034 + struct file *filp;
53035 +
53036 + FOR_EACH_ROLE_START(r)
53037 + if (!strcmp(rolename, r->rolename) &&
53038 + (r->roletype & GR_ROLE_SPECIAL)) {
53039 + assigned = r;
53040 + break;
53041 + }
53042 + FOR_EACH_ROLE_END(r)
53043 +
53044 + if (!assigned)
53045 + return;
53046 +
53047 + read_lock(&tasklist_lock);
53048 + read_lock(&grsec_exec_file_lock);
53049 +
53050 + tsk = current->real_parent;
53051 + if (tsk == NULL)
53052 + goto out_unlock;
53053 +
53054 + filp = tsk->exec_file;
53055 + if (filp == NULL)
53056 + goto out_unlock;
53057 +
53058 + tsk->is_writable = 0;
53059 +
53060 + tsk->acl_sp_role = 1;
53061 + tsk->acl_role_id = ++acl_sp_role_value;
53062 + tsk->role = assigned;
53063 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
53064 +
53065 + /* ignore additional mmap checks for processes that are writable
53066 + by the default ACL */
53067 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53068 + if (unlikely(obj->mode & GR_WRITE))
53069 + tsk->is_writable = 1;
53070 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
53071 + if (unlikely(obj->mode & GR_WRITE))
53072 + tsk->is_writable = 1;
53073 +
53074 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53075 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
53076 +#endif
53077 +
53078 +out_unlock:
53079 + read_unlock(&grsec_exec_file_lock);
53080 + read_unlock(&tasklist_lock);
53081 + return;
53082 +}
53083 +
53084 +int gr_check_secure_terminal(struct task_struct *task)
53085 +{
53086 + struct task_struct *p, *p2, *p3;
53087 + struct files_struct *files;
53088 + struct fdtable *fdt;
53089 + struct file *our_file = NULL, *file;
53090 + int i;
53091 +
53092 + if (task->signal->tty == NULL)
53093 + return 1;
53094 +
53095 + files = get_files_struct(task);
53096 + if (files != NULL) {
53097 + rcu_read_lock();
53098 + fdt = files_fdtable(files);
53099 + for (i=0; i < fdt->max_fds; i++) {
53100 + file = fcheck_files(files, i);
53101 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
53102 + get_file(file);
53103 + our_file = file;
53104 + }
53105 + }
53106 + rcu_read_unlock();
53107 + put_files_struct(files);
53108 + }
53109 +
53110 + if (our_file == NULL)
53111 + return 1;
53112 +
53113 + read_lock(&tasklist_lock);
53114 + do_each_thread(p2, p) {
53115 + files = get_files_struct(p);
53116 + if (files == NULL ||
53117 + (p->signal && p->signal->tty == task->signal->tty)) {
53118 + if (files != NULL)
53119 + put_files_struct(files);
53120 + continue;
53121 + }
53122 + rcu_read_lock();
53123 + fdt = files_fdtable(files);
53124 + for (i=0; i < fdt->max_fds; i++) {
53125 + file = fcheck_files(files, i);
53126 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
53127 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
53128 + p3 = task;
53129 + while (p3->pid > 0) {
53130 + if (p3 == p)
53131 + break;
53132 + p3 = p3->real_parent;
53133 + }
53134 + if (p3 == p)
53135 + break;
53136 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
53137 + gr_handle_alertkill(p);
53138 + rcu_read_unlock();
53139 + put_files_struct(files);
53140 + read_unlock(&tasklist_lock);
53141 + fput(our_file);
53142 + return 0;
53143 + }
53144 + }
53145 + rcu_read_unlock();
53146 + put_files_struct(files);
53147 + } while_each_thread(p2, p);
53148 + read_unlock(&tasklist_lock);
53149 +
53150 + fput(our_file);
53151 + return 1;
53152 +}
53153 +
53154 +ssize_t
53155 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
53156 +{
53157 + struct gr_arg_wrapper uwrap;
53158 + unsigned char *sprole_salt = NULL;
53159 + unsigned char *sprole_sum = NULL;
53160 + int error = sizeof (struct gr_arg_wrapper);
53161 + int error2 = 0;
53162 +
53163 + mutex_lock(&gr_dev_mutex);
53164 +
53165 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53166 + error = -EPERM;
53167 + goto out;
53168 + }
53169 +
53170 + if (count != sizeof (struct gr_arg_wrapper)) {
53171 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53172 + error = -EINVAL;
53173 + goto out;
53174 + }
53175 +
53176 +
53177 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53178 + gr_auth_expires = 0;
53179 + gr_auth_attempts = 0;
53180 + }
53181 +
53182 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53183 + error = -EFAULT;
53184 + goto out;
53185 + }
53186 +
53187 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53188 + error = -EINVAL;
53189 + goto out;
53190 + }
53191 +
53192 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
53193 + error = -EFAULT;
53194 + goto out;
53195 + }
53196 +
53197 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53198 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53199 + time_after(gr_auth_expires, get_seconds())) {
53200 + error = -EBUSY;
53201 + goto out;
53202 + }
53203 +
53204 +	/* if a non-root user is trying to do anything other than use a
53205 +	   special role, do not attempt authentication and do not count it
53206 +	   towards authentication lockout
53207 +	 */
53208 +
53209 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
53210 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53211 + current_uid()) {
53212 + error = -EPERM;
53213 + goto out;
53214 + }
53215 +
53216 + /* ensure pw and special role name are null terminated */
53217 +
53218 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
53219 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
53220 +
53221 +	/* Okay.
53222 +	 * We have enough of the argument structure (we have yet to
53223 +	 * copy_from_user the tables themselves). Copy the tables
53224 +	 * only if we need them, i.e. for loading operations. */
53225 +
53226 + switch (gr_usermode->mode) {
53227 + case GR_STATUS:
53228 + if (gr_status & GR_READY) {
53229 + error = 1;
53230 + if (!gr_check_secure_terminal(current))
53231 + error = 3;
53232 + } else
53233 + error = 2;
53234 + goto out;
53235 + case GR_SHUTDOWN:
53236 + if ((gr_status & GR_READY)
53237 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53238 + pax_open_kernel();
53239 + gr_status &= ~GR_READY;
53240 + pax_close_kernel();
53241 +
53242 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
53243 + free_variables();
53244 + memset(gr_usermode, 0, sizeof (struct gr_arg));
53245 + memset(gr_system_salt, 0, GR_SALT_LEN);
53246 + memset(gr_system_sum, 0, GR_SHA_LEN);
53247 + } else if (gr_status & GR_READY) {
53248 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53249 + error = -EPERM;
53250 + } else {
53251 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53252 + error = -EAGAIN;
53253 + }
53254 + break;
53255 + case GR_ENABLE:
53256 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53257 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53258 + else {
53259 + if (gr_status & GR_READY)
53260 + error = -EAGAIN;
53261 + else
53262 + error = error2;
53263 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53264 + }
53265 + break;
53266 + case GR_RELOAD:
53267 + if (!(gr_status & GR_READY)) {
53268 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53269 + error = -EAGAIN;
53270 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53271 + preempt_disable();
53272 +
53273 + pax_open_kernel();
53274 + gr_status &= ~GR_READY;
53275 + pax_close_kernel();
53276 +
53277 + free_variables();
53278 + if (!(error2 = gracl_init(gr_usermode))) {
53279 + preempt_enable();
53280 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
53281 + } else {
53282 + preempt_enable();
53283 + error = error2;
53284 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53285 + }
53286 + } else {
53287 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53288 + error = -EPERM;
53289 + }
53290 + break;
53291 + case GR_SEGVMOD:
53292 + if (unlikely(!(gr_status & GR_READY))) {
53293 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53294 + error = -EAGAIN;
53295 + break;
53296 + }
53297 +
53298 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53299 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53300 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53301 + struct acl_subject_label *segvacl;
53302 + segvacl =
53303 + lookup_acl_subj_label(gr_usermode->segv_inode,
53304 + gr_usermode->segv_device,
53305 + current->role);
53306 + if (segvacl) {
53307 + segvacl->crashes = 0;
53308 + segvacl->expires = 0;
53309 + }
53310 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53311 + gr_remove_uid(gr_usermode->segv_uid);
53312 + }
53313 + } else {
53314 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53315 + error = -EPERM;
53316 + }
53317 + break;
53318 + case GR_SPROLE:
53319 + case GR_SPROLEPAM:
53320 + if (unlikely(!(gr_status & GR_READY))) {
53321 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53322 + error = -EAGAIN;
53323 + break;
53324 + }
53325 +
53326 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53327 + current->role->expires = 0;
53328 + current->role->auth_attempts = 0;
53329 + }
53330 +
53331 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53332 + time_after(current->role->expires, get_seconds())) {
53333 + error = -EBUSY;
53334 + goto out;
53335 + }
53336 +
53337 + if (lookup_special_role_auth
53338 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53339 + && ((!sprole_salt && !sprole_sum)
53340 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53341 + char *p = "";
53342 + assign_special_role(gr_usermode->sp_role);
53343 + read_lock(&tasklist_lock);
53344 + if (current->real_parent)
53345 + p = current->real_parent->role->rolename;
53346 + read_unlock(&tasklist_lock);
53347 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53348 + p, acl_sp_role_value);
53349 + } else {
53350 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53351 + error = -EPERM;
53352 + if(!(current->role->auth_attempts++))
53353 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53354 +
53355 + goto out;
53356 + }
53357 + break;
53358 + case GR_UNSPROLE:
53359 + if (unlikely(!(gr_status & GR_READY))) {
53360 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53361 + error = -EAGAIN;
53362 + break;
53363 + }
53364 +
53365 + if (current->role->roletype & GR_ROLE_SPECIAL) {
53366 + char *p = "";
53367 + int i = 0;
53368 +
53369 + read_lock(&tasklist_lock);
53370 + if (current->real_parent) {
53371 + p = current->real_parent->role->rolename;
53372 + i = current->real_parent->acl_role_id;
53373 + }
53374 + read_unlock(&tasklist_lock);
53375 +
53376 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
53377 + gr_set_acls(1);
53378 + } else {
53379 + error = -EPERM;
53380 + goto out;
53381 + }
53382 + break;
53383 + default:
53384 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
53385 + error = -EINVAL;
53386 + break;
53387 + }
53388 +
53389 + if (error != -EPERM)
53390 + goto out;
53391 +
53392 + if(!(gr_auth_attempts++))
53393 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53394 +
53395 + out:
53396 + mutex_unlock(&gr_dev_mutex);
53397 + return error;
53398 +}
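
The gr_auth_attempts / gr_auth_expires pair (and the per-role auth_attempts / expires fields) implement a simple lockout: after CONFIG_GRKERNSEC_ACL_MAXTRIES failed password checks, further attempts are refused with -EBUSY until the timeout passes, and an elapsed window resets the counters. A condensed sketch of that policy with hypothetical names and values:

#include <stdbool.h>
#include <time.h>

#define MAXTRIES 3
#define LOCKOUT_SECONDS 30

static unsigned int auth_attempts;
static time_t auth_expires;

/* call before attempting authentication; true means "locked out, try later" */
static bool auth_locked_out(void)
{
	time_t now = time(NULL);

	if (auth_expires && now >= auth_expires) {   /* window elapsed: reset counters */
		auth_expires = 0;
		auth_attempts = 0;
	}
	return auth_attempts >= MAXTRIES && now < auth_expires;
}

/* call after a failed password check; the window starts on the first failure */
static void auth_failed(void)
{
	if (auth_attempts++ == 0)
		auth_expires = time(NULL) + LOCKOUT_SECONDS;
}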
53399 +
53400 +/* must be called with
53401 + rcu_read_lock();
53402 + read_lock(&tasklist_lock);
53403 + read_lock(&grsec_exec_file_lock);
53404 +*/
53405 +int gr_apply_subject_to_task(struct task_struct *task)
53406 +{
53407 + struct acl_object_label *obj;
53408 + char *tmpname;
53409 + struct acl_subject_label *tmpsubj;
53410 + struct file *filp;
53411 + struct name_entry *nmatch;
53412 +
53413 + filp = task->exec_file;
53414 + if (filp == NULL)
53415 + return 0;
53416 +
53417 + /* the following is to apply the correct subject
53418 + on binaries running when the RBAC system
53419 + is enabled, when the binaries have been
53420 + replaced or deleted since their execution
53421 + -----
53422 + when the RBAC system starts, the inode/dev
53423 + from exec_file will be one the RBAC system
53424 + is unaware of. It only knows the inode/dev
53425 + of the present file on disk, or the absence
53426 + of it.
53427 + */
53428 + preempt_disable();
53429 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
53430 +
53431 + nmatch = lookup_name_entry(tmpname);
53432 + preempt_enable();
53433 + tmpsubj = NULL;
53434 + if (nmatch) {
53435 + if (nmatch->deleted)
53436 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
53437 + else
53438 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
53439 + if (tmpsubj != NULL)
53440 + task->acl = tmpsubj;
53441 + }
53442 + if (tmpsubj == NULL)
53443 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
53444 + task->role);
53445 + if (task->acl) {
53446 + task->is_writable = 0;
53447 + /* ignore additional mmap checks for processes that are writable
53448 + by the default ACL */
53449 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53450 + if (unlikely(obj->mode & GR_WRITE))
53451 + task->is_writable = 1;
53452 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53453 + if (unlikely(obj->mode & GR_WRITE))
53454 + task->is_writable = 1;
53455 +
53456 + gr_set_proc_res(task);
53457 +
53458 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53459 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53460 +#endif
53461 + } else {
53462 + return 1;
53463 + }
53464 +
53465 + return 0;
53466 +}
53467 +
53468 +int
53469 +gr_set_acls(const int type)
53470 +{
53471 + struct task_struct *task, *task2;
53472 + struct acl_role_label *role = current->role;
53473 + __u16 acl_role_id = current->acl_role_id;
53474 + const struct cred *cred;
53475 + int ret;
53476 +
53477 + rcu_read_lock();
53478 + read_lock(&tasklist_lock);
53479 + read_lock(&grsec_exec_file_lock);
53480 + do_each_thread(task2, task) {
53481 + /* check to see if we're called from the exit handler,
53482 + if so, only replace ACLs that have inherited the admin
53483 + ACL */
53484 +
53485 + if (type && (task->role != role ||
53486 + task->acl_role_id != acl_role_id))
53487 + continue;
53488 +
53489 + task->acl_role_id = 0;
53490 + task->acl_sp_role = 0;
53491 +
53492 + if (task->exec_file) {
53493 + cred = __task_cred(task);
53494 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
53495 + ret = gr_apply_subject_to_task(task);
53496 + if (ret) {
53497 + read_unlock(&grsec_exec_file_lock);
53498 + read_unlock(&tasklist_lock);
53499 + rcu_read_unlock();
53500 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
53501 + return ret;
53502 + }
53503 + } else {
53504 + // it's a kernel process
53505 + task->role = kernel_role;
53506 + task->acl = kernel_role->root_label;
53507 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
53508 + task->acl->mode &= ~GR_PROCFIND;
53509 +#endif
53510 + }
53511 + } while_each_thread(task2, task);
53512 + read_unlock(&grsec_exec_file_lock);
53513 + read_unlock(&tasklist_lock);
53514 + rcu_read_unlock();
53515 +
53516 + return 0;
53517 +}
53518 +
53519 +void
53520 +gr_learn_resource(const struct task_struct *task,
53521 + const int res, const unsigned long wanted, const int gt)
53522 +{
53523 + struct acl_subject_label *acl;
53524 + const struct cred *cred;
53525 +
53526 + if (unlikely((gr_status & GR_READY) &&
53527 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
53528 + goto skip_reslog;
53529 +
53530 +#ifdef CONFIG_GRKERNSEC_RESLOG
53531 + gr_log_resource(task, res, wanted, gt);
53532 +#endif
53533 + skip_reslog:
53534 +
53535 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
53536 + return;
53537 +
53538 + acl = task->acl;
53539 +
53540 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
53541 + !(acl->resmask & (1 << (unsigned short) res))))
53542 + return;
53543 +
53544 + if (wanted >= acl->res[res].rlim_cur) {
53545 + unsigned long res_add;
53546 +
53547 + res_add = wanted;
53548 + switch (res) {
53549 + case RLIMIT_CPU:
53550 + res_add += GR_RLIM_CPU_BUMP;
53551 + break;
53552 + case RLIMIT_FSIZE:
53553 + res_add += GR_RLIM_FSIZE_BUMP;
53554 + break;
53555 + case RLIMIT_DATA:
53556 + res_add += GR_RLIM_DATA_BUMP;
53557 + break;
53558 + case RLIMIT_STACK:
53559 + res_add += GR_RLIM_STACK_BUMP;
53560 + break;
53561 + case RLIMIT_CORE:
53562 + res_add += GR_RLIM_CORE_BUMP;
53563 + break;
53564 + case RLIMIT_RSS:
53565 + res_add += GR_RLIM_RSS_BUMP;
53566 + break;
53567 + case RLIMIT_NPROC:
53568 + res_add += GR_RLIM_NPROC_BUMP;
53569 + break;
53570 + case RLIMIT_NOFILE:
53571 + res_add += GR_RLIM_NOFILE_BUMP;
53572 + break;
53573 + case RLIMIT_MEMLOCK:
53574 + res_add += GR_RLIM_MEMLOCK_BUMP;
53575 + break;
53576 + case RLIMIT_AS:
53577 + res_add += GR_RLIM_AS_BUMP;
53578 + break;
53579 + case RLIMIT_LOCKS:
53580 + res_add += GR_RLIM_LOCKS_BUMP;
53581 + break;
53582 + case RLIMIT_SIGPENDING:
53583 + res_add += GR_RLIM_SIGPENDING_BUMP;
53584 + break;
53585 + case RLIMIT_MSGQUEUE:
53586 + res_add += GR_RLIM_MSGQUEUE_BUMP;
53587 + break;
53588 + case RLIMIT_NICE:
53589 + res_add += GR_RLIM_NICE_BUMP;
53590 + break;
53591 + case RLIMIT_RTPRIO:
53592 + res_add += GR_RLIM_RTPRIO_BUMP;
53593 + break;
53594 + case RLIMIT_RTTIME:
53595 + res_add += GR_RLIM_RTTIME_BUMP;
53596 + break;
53597 + }
53598 +
53599 + acl->res[res].rlim_cur = res_add;
53600 +
53601 + if (wanted > acl->res[res].rlim_max)
53602 + acl->res[res].rlim_max = res_add;
53603 +
53604 + /* only log the subject filename, since resource logging is supported for
53605 + single-subject learning only */
53606 + rcu_read_lock();
53607 + cred = __task_cred(task);
53608 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
53609 + task->role->roletype, cred->uid, cred->gid, acl->filename,
53610 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
53611 + "", (unsigned long) res, &task->signal->saved_ip);
53612 + rcu_read_unlock();
53613 + }
53614 +
53615 + return;
53616 +}
53617 +
53618 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
53619 +void
53620 +pax_set_initial_flags(struct linux_binprm *bprm)
53621 +{
53622 + struct task_struct *task = current;
53623 + struct acl_subject_label *proc;
53624 + unsigned long flags;
53625 +
53626 + if (unlikely(!(gr_status & GR_READY)))
53627 + return;
53628 +
53629 + flags = pax_get_flags(task);
53630 +
53631 + proc = task->acl;
53632 +
53633 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
53634 + flags &= ~MF_PAX_PAGEEXEC;
53635 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
53636 + flags &= ~MF_PAX_SEGMEXEC;
53637 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
53638 + flags &= ~MF_PAX_RANDMMAP;
53639 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
53640 + flags &= ~MF_PAX_EMUTRAMP;
53641 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
53642 + flags &= ~MF_PAX_MPROTECT;
53643 +
53644 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
53645 + flags |= MF_PAX_PAGEEXEC;
53646 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
53647 + flags |= MF_PAX_SEGMEXEC;
53648 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
53649 + flags |= MF_PAX_RANDMMAP;
53650 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
53651 + flags |= MF_PAX_EMUTRAMP;
53652 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
53653 + flags |= MF_PAX_MPROTECT;
53654 +
53655 + pax_set_flags(task, flags);
53656 +
53657 + return;
53658 +}
53659 +#endif
53660 +
53661 +int
53662 +gr_handle_proc_ptrace(struct task_struct *task)
53663 +{
53664 + struct file *filp;
53665 + struct task_struct *tmp = task;
53666 + struct task_struct *curtemp = current;
53667 + __u32 retmode;
53668 +
53669 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53670 + if (unlikely(!(gr_status & GR_READY)))
53671 + return 0;
53672 +#endif
53673 +
53674 + read_lock(&tasklist_lock);
53675 + read_lock(&grsec_exec_file_lock);
53676 + filp = task->exec_file;
53677 +
53678 + while (tmp->pid > 0) {
53679 + if (tmp == curtemp)
53680 + break;
53681 + tmp = tmp->real_parent;
53682 + }
53683 +
53684 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53685 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
53686 + read_unlock(&grsec_exec_file_lock);
53687 + read_unlock(&tasklist_lock);
53688 + return 1;
53689 + }
53690 +
53691 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53692 + if (!(gr_status & GR_READY)) {
53693 + read_unlock(&grsec_exec_file_lock);
53694 + read_unlock(&tasklist_lock);
53695 + return 0;
53696 + }
53697 +#endif
53698 +
53699 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
53700 + read_unlock(&grsec_exec_file_lock);
53701 + read_unlock(&tasklist_lock);
53702 +
53703 + if (retmode & GR_NOPTRACE)
53704 + return 1;
53705 +
53706 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
53707 + && (current->acl != task->acl || (current->acl != current->role->root_label
53708 + && current->pid != task->pid)))
53709 + return 1;
53710 +
53711 + return 0;
53712 +}
53713 +
53714 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
53715 +{
53716 + if (unlikely(!(gr_status & GR_READY)))
53717 + return;
53718 +
53719 + if (!(current->role->roletype & GR_ROLE_GOD))
53720 + return;
53721 +
53722 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
53723 + p->role->rolename, gr_task_roletype_to_char(p),
53724 + p->acl->filename);
53725 +}
53726 +
53727 +int
53728 +gr_handle_ptrace(struct task_struct *task, const long request)
53729 +{
53730 + struct task_struct *tmp = task;
53731 + struct task_struct *curtemp = current;
53732 + __u32 retmode;
53733 +
53734 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53735 + if (unlikely(!(gr_status & GR_READY)))
53736 + return 0;
53737 +#endif
53738 + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
53739 + read_lock(&tasklist_lock);
53740 + while (tmp->pid > 0) {
53741 + if (tmp == curtemp)
53742 + break;
53743 + tmp = tmp->real_parent;
53744 + }
53745 +
53746 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53747 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
53748 + read_unlock(&tasklist_lock);
53749 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53750 + return 1;
53751 + }
53752 + read_unlock(&tasklist_lock);
53753 + }
53754 +
53755 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53756 + if (!(gr_status & GR_READY))
53757 + return 0;
53758 +#endif
53759 +
53760 + read_lock(&grsec_exec_file_lock);
53761 + if (unlikely(!task->exec_file)) {
53762 + read_unlock(&grsec_exec_file_lock);
53763 + return 0;
53764 + }
53765 +
53766 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
53767 + read_unlock(&grsec_exec_file_lock);
53768 +
53769 + if (retmode & GR_NOPTRACE) {
53770 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53771 + return 1;
53772 + }
53773 +
53774 + if (retmode & GR_PTRACERD) {
53775 + switch (request) {
53776 + case PTRACE_SEIZE:
53777 + case PTRACE_POKETEXT:
53778 + case PTRACE_POKEDATA:
53779 + case PTRACE_POKEUSR:
53780 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
53781 + case PTRACE_SETREGS:
53782 + case PTRACE_SETFPREGS:
53783 +#endif
53784 +#ifdef CONFIG_X86
53785 + case PTRACE_SETFPXREGS:
53786 +#endif
53787 +#ifdef CONFIG_ALTIVEC
53788 + case PTRACE_SETVRREGS:
53789 +#endif
53790 + return 1;
53791 + default:
53792 + return 0;
53793 + }
53794 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
53795 + !(current->role->roletype & GR_ROLE_GOD) &&
53796 + (current->acl != task->acl)) {
53797 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53798 + return 1;
53799 + }
53800 +
53801 + return 0;
53802 +}
53803 +
53804 +static int is_writable_mmap(const struct file *filp)
53805 +{
53806 + struct task_struct *task = current;
53807 + struct acl_object_label *obj, *obj2;
53808 +
53809 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
53810 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
53811 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53812 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
53813 + task->role->root_label);
53814 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
53815 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
53816 + return 1;
53817 + }
53818 + }
53819 + return 0;
53820 +}
53821 +
53822 +int
53823 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
53824 +{
53825 + __u32 mode;
53826 +
53827 + if (unlikely(!file || !(prot & PROT_EXEC)))
53828 + return 1;
53829 +
53830 + if (is_writable_mmap(file))
53831 + return 0;
53832 +
53833 + mode =
53834 + gr_search_file(file->f_path.dentry,
53835 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53836 + file->f_path.mnt);
53837 +
53838 + if (!gr_tpe_allow(file))
53839 + return 0;
53840 +
53841 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53842 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53843 + return 0;
53844 + } else if (unlikely(!(mode & GR_EXEC))) {
53845 + return 0;
53846 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53847 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53848 + return 1;
53849 + }
53850 +
53851 + return 1;
53852 +}
53853 +
53854 +int
53855 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53856 +{
53857 + __u32 mode;
53858 +
53859 + if (unlikely(!file || !(prot & PROT_EXEC)))
53860 + return 1;
53861 +
53862 + if (is_writable_mmap(file))
53863 + return 0;
53864 +
53865 + mode =
53866 + gr_search_file(file->f_path.dentry,
53867 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53868 + file->f_path.mnt);
53869 +
53870 + if (!gr_tpe_allow(file))
53871 + return 0;
53872 +
53873 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53874 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53875 + return 0;
53876 + } else if (unlikely(!(mode & GR_EXEC))) {
53877 + return 0;
53878 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53879 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53880 + return 1;
53881 + }
53882 +
53883 + return 1;
53884 +}
53885 +
53886 +void
53887 +gr_acl_handle_psacct(struct task_struct *task, const long code)
53888 +{
53889 + unsigned long runtime;
53890 + unsigned long cputime;
53891 + unsigned int wday, cday;
53892 + __u8 whr, chr;
53893 + __u8 wmin, cmin;
53894 + __u8 wsec, csec;
53895 + struct timespec timeval;
53896 +
53897 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
53898 + !(task->acl->mode & GR_PROCACCT)))
53899 + return;
53900 +
53901 + do_posix_clock_monotonic_gettime(&timeval);
53902 + runtime = timeval.tv_sec - task->start_time.tv_sec;
53903 + wday = runtime / (3600 * 24);
53904 + runtime -= wday * (3600 * 24);
53905 + whr = runtime / 3600;
53906 + runtime -= whr * 3600;
53907 + wmin = runtime / 60;
53908 + runtime -= wmin * 60;
53909 + wsec = runtime;
53910 +
53911 + cputime = (task->utime + task->stime) / HZ;
53912 + cday = cputime / (3600 * 24);
53913 + cputime -= cday * (3600 * 24);
53914 + chr = cputime / 3600;
53915 + cputime -= chr * 3600;
53916 + cmin = cputime / 60;
53917 + cputime -= cmin * 60;
53918 + csec = cputime;
53919 +
53920 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
53921 +
53922 + return;
53923 +}
53924 +
53925 +void gr_set_kernel_label(struct task_struct *task)
53926 +{
53927 + if (gr_status & GR_READY) {
53928 + task->role = kernel_role;
53929 + task->acl = kernel_role->root_label;
53930 + }
53931 + return;
53932 +}
53933 +
53934 +#ifdef CONFIG_TASKSTATS
53935 +int gr_is_taskstats_denied(int pid)
53936 +{
53937 + struct task_struct *task;
53938 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53939 + const struct cred *cred;
53940 +#endif
53941 + int ret = 0;
53942 +
53943 + /* restrict taskstats viewing to un-chrooted root users
53944 + who have the 'view' subject flag if the RBAC system is enabled
53945 + */
53946 +
53947 + rcu_read_lock();
53948 + read_lock(&tasklist_lock);
53949 + task = find_task_by_vpid(pid);
53950 + if (task) {
53951 +#ifdef CONFIG_GRKERNSEC_CHROOT
53952 + if (proc_is_chrooted(task))
53953 + ret = -EACCES;
53954 +#endif
53955 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53956 + cred = __task_cred(task);
53957 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53958 + if (cred->uid != 0)
53959 + ret = -EACCES;
53960 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53961 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
53962 + ret = -EACCES;
53963 +#endif
53964 +#endif
53965 + if (gr_status & GR_READY) {
53966 + if (!(task->acl->mode & GR_VIEW))
53967 + ret = -EACCES;
53968 + }
53969 + } else
53970 + ret = -ENOENT;
53971 +
53972 + read_unlock(&tasklist_lock);
53973 + rcu_read_unlock();
53974 +
53975 + return ret;
53976 +}
53977 +#endif
53978 +
53979 +/* AUXV entries are filled via a descendant of search_binary_handler
53980 + after we've already applied the subject for the target
53981 +*/
53982 +int gr_acl_enable_at_secure(void)
53983 +{
53984 + if (unlikely(!(gr_status & GR_READY)))
53985 + return 0;
53986 +
53987 + if (current->acl->mode & GR_ATSECURE)
53988 + return 1;
53989 +
53990 + return 0;
53991 +}
53992 +
53993 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
53994 +{
53995 + struct task_struct *task = current;
53996 + struct dentry *dentry = file->f_path.dentry;
53997 + struct vfsmount *mnt = file->f_path.mnt;
53998 + struct acl_object_label *obj, *tmp;
53999 + struct acl_subject_label *subj;
54000 + unsigned int bufsize;
54001 + int is_not_root;
54002 + char *path;
54003 + dev_t dev = __get_dev(dentry);
54004 +
54005 + if (unlikely(!(gr_status & GR_READY)))
54006 + return 1;
54007 +
54008 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54009 + return 1;
54010 +
54011 + /* ignore Eric Biederman */
54012 + if (IS_PRIVATE(dentry->d_inode))
54013 + return 1;
54014 +
54015 + subj = task->acl;
54016 + do {
54017 + obj = lookup_acl_obj_label(ino, dev, subj);
54018 + if (obj != NULL)
54019 + return (obj->mode & GR_FIND) ? 1 : 0;
54020 + } while ((subj = subj->parent_subject));
54021 +
54022 + /* this is purely an optimization since we're looking for an object
54023 + for the directory we're doing a readdir on;
54024 + if it's possible for any globbed object to match the entry we're
54025 + filling into the directory, then the object we find here will be
54026 + an anchor point with attached globbed objects
54027 + */
54028 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
54029 + if (obj->globbed == NULL)
54030 + return (obj->mode & GR_FIND) ? 1 : 0;
54031 +
54032 + is_not_root = ((obj->filename[0] == '/') &&
54033 + (obj->filename[1] == '\0')) ? 0 : 1;
54034 + bufsize = PAGE_SIZE - namelen - is_not_root;
54035 +
54036 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
54037 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
54038 + return 1;
54039 +
54040 + preempt_disable();
54041 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54042 + bufsize);
54043 +
54044 + bufsize = strlen(path);
54045 +
54046 + /* if base is "/", don't append an additional slash */
54047 + if (is_not_root)
54048 + *(path + bufsize) = '/';
54049 + memcpy(path + bufsize + is_not_root, name, namelen);
54050 + *(path + bufsize + namelen + is_not_root) = '\0';
54051 +
54052 + tmp = obj->globbed;
54053 + while (tmp) {
54054 + if (!glob_match(tmp->filename, path)) {
54055 + preempt_enable();
54056 + return (tmp->mode & GR_FIND) ? 1 : 0;
54057 + }
54058 + tmp = tmp->next;
54059 + }
54060 + preempt_enable();
54061 + return (obj->mode & GR_FIND) ? 1 : 0;
54062 +}
54063 +
54064 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
54065 +EXPORT_SYMBOL(gr_acl_is_enabled);
54066 +#endif
54067 +EXPORT_SYMBOL(gr_learn_resource);
54068 +EXPORT_SYMBOL(gr_set_kernel_label);
54069 +#ifdef CONFIG_SECURITY
54070 +EXPORT_SYMBOL(gr_check_user_change);
54071 +EXPORT_SYMBOL(gr_check_group_change);
54072 +#endif
54073 +
54074 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
54075 new file mode 100644
54076 index 0000000..34fefda
54077 --- /dev/null
54078 +++ b/grsecurity/gracl_alloc.c
54079 @@ -0,0 +1,105 @@
54080 +#include <linux/kernel.h>
54081 +#include <linux/mm.h>
54082 +#include <linux/slab.h>
54083 +#include <linux/vmalloc.h>
54084 +#include <linux/gracl.h>
54085 +#include <linux/grsecurity.h>
54086 +
54087 +static unsigned long alloc_stack_next = 1;
54088 +static unsigned long alloc_stack_size = 1;
54089 +static void **alloc_stack;
54090 +
54091 +static __inline__ int
54092 +alloc_pop(void)
54093 +{
54094 + if (alloc_stack_next == 1)
54095 + return 0;
54096 +
54097 + kfree(alloc_stack[alloc_stack_next - 2]);
54098 +
54099 + alloc_stack_next--;
54100 +
54101 + return 1;
54102 +}
54103 +
54104 +static __inline__ int
54105 +alloc_push(void *buf)
54106 +{
54107 + if (alloc_stack_next >= alloc_stack_size)
54108 + return 1;
54109 +
54110 + alloc_stack[alloc_stack_next - 1] = buf;
54111 +
54112 + alloc_stack_next++;
54113 +
54114 + return 0;
54115 +}
54116 +
54117 +void *
54118 +acl_alloc(unsigned long len)
54119 +{
54120 + void *ret = NULL;
54121 +
54122 + if (!len || len > PAGE_SIZE)
54123 + goto out;
54124 +
54125 + ret = kmalloc(len, GFP_KERNEL);
54126 +
54127 + if (ret) {
54128 + if (alloc_push(ret)) {
54129 + kfree(ret);
54130 + ret = NULL;
54131 + }
54132 + }
54133 +
54134 +out:
54135 + return ret;
54136 +}
54137 +
54138 +void *
54139 +acl_alloc_num(unsigned long num, unsigned long len)
54140 +{
54141 + if (!len || (num > (PAGE_SIZE / len)))
54142 + return NULL;
54143 +
54144 + return acl_alloc(num * len);
54145 +}
54146 +
54147 +void
54148 +acl_free_all(void)
54149 +{
54150 + if (gr_acl_is_enabled() || !alloc_stack)
54151 + return;
54152 +
54153 + while (alloc_pop()) ;
54154 +
54155 + if (alloc_stack) {
54156 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
54157 + kfree(alloc_stack);
54158 + else
54159 + vfree(alloc_stack);
54160 + }
54161 +
54162 + alloc_stack = NULL;
54163 + alloc_stack_size = 1;
54164 + alloc_stack_next = 1;
54165 +
54166 + return;
54167 +}
54168 +
54169 +int
54170 +acl_alloc_stack_init(unsigned long size)
54171 +{
54172 + if ((size * sizeof (void *)) <= PAGE_SIZE)
54173 + alloc_stack =
54174 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
54175 + else
54176 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
54177 +
54178 + alloc_stack_size = size;
54179 +
54180 + if (!alloc_stack)
54181 + return 0;
54182 + else
54183 + return 1;
54184 +}
54185 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
54186 new file mode 100644
54187 index 0000000..6d21049
54188 --- /dev/null
54189 +++ b/grsecurity/gracl_cap.c
54190 @@ -0,0 +1,110 @@
54191 +#include <linux/kernel.h>
54192 +#include <linux/module.h>
54193 +#include <linux/sched.h>
54194 +#include <linux/gracl.h>
54195 +#include <linux/grsecurity.h>
54196 +#include <linux/grinternal.h>
54197 +
54198 +extern const char *captab_log[];
54199 +extern int captab_log_entries;
54200 +
54201 +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
54202 +{
54203 + struct acl_subject_label *curracl;
54204 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54205 + kernel_cap_t cap_audit = __cap_empty_set;
54206 +
54207 + if (!gr_acl_is_enabled())
54208 + return 1;
54209 +
54210 + curracl = task->acl;
54211 +
54212 + cap_drop = curracl->cap_lower;
54213 + cap_mask = curracl->cap_mask;
54214 + cap_audit = curracl->cap_invert_audit;
54215 +
54216 + while ((curracl = curracl->parent_subject)) {
54217 + /* if the cap isn't specified in the current computed mask but is specified in the
54218 + current level subject, and is lowered in the current level subject, then add
54219 + it to the set of dropped capabilities
54220 + otherwise, add the current level subject's mask to the current computed mask
54221 + */
54222 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54223 + cap_raise(cap_mask, cap);
54224 + if (cap_raised(curracl->cap_lower, cap))
54225 + cap_raise(cap_drop, cap);
54226 + if (cap_raised(curracl->cap_invert_audit, cap))
54227 + cap_raise(cap_audit, cap);
54228 + }
54229 + }
54230 +
54231 + if (!cap_raised(cap_drop, cap)) {
54232 + if (cap_raised(cap_audit, cap))
54233 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
54234 + return 1;
54235 + }
54236 +
54237 + curracl = task->acl;
54238 +
54239 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
54240 + && cap_raised(cred->cap_effective, cap)) {
54241 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54242 + task->role->roletype, cred->uid,
54243 + cred->gid, task->exec_file ?
54244 + gr_to_filename(task->exec_file->f_path.dentry,
54245 + task->exec_file->f_path.mnt) : curracl->filename,
54246 + curracl->filename, 0UL,
54247 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
54248 + return 1;
54249 + }
54250 +
54251 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
54252 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
54253 +
54254 + return 0;
54255 +}
54256 +
54257 +int
54258 +gr_acl_is_capable(const int cap)
54259 +{
54260 + return gr_task_acl_is_capable(current, current_cred(), cap);
54261 +}
54262 +
54263 +int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
54264 +{
54265 + struct acl_subject_label *curracl;
54266 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54267 +
54268 + if (!gr_acl_is_enabled())
54269 + return 1;
54270 +
54271 + curracl = task->acl;
54272 +
54273 + cap_drop = curracl->cap_lower;
54274 + cap_mask = curracl->cap_mask;
54275 +
54276 + while ((curracl = curracl->parent_subject)) {
54277 + /* if the cap isn't specified in the current computed mask but is specified in the
54278 + current level subject, and is lowered in the current level subject, then add
54279 + it to the set of dropped capabilities
54280 + otherwise, add the current level subject's mask to the current computed mask
54281 + */
54282 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54283 + cap_raise(cap_mask, cap);
54284 + if (cap_raised(curracl->cap_lower, cap))
54285 + cap_raise(cap_drop, cap);
54286 + }
54287 + }
54288 +
54289 + if (!cap_raised(cap_drop, cap))
54290 + return 1;
54291 +
54292 + return 0;
54293 +}
54294 +
54295 +int
54296 +gr_acl_is_capable_nolog(const int cap)
54297 +{
54298 + return gr_task_acl_is_capable_nolog(current, cap);
54299 +}
54300 +
54301 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
54302 new file mode 100644
54303 index 0000000..88d0e87
54304 --- /dev/null
54305 +++ b/grsecurity/gracl_fs.c
54306 @@ -0,0 +1,435 @@
54307 +#include <linux/kernel.h>
54308 +#include <linux/sched.h>
54309 +#include <linux/types.h>
54310 +#include <linux/fs.h>
54311 +#include <linux/file.h>
54312 +#include <linux/stat.h>
54313 +#include <linux/grsecurity.h>
54314 +#include <linux/grinternal.h>
54315 +#include <linux/gracl.h>
54316 +
54317 +umode_t
54318 +gr_acl_umask(void)
54319 +{
54320 + if (unlikely(!gr_acl_is_enabled()))
54321 + return 0;
54322 +
54323 + return current->role->umask;
54324 +}
54325 +
54326 +__u32
54327 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54328 + const struct vfsmount * mnt)
54329 +{
54330 + __u32 mode;
54331 +
54332 + if (unlikely(!dentry->d_inode))
54333 + return GR_FIND;
54334 +
54335 + mode =
54336 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
54337 +
54338 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
54339 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54340 + return mode;
54341 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
54342 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54343 + return 0;
54344 + } else if (unlikely(!(mode & GR_FIND)))
54345 + return 0;
54346 +
54347 + return GR_FIND;
54348 +}
54349 +
54350 +__u32
54351 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54352 + int acc_mode)
54353 +{
54354 + __u32 reqmode = GR_FIND;
54355 + __u32 mode;
54356 +
54357 + if (unlikely(!dentry->d_inode))
54358 + return reqmode;
54359 +
54360 + if (acc_mode & MAY_APPEND)
54361 + reqmode |= GR_APPEND;
54362 + else if (acc_mode & MAY_WRITE)
54363 + reqmode |= GR_WRITE;
54364 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
54365 + reqmode |= GR_READ;
54366 +
54367 + mode =
54368 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54369 + mnt);
54370 +
54371 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54372 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54373 + reqmode & GR_READ ? " reading" : "",
54374 + reqmode & GR_WRITE ? " writing" : reqmode &
54375 + GR_APPEND ? " appending" : "");
54376 + return reqmode;
54377 + } else
54378 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54379 + {
54380 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54381 + reqmode & GR_READ ? " reading" : "",
54382 + reqmode & GR_WRITE ? " writing" : reqmode &
54383 + GR_APPEND ? " appending" : "");
54384 + return 0;
54385 + } else if (unlikely((mode & reqmode) != reqmode))
54386 + return 0;
54387 +
54388 + return reqmode;
54389 +}
54390 +
54391 +__u32
54392 +gr_acl_handle_creat(const struct dentry * dentry,
54393 + const struct dentry * p_dentry,
54394 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54395 + const int imode)
54396 +{
54397 + __u32 reqmode = GR_WRITE | GR_CREATE;
54398 + __u32 mode;
54399 +
54400 + if (acc_mode & MAY_APPEND)
54401 + reqmode |= GR_APPEND;
54402 + // if a directory was required or the directory already exists, then
54403 + // don't count this open as a read
54404 + if ((acc_mode & MAY_READ) &&
54405 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
54406 + reqmode |= GR_READ;
54407 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
54408 + reqmode |= GR_SETID;
54409 +
54410 + mode =
54411 + gr_check_create(dentry, p_dentry, p_mnt,
54412 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54413 +
54414 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54415 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54416 + reqmode & GR_READ ? " reading" : "",
54417 + reqmode & GR_WRITE ? " writing" : reqmode &
54418 + GR_APPEND ? " appending" : "");
54419 + return reqmode;
54420 + } else
54421 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54422 + {
54423 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54424 + reqmode & GR_READ ? " reading" : "",
54425 + reqmode & GR_WRITE ? " writing" : reqmode &
54426 + GR_APPEND ? " appending" : "");
54427 + return 0;
54428 + } else if (unlikely((mode & reqmode) != reqmode))
54429 + return 0;
54430 +
54431 + return reqmode;
54432 +}
54433 +
54434 +__u32
54435 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
54436 + const int fmode)
54437 +{
54438 + __u32 mode, reqmode = GR_FIND;
54439 +
54440 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
54441 + reqmode |= GR_EXEC;
54442 + if (fmode & S_IWOTH)
54443 + reqmode |= GR_WRITE;
54444 + if (fmode & S_IROTH)
54445 + reqmode |= GR_READ;
54446 +
54447 + mode =
54448 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54449 + mnt);
54450 +
54451 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54452 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54453 + reqmode & GR_READ ? " reading" : "",
54454 + reqmode & GR_WRITE ? " writing" : "",
54455 + reqmode & GR_EXEC ? " executing" : "");
54456 + return reqmode;
54457 + } else
54458 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54459 + {
54460 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54461 + reqmode & GR_READ ? " reading" : "",
54462 + reqmode & GR_WRITE ? " writing" : "",
54463 + reqmode & GR_EXEC ? " executing" : "");
54464 + return 0;
54465 + } else if (unlikely((mode & reqmode) != reqmode))
54466 + return 0;
54467 +
54468 + return reqmode;
54469 +}
54470 +
54471 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
54472 +{
54473 + __u32 mode;
54474 +
54475 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
54476 +
54477 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54478 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
54479 + return mode;
54480 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54481 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
54482 + return 0;
54483 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
54484 + return 0;
54485 +
54486 + return (reqmode);
54487 +}
54488 +
54489 +__u32
54490 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54491 +{
54492 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
54493 +}
54494 +
54495 +__u32
54496 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
54497 +{
54498 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
54499 +}
54500 +
54501 +__u32
54502 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
54503 +{
54504 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
54505 +}
54506 +
54507 +__u32
54508 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
54509 +{
54510 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
54511 +}
54512 +
54513 +__u32
54514 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
54515 + umode_t *modeptr)
54516 +{
54517 + umode_t mode;
54518 +
54519 + *modeptr &= ~gr_acl_umask();
54520 + mode = *modeptr;
54521 +
54522 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
54523 + return 1;
54524 +
54525 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
54526 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
54527 + GR_CHMOD_ACL_MSG);
54528 + } else {
54529 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
54530 + }
54531 +}
54532 +
54533 +__u32
54534 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
54535 +{
54536 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
54537 +}
54538 +
54539 +__u32
54540 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
54541 +{
54542 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
54543 +}
54544 +
54545 +__u32
54546 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
54547 +{
54548 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
54549 +}
54550 +
54551 +__u32
54552 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
54553 +{
54554 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
54555 + GR_UNIXCONNECT_ACL_MSG);
54556 +}
54557 +
54558 +/* hardlinks require at minimum create and link permission;
54559 + any additional privilege required is based on the
54560 + privilege of the file being linked to
54561 +*/
54562 +__u32
54563 +gr_acl_handle_link(const struct dentry * new_dentry,
54564 + const struct dentry * parent_dentry,
54565 + const struct vfsmount * parent_mnt,
54566 + const struct dentry * old_dentry,
54567 + const struct vfsmount * old_mnt, const char *to)
54568 +{
54569 + __u32 mode;
54570 + __u32 needmode = GR_CREATE | GR_LINK;
54571 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
54572 +
54573 + mode =
54574 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
54575 + old_mnt);
54576 +
54577 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
54578 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54579 + return mode;
54580 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54581 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54582 + return 0;
54583 + } else if (unlikely((mode & needmode) != needmode))
54584 + return 0;
54585 +
54586 + return 1;
54587 +}
54588 +
54589 +__u32
54590 +gr_acl_handle_symlink(const struct dentry * new_dentry,
54591 + const struct dentry * parent_dentry,
54592 + const struct vfsmount * parent_mnt, const char *from)
54593 +{
54594 + __u32 needmode = GR_WRITE | GR_CREATE;
54595 + __u32 mode;
54596 +
54597 + mode =
54598 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
54599 + GR_CREATE | GR_AUDIT_CREATE |
54600 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
54601 +
54602 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
54603 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54604 + return mode;
54605 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54606 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54607 + return 0;
54608 + } else if (unlikely((mode & needmode) != needmode))
54609 + return 0;
54610 +
54611 + return (GR_WRITE | GR_CREATE);
54612 +}
54613 +
54614 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
54615 +{
54616 + __u32 mode;
54617 +
54618 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54619 +
54620 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54621 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
54622 + return mode;
54623 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54624 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
54625 + return 0;
54626 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
54627 + return 0;
54628 +
54629 + return (reqmode);
54630 +}
54631 +
54632 +__u32
54633 +gr_acl_handle_mknod(const struct dentry * new_dentry,
54634 + const struct dentry * parent_dentry,
54635 + const struct vfsmount * parent_mnt,
54636 + const int mode)
54637 +{
54638 + __u32 reqmode = GR_WRITE | GR_CREATE;
54639 + if (unlikely(mode & (S_ISUID | S_ISGID)))
54640 + reqmode |= GR_SETID;
54641 +
54642 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54643 + reqmode, GR_MKNOD_ACL_MSG);
54644 +}
54645 +
54646 +__u32
54647 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
54648 + const struct dentry *parent_dentry,
54649 + const struct vfsmount *parent_mnt)
54650 +{
54651 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54652 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
54653 +}
54654 +
54655 +#define RENAME_CHECK_SUCCESS(old, new) \
54656 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
54657 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
54658 +
54659 +int
54660 +gr_acl_handle_rename(struct dentry *new_dentry,
54661 + struct dentry *parent_dentry,
54662 + const struct vfsmount *parent_mnt,
54663 + struct dentry *old_dentry,
54664 + struct inode *old_parent_inode,
54665 + struct vfsmount *old_mnt, const char *newname)
54666 +{
54667 + __u32 comp1, comp2;
54668 + int error = 0;
54669 +
54670 + if (unlikely(!gr_acl_is_enabled()))
54671 + return 0;
54672 +
54673 + if (!new_dentry->d_inode) {
54674 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
54675 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
54676 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
54677 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
54678 + GR_DELETE | GR_AUDIT_DELETE |
54679 + GR_AUDIT_READ | GR_AUDIT_WRITE |
54680 + GR_SUPPRESS, old_mnt);
54681 + } else {
54682 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
54683 + GR_CREATE | GR_DELETE |
54684 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
54685 + GR_AUDIT_READ | GR_AUDIT_WRITE |
54686 + GR_SUPPRESS, parent_mnt);
54687 + comp2 =
54688 + gr_search_file(old_dentry,
54689 + GR_READ | GR_WRITE | GR_AUDIT_READ |
54690 + GR_DELETE | GR_AUDIT_DELETE |
54691 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
54692 + }
54693 +
54694 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
54695 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
54696 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54697 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
54698 + && !(comp2 & GR_SUPPRESS)) {
54699 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54700 + error = -EACCES;
54701 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
54702 + error = -EACCES;
54703 +
54704 + return error;
54705 +}
54706 +
54707 +void
54708 +gr_acl_handle_exit(void)
54709 +{
54710 + u16 id;
54711 + char *rolename;
54712 + struct file *exec_file;
54713 +
54714 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
54715 + !(current->role->roletype & GR_ROLE_PERSIST))) {
54716 + id = current->acl_role_id;
54717 + rolename = current->role->rolename;
54718 + gr_set_acls(1);
54719 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
54720 + }
54721 +
54722 + write_lock(&grsec_exec_file_lock);
54723 + exec_file = current->exec_file;
54724 + current->exec_file = NULL;
54725 + write_unlock(&grsec_exec_file_lock);
54726 +
54727 + if (exec_file)
54728 + fput(exec_file);
54729 +}
54730 +
54731 +int
54732 +gr_acl_handle_procpidmem(const struct task_struct *task)
54733 +{
54734 + if (unlikely(!gr_acl_is_enabled()))
54735 + return 0;
54736 +
54737 + if (task != current && task->acl->mode & GR_PROTPROCFD)
54738 + return -EACCES;
54739 +
54740 + return 0;
54741 +}
54742 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
54743 new file mode 100644
54744 index 0000000..58800a7
54745 --- /dev/null
54746 +++ b/grsecurity/gracl_ip.c
54747 @@ -0,0 +1,384 @@
54748 +#include <linux/kernel.h>
54749 +#include <asm/uaccess.h>
54750 +#include <asm/errno.h>
54751 +#include <net/sock.h>
54752 +#include <linux/file.h>
54753 +#include <linux/fs.h>
54754 +#include <linux/net.h>
54755 +#include <linux/in.h>
54756 +#include <linux/skbuff.h>
54757 +#include <linux/ip.h>
54758 +#include <linux/udp.h>
54759 +#include <linux/types.h>
54760 +#include <linux/sched.h>
54761 +#include <linux/netdevice.h>
54762 +#include <linux/inetdevice.h>
54763 +#include <linux/gracl.h>
54764 +#include <linux/grsecurity.h>
54765 +#include <linux/grinternal.h>
54766 +
54767 +#define GR_BIND 0x01
54768 +#define GR_CONNECT 0x02
54769 +#define GR_INVERT 0x04
54770 +#define GR_BINDOVERRIDE 0x08
54771 +#define GR_CONNECTOVERRIDE 0x10
54772 +#define GR_SOCK_FAMILY 0x20
54773 +
54774 +static const char * gr_protocols[IPPROTO_MAX] = {
54775 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
54776 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
54777 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
54778 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
54779 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
54780 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
54781 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
54782 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
54783 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
54784 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
54785 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
54786 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
54787 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
54788 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
54789 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
54790 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
54791 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
54792 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
54793 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
54794 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
54795 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
54796 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
54797 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
54798 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
54799 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
54800 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
54801 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
54802 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
54803 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
54804 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
54805 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
54806 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
54807 + };
54808 +
54809 +static const char * gr_socktypes[SOCK_MAX] = {
54810 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
54811 + "unknown:7", "unknown:8", "unknown:9", "packet"
54812 + };
54813 +
54814 +static const char * gr_sockfamilies[AF_MAX+1] = {
54815 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
54816 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
54817 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
54818 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
54819 + };
54820 +
54821 +const char *
54822 +gr_proto_to_name(unsigned char proto)
54823 +{
54824 + return gr_protocols[proto];
54825 +}
54826 +
54827 +const char *
54828 +gr_socktype_to_name(unsigned char type)
54829 +{
54830 + return gr_socktypes[type];
54831 +}
54832 +
54833 +const char *
54834 +gr_sockfamily_to_name(unsigned char family)
54835 +{
54836 + return gr_sockfamilies[family];
54837 +}
54838 +
54839 +int
54840 +gr_search_socket(const int domain, const int type, const int protocol)
54841 +{
54842 + struct acl_subject_label *curr;
54843 + const struct cred *cred = current_cred();
54844 +
54845 + if (unlikely(!gr_acl_is_enabled()))
54846 + goto exit;
54847 +
54848 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
54849 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
54850 + goto exit; // let the kernel handle it
54851 +
54852 + curr = current->acl;
54853 +
54854 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
54855 + /* the family is allowed, if this is PF_INET allow it only if
54856 + the extra sock type/protocol checks pass */
54857 + if (domain == PF_INET)
54858 + goto inet_check;
54859 + goto exit;
54860 + } else {
54861 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54862 + __u32 fakeip = 0;
54863 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54864 + current->role->roletype, cred->uid,
54865 + cred->gid, current->exec_file ?
54866 + gr_to_filename(current->exec_file->f_path.dentry,
54867 + current->exec_file->f_path.mnt) :
54868 + curr->filename, curr->filename,
54869 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
54870 + &current->signal->saved_ip);
54871 + goto exit;
54872 + }
54873 + goto exit_fail;
54874 + }
54875 +
54876 +inet_check:
54877 + /* the rest of this checking is for IPv4 only */
54878 + if (!curr->ips)
54879 + goto exit;
54880 +
54881 + if ((curr->ip_type & (1 << type)) &&
54882 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
54883 + goto exit;
54884 +
54885 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54886 + /* we don't place acls on raw sockets, and sometimes
54887 + dgram/ip sockets are opened for ioctl and not
54888 + bind/connect, so we'll fake a bind learn log */
54889 + if (type == SOCK_RAW || type == SOCK_PACKET) {
54890 + __u32 fakeip = 0;
54891 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54892 + current->role->roletype, cred->uid,
54893 + cred->gid, current->exec_file ?
54894 + gr_to_filename(current->exec_file->f_path.dentry,
54895 + current->exec_file->f_path.mnt) :
54896 + curr->filename, curr->filename,
54897 + &fakeip, 0, type,
54898 + protocol, GR_CONNECT, &current->signal->saved_ip);
54899 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
54900 + __u32 fakeip = 0;
54901 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54902 + current->role->roletype, cred->uid,
54903 + cred->gid, current->exec_file ?
54904 + gr_to_filename(current->exec_file->f_path.dentry,
54905 + current->exec_file->f_path.mnt) :
54906 + curr->filename, curr->filename,
54907 + &fakeip, 0, type,
54908 + protocol, GR_BIND, &current->signal->saved_ip);
54909 + }
54910 + /* we'll log when they use connect or bind */
54911 + goto exit;
54912 + }
54913 +
54914 +exit_fail:
54915 + if (domain == PF_INET)
54916 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
54917 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
54918 + else
54919 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
54920 + gr_socktype_to_name(type), protocol);
54921 +
54922 + return 0;
54923 +exit:
54924 + return 1;
54925 +}
54926 +
54927 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
54928 +{
54929 + if ((ip->mode & mode) &&
54930 + (ip_port >= ip->low) &&
54931 + (ip_port <= ip->high) &&
54932 + ((ntohl(ip_addr) & our_netmask) ==
54933 + (ntohl(our_addr) & our_netmask))
54934 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
54935 + && (ip->type & (1 << type))) {
54936 + if (ip->mode & GR_INVERT)
54937 + return 2; // specifically denied
54938 + else
54939 + return 1; // allowed
54940 + }
54941 +
54942 + return 0; // not specifically allowed, may continue parsing
54943 +}
54944 +
54945 +static int
54946 +gr_search_connectbind(const int full_mode, struct sock *sk,
54947 + struct sockaddr_in *addr, const int type)
54948 +{
54949 + char iface[IFNAMSIZ] = {0};
54950 + struct acl_subject_label *curr;
54951 + struct acl_ip_label *ip;
54952 + struct inet_sock *isk;
54953 + struct net_device *dev;
54954 + struct in_device *idev;
54955 + unsigned long i;
54956 + int ret;
54957 + int mode = full_mode & (GR_BIND | GR_CONNECT);
54958 + __u32 ip_addr = 0;
54959 + __u32 our_addr;
54960 + __u32 our_netmask;
54961 + char *p;
54962 + __u16 ip_port = 0;
54963 + const struct cred *cred = current_cred();
54964 +
54965 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
54966 + return 0;
54967 +
54968 + curr = current->acl;
54969 + isk = inet_sk(sk);
54970 +
54971 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
54972 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
54973 + addr->sin_addr.s_addr = curr->inaddr_any_override;
54974 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
54975 + struct sockaddr_in saddr;
54976 + int err;
54977 +
54978 + saddr.sin_family = AF_INET;
54979 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
54980 + saddr.sin_port = isk->inet_sport;
54981 +
54982 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
54983 + if (err)
54984 + return err;
54985 +
54986 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
54987 + if (err)
54988 + return err;
54989 + }
54990 +
54991 + if (!curr->ips)
54992 + return 0;
54993 +
54994 + ip_addr = addr->sin_addr.s_addr;
54995 + ip_port = ntohs(addr->sin_port);
54996 +
54997 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54998 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54999 + current->role->roletype, cred->uid,
55000 + cred->gid, current->exec_file ?
55001 + gr_to_filename(current->exec_file->f_path.dentry,
55002 + current->exec_file->f_path.mnt) :
55003 + curr->filename, curr->filename,
55004 + &ip_addr, ip_port, type,
55005 + sk->sk_protocol, mode, &current->signal->saved_ip);
55006 + return 0;
55007 + }
55008 +
55009 + for (i = 0; i < curr->ip_num; i++) {
55010 + ip = *(curr->ips + i);
55011 + if (ip->iface != NULL) {
55012 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
55013 + p = strchr(iface, ':');
55014 + if (p != NULL)
55015 + *p = '\0';
55016 + dev = dev_get_by_name(sock_net(sk), iface);
55017 + if (dev == NULL)
55018 + continue;
55019 + idev = in_dev_get(dev);
55020 + if (idev == NULL) {
55021 + dev_put(dev);
55022 + continue;
55023 + }
55024 + rcu_read_lock();
55025 + for_ifa(idev) {
55026 + if (!strcmp(ip->iface, ifa->ifa_label)) {
55027 + our_addr = ifa->ifa_address;
55028 + our_netmask = 0xffffffff;
55029 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55030 + if (ret == 1) {
55031 + rcu_read_unlock();
55032 + in_dev_put(idev);
55033 + dev_put(dev);
55034 + return 0;
55035 + } else if (ret == 2) {
55036 + rcu_read_unlock();
55037 + in_dev_put(idev);
55038 + dev_put(dev);
55039 + goto denied;
55040 + }
55041 + }
55042 + } endfor_ifa(idev);
55043 + rcu_read_unlock();
55044 + in_dev_put(idev);
55045 + dev_put(dev);
55046 + } else {
55047 + our_addr = ip->addr;
55048 + our_netmask = ip->netmask;
55049 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55050 + if (ret == 1)
55051 + return 0;
55052 + else if (ret == 2)
55053 + goto denied;
55054 + }
55055 + }
55056 +
55057 +denied:
55058 + if (mode == GR_BIND)
55059 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55060 + else if (mode == GR_CONNECT)
55061 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55062 +
55063 + return -EACCES;
55064 +}
55065 +
55066 +int
55067 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
55068 +{
55069 + /* always allow disconnection of dgram sockets with connect */
55070 + if (addr->sin_family == AF_UNSPEC)
55071 + return 0;
55072 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
55073 +}
55074 +
55075 +int
55076 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
55077 +{
55078 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
55079 +}
55080 +
55081 +int gr_search_listen(struct socket *sock)
55082 +{
55083 + struct sock *sk = sock->sk;
55084 + struct sockaddr_in addr;
55085 +
55086 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55087 + addr.sin_port = inet_sk(sk)->inet_sport;
55088 +
55089 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55090 +}
55091 +
55092 +int gr_search_accept(struct socket *sock)
55093 +{
55094 + struct sock *sk = sock->sk;
55095 + struct sockaddr_in addr;
55096 +
55097 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55098 + addr.sin_port = inet_sk(sk)->inet_sport;
55099 +
55100 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55101 +}
55102 +
55103 +int
55104 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55105 +{
55106 + if (addr)
55107 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55108 + else {
55109 + struct sockaddr_in sin;
55110 + const struct inet_sock *inet = inet_sk(sk);
55111 +
55112 + sin.sin_addr.s_addr = inet->inet_daddr;
55113 + sin.sin_port = inet->inet_dport;
55114 +
55115 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55116 + }
55117 +}
55118 +
55119 +int
55120 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55121 +{
55122 + struct sockaddr_in sin;
55123 +
55124 + if (unlikely(skb->len < sizeof (struct udphdr)))
55125 + return 0; // skip this packet
55126 +
55127 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55128 + sin.sin_port = udp_hdr(skb)->source;
55129 +
55130 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55131 +}
55132 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
55133 new file mode 100644
55134 index 0000000..25f54ef
55135 --- /dev/null
55136 +++ b/grsecurity/gracl_learn.c
55137 @@ -0,0 +1,207 @@
55138 +#include <linux/kernel.h>
55139 +#include <linux/mm.h>
55140 +#include <linux/sched.h>
55141 +#include <linux/poll.h>
55142 +#include <linux/string.h>
55143 +#include <linux/file.h>
55144 +#include <linux/types.h>
55145 +#include <linux/vmalloc.h>
55146 +#include <linux/grinternal.h>
55147 +
55148 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55149 + size_t count, loff_t *ppos);
55150 +extern int gr_acl_is_enabled(void);
55151 +
55152 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55153 +static int gr_learn_attached;
55154 +
55155 +/* use a 512k buffer */
55156 +#define LEARN_BUFFER_SIZE (512 * 1024)
55157 +
55158 +static DEFINE_SPINLOCK(gr_learn_lock);
55159 +static DEFINE_MUTEX(gr_learn_user_mutex);
55160 +
55161 +/* we need to maintain two buffers, so that the kernel context of grlearn
55162 + uses a semaphore around the userspace copying, and the other kernel contexts
55163 + use a spinlock when copying into the buffer, since they cannot sleep
55164 +*/
55165 +static char *learn_buffer;
55166 +static char *learn_buffer_user;
55167 +static int learn_buffer_len;
55168 +static int learn_buffer_user_len;
55169 +
55170 +static ssize_t
55171 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55172 +{
55173 + DECLARE_WAITQUEUE(wait, current);
55174 + ssize_t retval = 0;
55175 +
55176 + add_wait_queue(&learn_wait, &wait);
55177 + set_current_state(TASK_INTERRUPTIBLE);
55178 + do {
55179 + mutex_lock(&gr_learn_user_mutex);
55180 + spin_lock(&gr_learn_lock);
55181 + if (learn_buffer_len)
55182 + break;
55183 + spin_unlock(&gr_learn_lock);
55184 + mutex_unlock(&gr_learn_user_mutex);
55185 + if (file->f_flags & O_NONBLOCK) {
55186 + retval = -EAGAIN;
55187 + goto out;
55188 + }
55189 + if (signal_pending(current)) {
55190 + retval = -ERESTARTSYS;
55191 + goto out;
55192 + }
55193 +
55194 + schedule();
55195 + } while (1);
55196 +
55197 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55198 + learn_buffer_user_len = learn_buffer_len;
55199 + retval = learn_buffer_len;
55200 + learn_buffer_len = 0;
55201 +
55202 + spin_unlock(&gr_learn_lock);
55203 +
55204 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55205 + retval = -EFAULT;
55206 +
55207 + mutex_unlock(&gr_learn_user_mutex);
55208 +out:
55209 + set_current_state(TASK_RUNNING);
55210 + remove_wait_queue(&learn_wait, &wait);
55211 + return retval;
55212 +}
55213 +
55214 +static unsigned int
55215 +poll_learn(struct file * file, poll_table * wait)
55216 +{
55217 + poll_wait(file, &learn_wait, wait);
55218 +
55219 + if (learn_buffer_len)
55220 + return (POLLIN | POLLRDNORM);
55221 +
55222 + return 0;
55223 +}
55224 +
55225 +void
55226 +gr_clear_learn_entries(void)
55227 +{
55228 + char *tmp;
55229 +
55230 + mutex_lock(&gr_learn_user_mutex);
55231 + spin_lock(&gr_learn_lock);
55232 + tmp = learn_buffer;
55233 + learn_buffer = NULL;
55234 + spin_unlock(&gr_learn_lock);
55235 + if (tmp)
55236 + vfree(tmp);
55237 + if (learn_buffer_user != NULL) {
55238 + vfree(learn_buffer_user);
55239 + learn_buffer_user = NULL;
55240 + }
55241 + learn_buffer_len = 0;
55242 + mutex_unlock(&gr_learn_user_mutex);
55243 +
55244 + return;
55245 +}
55246 +
55247 +void
55248 +gr_add_learn_entry(const char *fmt, ...)
55249 +{
55250 + va_list args;
55251 + unsigned int len;
55252 +
55253 + if (!gr_learn_attached)
55254 + return;
55255 +
55256 + spin_lock(&gr_learn_lock);
55257 +
55258 + /* leave a gap at the end so we know when it's "full" but don't have to
55259 + compute the exact length of the string we're trying to append
55260 + */
55261 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
55262 + spin_unlock(&gr_learn_lock);
55263 + wake_up_interruptible(&learn_wait);
55264 + return;
55265 + }
55266 + if (learn_buffer == NULL) {
55267 + spin_unlock(&gr_learn_lock);
55268 + return;
55269 + }
55270 +
55271 + va_start(args, fmt);
55272 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
55273 + va_end(args);
55274 +
55275 + learn_buffer_len += len + 1;
55276 +
55277 + spin_unlock(&gr_learn_lock);
55278 + wake_up_interruptible(&learn_wait);
55279 +
55280 + return;
55281 +}
55282 +
55283 +static int
55284 +open_learn(struct inode *inode, struct file *file)
55285 +{
55286 + if (file->f_mode & FMODE_READ && gr_learn_attached)
55287 + return -EBUSY;
55288 + if (file->f_mode & FMODE_READ) {
55289 + int retval = 0;
55290 + mutex_lock(&gr_learn_user_mutex);
55291 + if (learn_buffer == NULL)
55292 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
55293 + if (learn_buffer_user == NULL)
55294 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
55295 + if (learn_buffer == NULL) {
55296 + retval = -ENOMEM;
55297 + goto out_error;
55298 + }
55299 + if (learn_buffer_user == NULL) {
55300 + retval = -ENOMEM;
55301 + goto out_error;
55302 + }
55303 + learn_buffer_len = 0;
55304 + learn_buffer_user_len = 0;
55305 + gr_learn_attached = 1;
55306 +out_error:
55307 + mutex_unlock(&gr_learn_user_mutex);
55308 + return retval;
55309 + }
55310 + return 0;
55311 +}
55312 +
55313 +static int
55314 +close_learn(struct inode *inode, struct file *file)
55315 +{
55316 + if (file->f_mode & FMODE_READ) {
55317 + char *tmp = NULL;
55318 + mutex_lock(&gr_learn_user_mutex);
55319 + spin_lock(&gr_learn_lock);
55320 + tmp = learn_buffer;
55321 + learn_buffer = NULL;
55322 + spin_unlock(&gr_learn_lock);
55323 + if (tmp)
55324 + vfree(tmp);
55325 + if (learn_buffer_user != NULL) {
55326 + vfree(learn_buffer_user);
55327 + learn_buffer_user = NULL;
55328 + }
55329 + learn_buffer_len = 0;
55330 + learn_buffer_user_len = 0;
55331 + gr_learn_attached = 0;
55332 + mutex_unlock(&gr_learn_user_mutex);
55333 + }
55334 +
55335 + return 0;
55336 +}
55337 +
55338 +const struct file_operations grsec_fops = {
55339 + .read = read_learn,
55340 + .write = write_grsec_handler,
55341 + .open = open_learn,
55342 + .release = close_learn,
55343 + .poll = poll_learn,
55344 +};
55345 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
55346 new file mode 100644
55347 index 0000000..39645c9
55348 --- /dev/null
55349 +++ b/grsecurity/gracl_res.c
55350 @@ -0,0 +1,68 @@
55351 +#include <linux/kernel.h>
55352 +#include <linux/sched.h>
55353 +#include <linux/gracl.h>
55354 +#include <linux/grinternal.h>
55355 +
55356 +static const char *restab_log[] = {
55357 + [RLIMIT_CPU] = "RLIMIT_CPU",
55358 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
55359 + [RLIMIT_DATA] = "RLIMIT_DATA",
55360 + [RLIMIT_STACK] = "RLIMIT_STACK",
55361 + [RLIMIT_CORE] = "RLIMIT_CORE",
55362 + [RLIMIT_RSS] = "RLIMIT_RSS",
55363 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
55364 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
55365 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
55366 + [RLIMIT_AS] = "RLIMIT_AS",
55367 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
55368 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
55369 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
55370 + [RLIMIT_NICE] = "RLIMIT_NICE",
55371 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
55372 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
55373 + [GR_CRASH_RES] = "RLIMIT_CRASH"
55374 +};
55375 +
55376 +void
55377 +gr_log_resource(const struct task_struct *task,
55378 + const int res, const unsigned long wanted, const int gt)
55379 +{
55380 + const struct cred *cred;
55381 + unsigned long rlim;
55382 +
55383 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
55384 + return;
55385 +
55386 + // resource not yet supported
55387 + if (unlikely(!restab_log[res]))
55388 + return;
55389 +
55390 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
55391 + rlim = task_rlimit_max(task, res);
55392 + else
55393 + rlim = task_rlimit(task, res);
55394 +
55395 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
55396 + return;
55397 +
55398 + rcu_read_lock();
55399 + cred = __task_cred(task);
55400 +
55401 + if (res == RLIMIT_NPROC &&
55402 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
55403 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
55404 + goto out_rcu_unlock;
55405 + else if (res == RLIMIT_MEMLOCK &&
55406 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
55407 + goto out_rcu_unlock;
55408 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
55409 + goto out_rcu_unlock;
55410 + rcu_read_unlock();
55411 +
55412 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
55413 +
55414 + return;
55415 +out_rcu_unlock:
55416 + rcu_read_unlock();
55417 + return;
55418 +}
55419 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
55420 new file mode 100644
55421 index 0000000..5556be3
55422 --- /dev/null
55423 +++ b/grsecurity/gracl_segv.c
55424 @@ -0,0 +1,299 @@
55425 +#include <linux/kernel.h>
55426 +#include <linux/mm.h>
55427 +#include <asm/uaccess.h>
55428 +#include <asm/errno.h>
55429 +#include <asm/mman.h>
55430 +#include <net/sock.h>
55431 +#include <linux/file.h>
55432 +#include <linux/fs.h>
55433 +#include <linux/net.h>
55434 +#include <linux/in.h>
55435 +#include <linux/slab.h>
55436 +#include <linux/types.h>
55437 +#include <linux/sched.h>
55438 +#include <linux/timer.h>
55439 +#include <linux/gracl.h>
55440 +#include <linux/grsecurity.h>
55441 +#include <linux/grinternal.h>
55442 +
55443 +static struct crash_uid *uid_set;
55444 +static unsigned short uid_used;
55445 +static DEFINE_SPINLOCK(gr_uid_lock);
55446 +extern rwlock_t gr_inode_lock;
55447 +extern struct acl_subject_label *
55448 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
55449 + struct acl_role_label *role);
55450 +
55451 +#ifdef CONFIG_BTRFS_FS
55452 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
55453 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
55454 +#endif
55455 +
55456 +static inline dev_t __get_dev(const struct dentry *dentry)
55457 +{
55458 +#ifdef CONFIG_BTRFS_FS
55459 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
55460 + return get_btrfs_dev_from_inode(dentry->d_inode);
55461 + else
55462 +#endif
55463 + return dentry->d_inode->i_sb->s_dev;
55464 +}
55465 +
55466 +int
55467 +gr_init_uidset(void)
55468 +{
55469 + uid_set =
55470 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
55471 + uid_used = 0;
55472 +
55473 + return uid_set ? 1 : 0;
55474 +}
55475 +
55476 +void
55477 +gr_free_uidset(void)
55478 +{
55479 + if (uid_set)
55480 + kfree(uid_set);
55481 +
55482 + return;
55483 +}
55484 +
55485 +int
55486 +gr_find_uid(const uid_t uid)
55487 +{
55488 + struct crash_uid *tmp = uid_set;
55489 + uid_t buid;
55490 + int low = 0, high = uid_used - 1, mid;
55491 +
55492 + while (high >= low) {
55493 + mid = (low + high) >> 1;
55494 + buid = tmp[mid].uid;
55495 + if (buid == uid)
55496 + return mid;
55497 + if (buid > uid)
55498 + high = mid - 1;
55499 + if (buid < uid)
55500 + low = mid + 1;
55501 + }
55502 +
55503 + return -1;
55504 +}
55505 +
55506 +static __inline__ void
55507 +gr_insertsort(void)
55508 +{
55509 + unsigned short i, j;
55510 + struct crash_uid index;
55511 +
55512 + for (i = 1; i < uid_used; i++) {
55513 + index = uid_set[i];
55514 + j = i;
55515 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
55516 + uid_set[j] = uid_set[j - 1];
55517 + j--;
55518 + }
55519 + uid_set[j] = index;
55520 + }
55521 +
55522 + return;
55523 +}
55524 +
55525 +static __inline__ void
55526 +gr_insert_uid(const uid_t uid, const unsigned long expires)
55527 +{
55528 + int loc;
55529 +
55530 + if (uid_used == GR_UIDTABLE_MAX)
55531 + return;
55532 +
55533 + loc = gr_find_uid(uid);
55534 +
55535 + if (loc >= 0) {
55536 + uid_set[loc].expires = expires;
55537 + return;
55538 + }
55539 +
55540 + uid_set[uid_used].uid = uid;
55541 + uid_set[uid_used].expires = expires;
55542 + uid_used++;
55543 +
55544 + gr_insertsort();
55545 +
55546 + return;
55547 +}
55548 +
55549 +void
55550 +gr_remove_uid(const unsigned short loc)
55551 +{
55552 + unsigned short i;
55553 +
55554 + for (i = loc + 1; i < uid_used; i++)
55555 + uid_set[i - 1] = uid_set[i];
55556 +
55557 + uid_used--;
55558 +
55559 + return;
55560 +}
55561 +
55562 +int
55563 +gr_check_crash_uid(const uid_t uid)
55564 +{
55565 + int loc;
55566 + int ret = 0;
55567 +
55568 + if (unlikely(!gr_acl_is_enabled()))
55569 + return 0;
55570 +
55571 + spin_lock(&gr_uid_lock);
55572 + loc = gr_find_uid(uid);
55573 +
55574 + if (loc < 0)
55575 + goto out_unlock;
55576 +
55577 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
55578 + gr_remove_uid(loc);
55579 + else
55580 + ret = 1;
55581 +
55582 +out_unlock:
55583 + spin_unlock(&gr_uid_lock);
55584 + return ret;
55585 +}
55586 +
55587 +static __inline__ int
55588 +proc_is_setxid(const struct cred *cred)
55589 +{
55590 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
55591 + cred->uid != cred->fsuid)
55592 + return 1;
55593 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
55594 + cred->gid != cred->fsgid)
55595 + return 1;
55596 +
55597 + return 0;
55598 +}
55599 +
55600 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
55601 +
55602 +void
55603 +gr_handle_crash(struct task_struct *task, const int sig)
55604 +{
55605 + struct acl_subject_label *curr;
55606 + struct task_struct *tsk, *tsk2;
55607 + const struct cred *cred;
55608 + const struct cred *cred2;
55609 +
55610 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
55611 + return;
55612 +
55613 + if (unlikely(!gr_acl_is_enabled()))
55614 + return;
55615 +
55616 + curr = task->acl;
55617 +
55618 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
55619 + return;
55620 +
55621 + if (time_before_eq(curr->expires, get_seconds())) {
55622 + curr->expires = 0;
55623 + curr->crashes = 0;
55624 + }
55625 +
55626 + curr->crashes++;
55627 +
55628 + if (!curr->expires)
55629 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
55630 +
55631 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55632 + time_after(curr->expires, get_seconds())) {
55633 + rcu_read_lock();
55634 + cred = __task_cred(task);
55635 + if (cred->uid && proc_is_setxid(cred)) {
55636 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55637 + spin_lock(&gr_uid_lock);
55638 + gr_insert_uid(cred->uid, curr->expires);
55639 + spin_unlock(&gr_uid_lock);
55640 + curr->expires = 0;
55641 + curr->crashes = 0;
55642 + read_lock(&tasklist_lock);
55643 + do_each_thread(tsk2, tsk) {
55644 + cred2 = __task_cred(tsk);
55645 + if (tsk != task && cred2->uid == cred->uid)
55646 + gr_fake_force_sig(SIGKILL, tsk);
55647 + } while_each_thread(tsk2, tsk);
55648 + read_unlock(&tasklist_lock);
55649 + } else {
55650 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55651 + read_lock(&tasklist_lock);
55652 + read_lock(&grsec_exec_file_lock);
55653 + do_each_thread(tsk2, tsk) {
55654 + if (likely(tsk != task)) {
55655 + // if this thread has the same subject as the one that triggered
55656 + // RES_CRASH and it's the same binary, kill it
55657 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
55658 + gr_fake_force_sig(SIGKILL, tsk);
55659 + }
55660 + } while_each_thread(tsk2, tsk);
55661 + read_unlock(&grsec_exec_file_lock);
55662 + read_unlock(&tasklist_lock);
55663 + }
55664 + rcu_read_unlock();
55665 + }
55666 +
55667 + return;
55668 +}
55669 +
55670 +int
55671 +gr_check_crash_exec(const struct file *filp)
55672 +{
55673 + struct acl_subject_label *curr;
55674 +
55675 + if (unlikely(!gr_acl_is_enabled()))
55676 + return 0;
55677 +
55678 + read_lock(&gr_inode_lock);
55679 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
55680 + __get_dev(filp->f_path.dentry),
55681 + current->role);
55682 + read_unlock(&gr_inode_lock);
55683 +
55684 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
55685 + (!curr->crashes && !curr->expires))
55686 + return 0;
55687 +
55688 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55689 + time_after(curr->expires, get_seconds()))
55690 + return 1;
55691 + else if (time_before_eq(curr->expires, get_seconds())) {
55692 + curr->crashes = 0;
55693 + curr->expires = 0;
55694 + }
55695 +
55696 + return 0;
55697 +}
55698 +
55699 +void
55700 +gr_handle_alertkill(struct task_struct *task)
55701 +{
55702 + struct acl_subject_label *curracl;
55703 + __u32 curr_ip;
55704 + struct task_struct *p, *p2;
55705 +
55706 + if (unlikely(!gr_acl_is_enabled()))
55707 + return;
55708 +
55709 + curracl = task->acl;
55710 + curr_ip = task->signal->curr_ip;
55711 +
55712 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
55713 + read_lock(&tasklist_lock);
55714 + do_each_thread(p2, p) {
55715 + if (p->signal->curr_ip == curr_ip)
55716 + gr_fake_force_sig(SIGKILL, p);
55717 + } while_each_thread(p2, p);
55718 + read_unlock(&tasklist_lock);
55719 + } else if (curracl->mode & GR_KILLPROC)
55720 + gr_fake_force_sig(SIGKILL, task);
55721 +
55722 + return;
55723 +}
55724 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
55725 new file mode 100644
55726 index 0000000..9d83a69
55727 --- /dev/null
55728 +++ b/grsecurity/gracl_shm.c
55729 @@ -0,0 +1,40 @@
55730 +#include <linux/kernel.h>
55731 +#include <linux/mm.h>
55732 +#include <linux/sched.h>
55733 +#include <linux/file.h>
55734 +#include <linux/ipc.h>
55735 +#include <linux/gracl.h>
55736 +#include <linux/grsecurity.h>
55737 +#include <linux/grinternal.h>
55738 +
55739 +int
55740 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55741 + const time_t shm_createtime, const uid_t cuid, const int shmid)
55742 +{
55743 + struct task_struct *task;
55744 +
55745 + if (!gr_acl_is_enabled())
55746 + return 1;
55747 +
55748 + rcu_read_lock();
55749 + read_lock(&tasklist_lock);
55750 +
55751 + task = find_task_by_vpid(shm_cprid);
55752 +
55753 + if (unlikely(!task))
55754 + task = find_task_by_vpid(shm_lapid);
55755 +
55756 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
55757 + (task->pid == shm_lapid)) &&
55758 + (task->acl->mode & GR_PROTSHM) &&
55759 + (task->acl != current->acl))) {
55760 + read_unlock(&tasklist_lock);
55761 + rcu_read_unlock();
55762 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
55763 + return 0;
55764 + }
55765 + read_unlock(&tasklist_lock);
55766 + rcu_read_unlock();
55767 +
55768 + return 1;
55769 +}
55770 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
55771 new file mode 100644
55772 index 0000000..bc0be01
55773 --- /dev/null
55774 +++ b/grsecurity/grsec_chdir.c
55775 @@ -0,0 +1,19 @@
55776 +#include <linux/kernel.h>
55777 +#include <linux/sched.h>
55778 +#include <linux/fs.h>
55779 +#include <linux/file.h>
55780 +#include <linux/grsecurity.h>
55781 +#include <linux/grinternal.h>
55782 +
55783 +void
55784 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
55785 +{
55786 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55787 + if ((grsec_enable_chdir && grsec_enable_group &&
55788 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
55789 + !grsec_enable_group)) {
55790 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
55791 + }
55792 +#endif
55793 + return;
55794 +}
55795 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
55796 new file mode 100644
55797 index 0000000..9807ee2
55798 --- /dev/null
55799 +++ b/grsecurity/grsec_chroot.c
55800 @@ -0,0 +1,368 @@
55801 +#include <linux/kernel.h>
55802 +#include <linux/module.h>
55803 +#include <linux/sched.h>
55804 +#include <linux/file.h>
55805 +#include <linux/fs.h>
55806 +#include <linux/mount.h>
55807 +#include <linux/types.h>
55808 +#include "../fs/mount.h"
55809 +#include <linux/grsecurity.h>
55810 +#include <linux/grinternal.h>
55811 +
55812 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
55813 +{
55814 +#ifdef CONFIG_GRKERNSEC
55815 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
55816 + path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
55817 + task->gr_is_chrooted = 1;
55818 + else
55819 + task->gr_is_chrooted = 0;
55820 +
55821 + task->gr_chroot_dentry = path->dentry;
55822 +#endif
55823 + return;
55824 +}
55825 +
55826 +void gr_clear_chroot_entries(struct task_struct *task)
55827 +{
55828 +#ifdef CONFIG_GRKERNSEC
55829 + task->gr_is_chrooted = 0;
55830 + task->gr_chroot_dentry = NULL;
55831 +#endif
55832 + return;
55833 +}
55834 +
55835 +int
55836 +gr_handle_chroot_unix(const pid_t pid)
55837 +{
55838 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55839 + struct task_struct *p;
55840 +
55841 + if (unlikely(!grsec_enable_chroot_unix))
55842 + return 1;
55843 +
55844 + if (likely(!proc_is_chrooted(current)))
55845 + return 1;
55846 +
55847 + rcu_read_lock();
55848 + read_lock(&tasklist_lock);
55849 + p = find_task_by_vpid_unrestricted(pid);
55850 + if (unlikely(p && !have_same_root(current, p))) {
55851 + read_unlock(&tasklist_lock);
55852 + rcu_read_unlock();
55853 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
55854 + return 0;
55855 + }
55856 + read_unlock(&tasklist_lock);
55857 + rcu_read_unlock();
55858 +#endif
55859 + return 1;
55860 +}
55861 +
55862 +int
55863 +gr_handle_chroot_nice(void)
55864 +{
55865 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55866 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
55867 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
55868 + return -EPERM;
55869 + }
55870 +#endif
55871 + return 0;
55872 +}
55873 +
55874 +int
55875 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
55876 +{
55877 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55878 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
55879 + && proc_is_chrooted(current)) {
55880 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
55881 + return -EACCES;
55882 + }
55883 +#endif
55884 + return 0;
55885 +}
55886 +
55887 +int
55888 +gr_handle_chroot_rawio(const struct inode *inode)
55889 +{
55890 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55891 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
55892 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
55893 + return 1;
55894 +#endif
55895 + return 0;
55896 +}
55897 +
55898 +int
55899 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
55900 +{
55901 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55902 + struct task_struct *p;
55903 + int ret = 0;
55904 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
55905 + return ret;
55906 +
55907 + read_lock(&tasklist_lock);
55908 + do_each_pid_task(pid, type, p) {
55909 + if (!have_same_root(current, p)) {
55910 + ret = 1;
55911 + goto out;
55912 + }
55913 + } while_each_pid_task(pid, type, p);
55914 +out:
55915 + read_unlock(&tasklist_lock);
55916 + return ret;
55917 +#endif
55918 + return 0;
55919 +}
55920 +
55921 +int
55922 +gr_pid_is_chrooted(struct task_struct *p)
55923 +{
55924 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55925 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
55926 + return 0;
55927 +
55928 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
55929 + !have_same_root(current, p)) {
55930 + return 1;
55931 + }
55932 +#endif
55933 + return 0;
55934 +}
55935 +
55936 +EXPORT_SYMBOL(gr_pid_is_chrooted);
55937 +
55938 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
55939 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
55940 +{
55941 + struct path path, currentroot;
55942 + int ret = 0;
55943 +
55944 + path.dentry = (struct dentry *)u_dentry;
55945 + path.mnt = (struct vfsmount *)u_mnt;
55946 + get_fs_root(current->fs, &currentroot);
55947 + if (path_is_under(&path, &currentroot))
55948 + ret = 1;
55949 + path_put(&currentroot);
55950 +
55951 + return ret;
55952 +}
55953 +#endif
55954 +
55955 +int
55956 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
55957 +{
55958 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55959 + if (!grsec_enable_chroot_fchdir)
55960 + return 1;
55961 +
55962 + if (!proc_is_chrooted(current))
55963 + return 1;
55964 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
55965 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
55966 + return 0;
55967 + }
55968 +#endif
55969 + return 1;
55970 +}
55971 +
55972 +int
55973 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55974 + const time_t shm_createtime)
55975 +{
55976 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55977 + struct task_struct *p;
55978 + time_t starttime;
55979 +
55980 + if (unlikely(!grsec_enable_chroot_shmat))
55981 + return 1;
55982 +
55983 + if (likely(!proc_is_chrooted(current)))
55984 + return 1;
55985 +
55986 + rcu_read_lock();
55987 + read_lock(&tasklist_lock);
55988 +
55989 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
55990 + starttime = p->start_time.tv_sec;
55991 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
55992 + if (have_same_root(current, p)) {
55993 + goto allow;
55994 + } else {
55995 + read_unlock(&tasklist_lock);
55996 + rcu_read_unlock();
55997 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
55998 + return 0;
55999 + }
56000 + }
56001 + /* creator exited, pid reuse, fall through to next check */
56002 + }
56003 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56004 + if (unlikely(!have_same_root(current, p))) {
56005 + read_unlock(&tasklist_lock);
56006 + rcu_read_unlock();
56007 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56008 + return 0;
56009 + }
56010 + }
56011 +
56012 +allow:
56013 + read_unlock(&tasklist_lock);
56014 + rcu_read_unlock();
56015 +#endif
56016 + return 1;
56017 +}
56018 +
56019 +void
56020 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56021 +{
56022 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56023 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56024 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56025 +#endif
56026 + return;
56027 +}
56028 +
56029 +int
56030 +gr_handle_chroot_mknod(const struct dentry *dentry,
56031 + const struct vfsmount *mnt, const int mode)
56032 +{
56033 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56034 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56035 + proc_is_chrooted(current)) {
56036 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
56037 + return -EPERM;
56038 + }
56039 +#endif
56040 + return 0;
56041 +}
56042 +
56043 +int
56044 +gr_handle_chroot_mount(const struct dentry *dentry,
56045 + const struct vfsmount *mnt, const char *dev_name)
56046 +{
56047 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56048 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
56049 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
56050 + return -EPERM;
56051 + }
56052 +#endif
56053 + return 0;
56054 +}
56055 +
56056 +int
56057 +gr_handle_chroot_pivot(void)
56058 +{
56059 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56060 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
56061 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
56062 + return -EPERM;
56063 + }
56064 +#endif
56065 + return 0;
56066 +}
56067 +
56068 +int
56069 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
56070 +{
56071 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56072 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
56073 + !gr_is_outside_chroot(dentry, mnt)) {
56074 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
56075 + return -EPERM;
56076 + }
56077 +#endif
56078 + return 0;
56079 +}
56080 +
56081 +extern const char *captab_log[];
56082 +extern int captab_log_entries;
56083 +
56084 +int
56085 +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56086 +{
56087 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56088 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56089 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56090 + if (cap_raised(chroot_caps, cap)) {
56091 + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
56092 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
56093 + }
56094 + return 0;
56095 + }
56096 + }
56097 +#endif
56098 + return 1;
56099 +}
56100 +
56101 +int
56102 +gr_chroot_is_capable(const int cap)
56103 +{
56104 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56105 + return gr_task_chroot_is_capable(current, current_cred(), cap);
56106 +#endif
56107 + return 1;
56108 +}
56109 +
56110 +int
56111 +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
56112 +{
56113 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56114 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56115 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56116 + if (cap_raised(chroot_caps, cap)) {
56117 + return 0;
56118 + }
56119 + }
56120 +#endif
56121 + return 1;
56122 +}
56123 +
56124 +int
56125 +gr_chroot_is_capable_nolog(const int cap)
56126 +{
56127 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56128 + return gr_task_chroot_is_capable_nolog(current, cap);
56129 +#endif
56130 + return 1;
56131 +}
56132 +
56133 +int
56134 +gr_handle_chroot_sysctl(const int op)
56135 +{
56136 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56137 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
56138 + proc_is_chrooted(current))
56139 + return -EACCES;
56140 +#endif
56141 + return 0;
56142 +}
56143 +
56144 +void
56145 +gr_handle_chroot_chdir(struct path *path)
56146 +{
56147 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56148 + if (grsec_enable_chroot_chdir)
56149 + set_fs_pwd(current->fs, path);
56150 +#endif
56151 + return;
56152 +}
56153 +
56154 +int
56155 +gr_handle_chroot_chmod(const struct dentry *dentry,
56156 + const struct vfsmount *mnt, const int mode)
56157 +{
56158 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56159 + /* allow chmod +s on directories, but not files */
56160 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
56161 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56162 + proc_is_chrooted(current)) {
56163 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56164 + return -EPERM;
56165 + }
56166 +#endif
56167 + return 0;
56168 +}
56169 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
56170 new file mode 100644
56171 index 0000000..213ad8b
56172 --- /dev/null
56173 +++ b/grsecurity/grsec_disabled.c
56174 @@ -0,0 +1,437 @@
56175 +#include <linux/kernel.h>
56176 +#include <linux/module.h>
56177 +#include <linux/sched.h>
56178 +#include <linux/file.h>
56179 +#include <linux/fs.h>
56180 +#include <linux/kdev_t.h>
56181 +#include <linux/net.h>
56182 +#include <linux/in.h>
56183 +#include <linux/ip.h>
56184 +#include <linux/skbuff.h>
56185 +#include <linux/sysctl.h>
56186 +
56187 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56188 +void
56189 +pax_set_initial_flags(struct linux_binprm *bprm)
56190 +{
56191 + return;
56192 +}
56193 +#endif
56194 +
56195 +#ifdef CONFIG_SYSCTL
56196 +__u32
56197 +gr_handle_sysctl(const struct ctl_table * table, const int op)
56198 +{
56199 + return 0;
56200 +}
56201 +#endif
56202 +
56203 +#ifdef CONFIG_TASKSTATS
56204 +int gr_is_taskstats_denied(int pid)
56205 +{
56206 + return 0;
56207 +}
56208 +#endif
56209 +
56210 +int
56211 +gr_acl_is_enabled(void)
56212 +{
56213 + return 0;
56214 +}
56215 +
56216 +void
56217 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56218 +{
56219 + return;
56220 +}
56221 +
56222 +int
56223 +gr_handle_rawio(const struct inode *inode)
56224 +{
56225 + return 0;
56226 +}
56227 +
56228 +void
56229 +gr_acl_handle_psacct(struct task_struct *task, const long code)
56230 +{
56231 + return;
56232 +}
56233 +
56234 +int
56235 +gr_handle_ptrace(struct task_struct *task, const long request)
56236 +{
56237 + return 0;
56238 +}
56239 +
56240 +int
56241 +gr_handle_proc_ptrace(struct task_struct *task)
56242 +{
56243 + return 0;
56244 +}
56245 +
56246 +void
56247 +gr_learn_resource(const struct task_struct *task,
56248 + const int res, const unsigned long wanted, const int gt)
56249 +{
56250 + return;
56251 +}
56252 +
56253 +int
56254 +gr_set_acls(const int type)
56255 +{
56256 + return 0;
56257 +}
56258 +
56259 +int
56260 +gr_check_hidden_task(const struct task_struct *tsk)
56261 +{
56262 + return 0;
56263 +}
56264 +
56265 +int
56266 +gr_check_protected_task(const struct task_struct *task)
56267 +{
56268 + return 0;
56269 +}
56270 +
56271 +int
56272 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
56273 +{
56274 + return 0;
56275 +}
56276 +
56277 +void
56278 +gr_copy_label(struct task_struct *tsk)
56279 +{
56280 + return;
56281 +}
56282 +
56283 +void
56284 +gr_set_pax_flags(struct task_struct *task)
56285 +{
56286 + return;
56287 +}
56288 +
56289 +int
56290 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
56291 + const int unsafe_share)
56292 +{
56293 + return 0;
56294 +}
56295 +
56296 +void
56297 +gr_handle_delete(const ino_t ino, const dev_t dev)
56298 +{
56299 + return;
56300 +}
56301 +
56302 +void
56303 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56304 +{
56305 + return;
56306 +}
56307 +
56308 +void
56309 +gr_handle_crash(struct task_struct *task, const int sig)
56310 +{
56311 + return;
56312 +}
56313 +
56314 +int
56315 +gr_check_crash_exec(const struct file *filp)
56316 +{
56317 + return 0;
56318 +}
56319 +
56320 +int
56321 +gr_check_crash_uid(const uid_t uid)
56322 +{
56323 + return 0;
56324 +}
56325 +
56326 +void
56327 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56328 + struct dentry *old_dentry,
56329 + struct dentry *new_dentry,
56330 + struct vfsmount *mnt, const __u8 replace)
56331 +{
56332 + return;
56333 +}
56334 +
56335 +int
56336 +gr_search_socket(const int family, const int type, const int protocol)
56337 +{
56338 + return 1;
56339 +}
56340 +
56341 +int
56342 +gr_search_connectbind(const int mode, const struct socket *sock,
56343 + const struct sockaddr_in *addr)
56344 +{
56345 + return 0;
56346 +}
56347 +
56348 +void
56349 +gr_handle_alertkill(struct task_struct *task)
56350 +{
56351 + return;
56352 +}
56353 +
56354 +__u32
56355 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
56356 +{
56357 + return 1;
56358 +}
56359 +
56360 +__u32
56361 +gr_acl_handle_hidden_file(const struct dentry * dentry,
56362 + const struct vfsmount * mnt)
56363 +{
56364 + return 1;
56365 +}
56366 +
56367 +__u32
56368 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
56369 + int acc_mode)
56370 +{
56371 + return 1;
56372 +}
56373 +
56374 +__u32
56375 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56376 +{
56377 + return 1;
56378 +}
56379 +
56380 +__u32
56381 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
56382 +{
56383 + return 1;
56384 +}
56385 +
56386 +int
56387 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
56388 + unsigned int *vm_flags)
56389 +{
56390 + return 1;
56391 +}
56392 +
56393 +__u32
56394 +gr_acl_handle_truncate(const struct dentry * dentry,
56395 + const struct vfsmount * mnt)
56396 +{
56397 + return 1;
56398 +}
56399 +
56400 +__u32
56401 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
56402 +{
56403 + return 1;
56404 +}
56405 +
56406 +__u32
56407 +gr_acl_handle_access(const struct dentry * dentry,
56408 + const struct vfsmount * mnt, const int fmode)
56409 +{
56410 + return 1;
56411 +}
56412 +
56413 +__u32
56414 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
56415 + umode_t *mode)
56416 +{
56417 + return 1;
56418 +}
56419 +
56420 +__u32
56421 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
56422 +{
56423 + return 1;
56424 +}
56425 +
56426 +__u32
56427 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
56428 +{
56429 + return 1;
56430 +}
56431 +
56432 +void
56433 +grsecurity_init(void)
56434 +{
56435 + return;
56436 +}
56437 +
56438 +umode_t gr_acl_umask(void)
56439 +{
56440 + return 0;
56441 +}
56442 +
56443 +__u32
56444 +gr_acl_handle_mknod(const struct dentry * new_dentry,
56445 + const struct dentry * parent_dentry,
56446 + const struct vfsmount * parent_mnt,
56447 + const int mode)
56448 +{
56449 + return 1;
56450 +}
56451 +
56452 +__u32
56453 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
56454 + const struct dentry * parent_dentry,
56455 + const struct vfsmount * parent_mnt)
56456 +{
56457 + return 1;
56458 +}
56459 +
56460 +__u32
56461 +gr_acl_handle_symlink(const struct dentry * new_dentry,
56462 + const struct dentry * parent_dentry,
56463 + const struct vfsmount * parent_mnt, const char *from)
56464 +{
56465 + return 1;
56466 +}
56467 +
56468 +__u32
56469 +gr_acl_handle_link(const struct dentry * new_dentry,
56470 + const struct dentry * parent_dentry,
56471 + const struct vfsmount * parent_mnt,
56472 + const struct dentry * old_dentry,
56473 + const struct vfsmount * old_mnt, const char *to)
56474 +{
56475 + return 1;
56476 +}
56477 +
56478 +int
56479 +gr_acl_handle_rename(const struct dentry *new_dentry,
56480 + const struct dentry *parent_dentry,
56481 + const struct vfsmount *parent_mnt,
56482 + const struct dentry *old_dentry,
56483 + const struct inode *old_parent_inode,
56484 + const struct vfsmount *old_mnt, const char *newname)
56485 +{
56486 + return 0;
56487 +}
56488 +
56489 +int
56490 +gr_acl_handle_filldir(const struct file *file, const char *name,
56491 + const int namelen, const ino_t ino)
56492 +{
56493 + return 1;
56494 +}
56495 +
56496 +int
56497 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56498 + const time_t shm_createtime, const uid_t cuid, const int shmid)
56499 +{
56500 + return 1;
56501 +}
56502 +
56503 +int
56504 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
56505 +{
56506 + return 0;
56507 +}
56508 +
56509 +int
56510 +gr_search_accept(const struct socket *sock)
56511 +{
56512 + return 0;
56513 +}
56514 +
56515 +int
56516 +gr_search_listen(const struct socket *sock)
56517 +{
56518 + return 0;
56519 +}
56520 +
56521 +int
56522 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
56523 +{
56524 + return 0;
56525 +}
56526 +
56527 +__u32
56528 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
56529 +{
56530 + return 1;
56531 +}
56532 +
56533 +__u32
56534 +gr_acl_handle_creat(const struct dentry * dentry,
56535 + const struct dentry * p_dentry,
56536 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
56537 + const int imode)
56538 +{
56539 + return 1;
56540 +}
56541 +
56542 +void
56543 +gr_acl_handle_exit(void)
56544 +{
56545 + return;
56546 +}
56547 +
56548 +int
56549 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
56550 +{
56551 + return 1;
56552 +}
56553 +
56554 +void
56555 +gr_set_role_label(const uid_t uid, const gid_t gid)
56556 +{
56557 + return;
56558 +}
56559 +
56560 +int
56561 +gr_acl_handle_procpidmem(const struct task_struct *task)
56562 +{
56563 + return 0;
56564 +}
56565 +
56566 +int
56567 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
56568 +{
56569 + return 0;
56570 +}
56571 +
56572 +int
56573 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
56574 +{
56575 + return 0;
56576 +}
56577 +
56578 +void
56579 +gr_set_kernel_label(struct task_struct *task)
56580 +{
56581 + return;
56582 +}
56583 +
56584 +int
56585 +gr_check_user_change(int real, int effective, int fs)
56586 +{
56587 + return 0;
56588 +}
56589 +
56590 +int
56591 +gr_check_group_change(int real, int effective, int fs)
56592 +{
56593 + return 0;
56594 +}
56595 +
56596 +int gr_acl_enable_at_secure(void)
56597 +{
56598 + return 0;
56599 +}
56600 +
56601 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56602 +{
56603 + return dentry->d_inode->i_sb->s_dev;
56604 +}
56605 +
56606 +EXPORT_SYMBOL(gr_learn_resource);
56607 +EXPORT_SYMBOL(gr_set_kernel_label);
56608 +#ifdef CONFIG_SECURITY
56609 +EXPORT_SYMBOL(gr_check_user_change);
56610 +EXPORT_SYMBOL(gr_check_group_change);
56611 +#endif
56612 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
56613 new file mode 100644
56614 index 0000000..abfa971
56615 --- /dev/null
56616 +++ b/grsecurity/grsec_exec.c
56617 @@ -0,0 +1,174 @@
56618 +#include <linux/kernel.h>
56619 +#include <linux/sched.h>
56620 +#include <linux/file.h>
56621 +#include <linux/binfmts.h>
56622 +#include <linux/fs.h>
56623 +#include <linux/types.h>
56624 +#include <linux/grdefs.h>
56625 +#include <linux/grsecurity.h>
56626 +#include <linux/grinternal.h>
56627 +#include <linux/capability.h>
56628 +#include <linux/module.h>
56629 +
56630 +#include <asm/uaccess.h>
56631 +
56632 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56633 +static char gr_exec_arg_buf[132];
56634 +static DEFINE_MUTEX(gr_exec_arg_mutex);
56635 +#endif
56636 +
56637 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
56638 +
56639 +void
56640 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
56641 +{
56642 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56643 + char *grarg = gr_exec_arg_buf;
56644 + unsigned int i, x, execlen = 0;
56645 + char c;
56646 +
56647 + if (!((grsec_enable_execlog && grsec_enable_group &&
56648 + in_group_p(grsec_audit_gid))
56649 + || (grsec_enable_execlog && !grsec_enable_group)))
56650 + return;
56651 +
56652 + mutex_lock(&gr_exec_arg_mutex);
56653 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
56654 +
56655 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
56656 + const char __user *p;
56657 + unsigned int len;
56658 +
56659 + p = get_user_arg_ptr(argv, i);
56660 + if (IS_ERR(p))
56661 + goto log;
56662 +
56663 + len = strnlen_user(p, 128 - execlen);
56664 + if (len > 128 - execlen)
56665 + len = 128 - execlen;
56666 + else if (len > 0)
56667 + len--;
56668 + if (copy_from_user(grarg + execlen, p, len))
56669 + goto log;
56670 +
56671 + /* rewrite unprintable characters */
56672 + for (x = 0; x < len; x++) {
56673 + c = *(grarg + execlen + x);
56674 + if (c < 32 || c > 126)
56675 + *(grarg + execlen + x) = ' ';
56676 + }
56677 +
56678 + execlen += len;
56679 + *(grarg + execlen) = ' ';
56680 + *(grarg + execlen + 1) = '\0';
56681 + execlen++;
56682 + }
56683 +
56684 + log:
56685 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
56686 + bprm->file->f_path.mnt, grarg);
56687 + mutex_unlock(&gr_exec_arg_mutex);
56688 +#endif
56689 + return;
56690 +}
56691 +
56692 +#ifdef CONFIG_GRKERNSEC
56693 +extern int gr_acl_is_capable(const int cap);
56694 +extern int gr_acl_is_capable_nolog(const int cap);
56695 +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
56696 +extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
56697 +extern int gr_chroot_is_capable(const int cap);
56698 +extern int gr_chroot_is_capable_nolog(const int cap);
56699 +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
56700 +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
56701 +#endif
56702 +
56703 +const char *captab_log[] = {
56704 + "CAP_CHOWN",
56705 + "CAP_DAC_OVERRIDE",
56706 + "CAP_DAC_READ_SEARCH",
56707 + "CAP_FOWNER",
56708 + "CAP_FSETID",
56709 + "CAP_KILL",
56710 + "CAP_SETGID",
56711 + "CAP_SETUID",
56712 + "CAP_SETPCAP",
56713 + "CAP_LINUX_IMMUTABLE",
56714 + "CAP_NET_BIND_SERVICE",
56715 + "CAP_NET_BROADCAST",
56716 + "CAP_NET_ADMIN",
56717 + "CAP_NET_RAW",
56718 + "CAP_IPC_LOCK",
56719 + "CAP_IPC_OWNER",
56720 + "CAP_SYS_MODULE",
56721 + "CAP_SYS_RAWIO",
56722 + "CAP_SYS_CHROOT",
56723 + "CAP_SYS_PTRACE",
56724 + "CAP_SYS_PACCT",
56725 + "CAP_SYS_ADMIN",
56726 + "CAP_SYS_BOOT",
56727 + "CAP_SYS_NICE",
56728 + "CAP_SYS_RESOURCE",
56729 + "CAP_SYS_TIME",
56730 + "CAP_SYS_TTY_CONFIG",
56731 + "CAP_MKNOD",
56732 + "CAP_LEASE",
56733 + "CAP_AUDIT_WRITE",
56734 + "CAP_AUDIT_CONTROL",
56735 + "CAP_SETFCAP",
56736 + "CAP_MAC_OVERRIDE",
56737 + "CAP_MAC_ADMIN",
56738 + "CAP_SYSLOG",
56739 + "CAP_WAKE_ALARM"
56740 +};
56741 +
56742 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
56743 +
56744 +int gr_is_capable(const int cap)
56745 +{
56746 +#ifdef CONFIG_GRKERNSEC
56747 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
56748 + return 1;
56749 + return 0;
56750 +#else
56751 + return 1;
56752 +#endif
56753 +}
56754 +
56755 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56756 +{
56757 +#ifdef CONFIG_GRKERNSEC
56758 + if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
56759 + return 1;
56760 + return 0;
56761 +#else
56762 + return 1;
56763 +#endif
56764 +}
56765 +
56766 +int gr_is_capable_nolog(const int cap)
56767 +{
56768 +#ifdef CONFIG_GRKERNSEC
56769 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
56770 + return 1;
56771 + return 0;
56772 +#else
56773 + return 1;
56774 +#endif
56775 +}
56776 +
56777 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
56778 +{
56779 +#ifdef CONFIG_GRKERNSEC
56780 + if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
56781 + return 1;
56782 + return 0;
56783 +#else
56784 + return 1;
56785 +#endif
56786 +}
56787 +
56788 +EXPORT_SYMBOL(gr_is_capable);
56789 +EXPORT_SYMBOL(gr_is_capable_nolog);
56790 +EXPORT_SYMBOL(gr_task_is_capable);
56791 +EXPORT_SYMBOL(gr_task_is_capable_nolog);
56792 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
56793 new file mode 100644
56794 index 0000000..d3ee748
56795 --- /dev/null
56796 +++ b/grsecurity/grsec_fifo.c
56797 @@ -0,0 +1,24 @@
56798 +#include <linux/kernel.h>
56799 +#include <linux/sched.h>
56800 +#include <linux/fs.h>
56801 +#include <linux/file.h>
56802 +#include <linux/grinternal.h>
56803 +
56804 +int
56805 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
56806 + const struct dentry *dir, const int flag, const int acc_mode)
56807 +{
56808 +#ifdef CONFIG_GRKERNSEC_FIFO
56809 + const struct cred *cred = current_cred();
56810 +
56811 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
56812 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
56813 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
56814 + (cred->fsuid != dentry->d_inode->i_uid)) {
56815 + if (!inode_permission(dentry->d_inode, acc_mode))
56816 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
56817 + return -EACCES;
56818 + }
56819 +#endif
56820 + return 0;
56821 +}
56822 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
56823 new file mode 100644
56824 index 0000000..8ca18bf
56825 --- /dev/null
56826 +++ b/grsecurity/grsec_fork.c
56827 @@ -0,0 +1,23 @@
56828 +#include <linux/kernel.h>
56829 +#include <linux/sched.h>
56830 +#include <linux/grsecurity.h>
56831 +#include <linux/grinternal.h>
56832 +#include <linux/errno.h>
56833 +
56834 +void
56835 +gr_log_forkfail(const int retval)
56836 +{
56837 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
56838 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
56839 + switch (retval) {
56840 + case -EAGAIN:
56841 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
56842 + break;
56843 + case -ENOMEM:
56844 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
56845 + break;
56846 + }
56847 + }
56848 +#endif
56849 + return;
56850 +}
56851 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
56852 new file mode 100644
56853 index 0000000..01ddde4
56854 --- /dev/null
56855 +++ b/grsecurity/grsec_init.c
56856 @@ -0,0 +1,277 @@
56857 +#include <linux/kernel.h>
56858 +#include <linux/sched.h>
56859 +#include <linux/mm.h>
56860 +#include <linux/gracl.h>
56861 +#include <linux/slab.h>
56862 +#include <linux/vmalloc.h>
56863 +#include <linux/percpu.h>
56864 +#include <linux/module.h>
56865 +
56866 +int grsec_enable_ptrace_readexec;
56867 +int grsec_enable_setxid;
56868 +int grsec_enable_brute;
56869 +int grsec_enable_link;
56870 +int grsec_enable_dmesg;
56871 +int grsec_enable_harden_ptrace;
56872 +int grsec_enable_fifo;
56873 +int grsec_enable_execlog;
56874 +int grsec_enable_signal;
56875 +int grsec_enable_forkfail;
56876 +int grsec_enable_audit_ptrace;
56877 +int grsec_enable_time;
56878 +int grsec_enable_audit_textrel;
56879 +int grsec_enable_group;
56880 +int grsec_audit_gid;
56881 +int grsec_enable_chdir;
56882 +int grsec_enable_mount;
56883 +int grsec_enable_rofs;
56884 +int grsec_enable_chroot_findtask;
56885 +int grsec_enable_chroot_mount;
56886 +int grsec_enable_chroot_shmat;
56887 +int grsec_enable_chroot_fchdir;
56888 +int grsec_enable_chroot_double;
56889 +int grsec_enable_chroot_pivot;
56890 +int grsec_enable_chroot_chdir;
56891 +int grsec_enable_chroot_chmod;
56892 +int grsec_enable_chroot_mknod;
56893 +int grsec_enable_chroot_nice;
56894 +int grsec_enable_chroot_execlog;
56895 +int grsec_enable_chroot_caps;
56896 +int grsec_enable_chroot_sysctl;
56897 +int grsec_enable_chroot_unix;
56898 +int grsec_enable_tpe;
56899 +int grsec_tpe_gid;
56900 +int grsec_enable_blackhole;
56901 +#ifdef CONFIG_IPV6_MODULE
56902 +EXPORT_SYMBOL(grsec_enable_blackhole);
56903 +#endif
56904 +int grsec_lastack_retries;
56905 +int grsec_enable_tpe_all;
56906 +int grsec_enable_tpe_invert;
56907 +int grsec_enable_socket_all;
56908 +int grsec_socket_all_gid;
56909 +int grsec_enable_socket_client;
56910 +int grsec_socket_client_gid;
56911 +int grsec_enable_socket_server;
56912 +int grsec_socket_server_gid;
56913 +int grsec_resource_logging;
56914 +int grsec_disable_privio;
56915 +int grsec_enable_log_rwxmaps;
56916 +int grsec_lock;
56917 +
56918 +DEFINE_SPINLOCK(grsec_alert_lock);
56919 +unsigned long grsec_alert_wtime = 0;
56920 +unsigned long grsec_alert_fyet = 0;
56921 +
56922 +DEFINE_SPINLOCK(grsec_audit_lock);
56923 +
56924 +DEFINE_RWLOCK(grsec_exec_file_lock);
56925 +
56926 +char *gr_shared_page[4];
56927 +
56928 +char *gr_alert_log_fmt;
56929 +char *gr_audit_log_fmt;
56930 +char *gr_alert_log_buf;
56931 +char *gr_audit_log_buf;
56932 +
56933 +extern struct gr_arg *gr_usermode;
56934 +extern unsigned char *gr_system_salt;
56935 +extern unsigned char *gr_system_sum;
56936 +
56937 +void __init
56938 +grsecurity_init(void)
56939 +{
56940 + int j;
56941 + /* create the per-cpu shared pages */
56942 +
56943 +#ifdef CONFIG_X86
56944 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
56945 +#endif
56946 +
56947 + for (j = 0; j < 4; j++) {
56948 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
56949 + if (gr_shared_page[j] == NULL) {
56950 + panic("Unable to allocate grsecurity shared page");
56951 + return;
56952 + }
56953 + }
56954 +
56955 + /* allocate log buffers */
56956 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
56957 + if (!gr_alert_log_fmt) {
56958 + panic("Unable to allocate grsecurity alert log format buffer");
56959 + return;
56960 + }
56961 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
56962 + if (!gr_audit_log_fmt) {
56963 + panic("Unable to allocate grsecurity audit log format buffer");
56964 + return;
56965 + }
56966 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
56967 + if (!gr_alert_log_buf) {
56968 + panic("Unable to allocate grsecurity alert log buffer");
56969 + return;
56970 + }
56971 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
56972 + if (!gr_audit_log_buf) {
56973 + panic("Unable to allocate grsecurity audit log buffer");
56974 + return;
56975 + }
56976 +
56977 + /* allocate memory for authentication structure */
56978 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
56979 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
56980 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
56981 +
56982 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
56983 + panic("Unable to allocate grsecurity authentication structure");
56984 + return;
56985 + }
56986 +
56987 +
56988 +#ifdef CONFIG_GRKERNSEC_IO
56989 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
56990 + grsec_disable_privio = 1;
56991 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
56992 + grsec_disable_privio = 1;
56993 +#else
56994 + grsec_disable_privio = 0;
56995 +#endif
56996 +#endif
56997 +
56998 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56999 + /* for backward compatibility, tpe_invert always defaults to on if
57000 + enabled in the kernel
57001 + */
57002 + grsec_enable_tpe_invert = 1;
57003 +#endif
57004 +
57005 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57006 +#ifndef CONFIG_GRKERNSEC_SYSCTL
57007 + grsec_lock = 1;
57008 +#endif
57009 +
57010 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57011 + grsec_enable_audit_textrel = 1;
57012 +#endif
57013 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57014 + grsec_enable_log_rwxmaps = 1;
57015 +#endif
57016 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57017 + grsec_enable_group = 1;
57018 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
57019 +#endif
57020 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57021 + grsec_enable_ptrace_readexec = 1;
57022 +#endif
57023 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57024 + grsec_enable_chdir = 1;
57025 +#endif
57026 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57027 + grsec_enable_harden_ptrace = 1;
57028 +#endif
57029 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57030 + grsec_enable_mount = 1;
57031 +#endif
57032 +#ifdef CONFIG_GRKERNSEC_LINK
57033 + grsec_enable_link = 1;
57034 +#endif
57035 +#ifdef CONFIG_GRKERNSEC_BRUTE
57036 + grsec_enable_brute = 1;
57037 +#endif
57038 +#ifdef CONFIG_GRKERNSEC_DMESG
57039 + grsec_enable_dmesg = 1;
57040 +#endif
57041 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57042 + grsec_enable_blackhole = 1;
57043 + grsec_lastack_retries = 4;
57044 +#endif
57045 +#ifdef CONFIG_GRKERNSEC_FIFO
57046 + grsec_enable_fifo = 1;
57047 +#endif
57048 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57049 + grsec_enable_execlog = 1;
57050 +#endif
57051 +#ifdef CONFIG_GRKERNSEC_SETXID
57052 + grsec_enable_setxid = 1;
57053 +#endif
57054 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57055 + grsec_enable_signal = 1;
57056 +#endif
57057 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57058 + grsec_enable_forkfail = 1;
57059 +#endif
57060 +#ifdef CONFIG_GRKERNSEC_TIME
57061 + grsec_enable_time = 1;
57062 +#endif
57063 +#ifdef CONFIG_GRKERNSEC_RESLOG
57064 + grsec_resource_logging = 1;
57065 +#endif
57066 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57067 + grsec_enable_chroot_findtask = 1;
57068 +#endif
57069 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57070 + grsec_enable_chroot_unix = 1;
57071 +#endif
57072 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57073 + grsec_enable_chroot_mount = 1;
57074 +#endif
57075 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57076 + grsec_enable_chroot_fchdir = 1;
57077 +#endif
57078 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57079 + grsec_enable_chroot_shmat = 1;
57080 +#endif
57081 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57082 + grsec_enable_audit_ptrace = 1;
57083 +#endif
57084 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57085 + grsec_enable_chroot_double = 1;
57086 +#endif
57087 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57088 + grsec_enable_chroot_pivot = 1;
57089 +#endif
57090 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57091 + grsec_enable_chroot_chdir = 1;
57092 +#endif
57093 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57094 + grsec_enable_chroot_chmod = 1;
57095 +#endif
57096 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57097 + grsec_enable_chroot_mknod = 1;
57098 +#endif
57099 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57100 + grsec_enable_chroot_nice = 1;
57101 +#endif
57102 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57103 + grsec_enable_chroot_execlog = 1;
57104 +#endif
57105 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57106 + grsec_enable_chroot_caps = 1;
57107 +#endif
57108 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57109 + grsec_enable_chroot_sysctl = 1;
57110 +#endif
57111 +#ifdef CONFIG_GRKERNSEC_TPE
57112 + grsec_enable_tpe = 1;
57113 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
57114 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
57115 + grsec_enable_tpe_all = 1;
57116 +#endif
57117 +#endif
57118 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57119 + grsec_enable_socket_all = 1;
57120 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
57121 +#endif
57122 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57123 + grsec_enable_socket_client = 1;
57124 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
57125 +#endif
57126 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57127 + grsec_enable_socket_server = 1;
57128 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
57129 +#endif
57130 +#endif
57131 +
57132 + return;
57133 +}
57134 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
57135 new file mode 100644
57136 index 0000000..3efe141
57137 --- /dev/null
57138 +++ b/grsecurity/grsec_link.c
57139 @@ -0,0 +1,43 @@
57140 +#include <linux/kernel.h>
57141 +#include <linux/sched.h>
57142 +#include <linux/fs.h>
57143 +#include <linux/file.h>
57144 +#include <linux/grinternal.h>
57145 +
57146 +int
57147 +gr_handle_follow_link(const struct inode *parent,
57148 + const struct inode *inode,
57149 + const struct dentry *dentry, const struct vfsmount *mnt)
57150 +{
57151 +#ifdef CONFIG_GRKERNSEC_LINK
57152 + const struct cred *cred = current_cred();
57153 +
57154 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57155 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57156 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57157 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57158 + return -EACCES;
57159 + }
57160 +#endif
57161 + return 0;
57162 +}
57163 +
57164 +int
57165 +gr_handle_hardlink(const struct dentry *dentry,
57166 + const struct vfsmount *mnt,
57167 + struct inode *inode, const int mode, const char *to)
57168 +{
57169 +#ifdef CONFIG_GRKERNSEC_LINK
57170 + const struct cred *cred = current_cred();
57171 +
57172 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57173 + (!S_ISREG(mode) || (mode & S_ISUID) ||
57174 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
57175 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
57176 + !capable(CAP_FOWNER) && cred->uid) {
57177 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57178 + return -EPERM;
57179 + }
57180 +#endif
57181 + return 0;
57182 +}
57183 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
57184 new file mode 100644
57185 index 0000000..a45d2e9
57186 --- /dev/null
57187 +++ b/grsecurity/grsec_log.c
57188 @@ -0,0 +1,322 @@
57189 +#include <linux/kernel.h>
57190 +#include <linux/sched.h>
57191 +#include <linux/file.h>
57192 +#include <linux/tty.h>
57193 +#include <linux/fs.h>
57194 +#include <linux/grinternal.h>
57195 +
57196 +#ifdef CONFIG_TREE_PREEMPT_RCU
57197 +#define DISABLE_PREEMPT() preempt_disable()
57198 +#define ENABLE_PREEMPT() preempt_enable()
57199 +#else
57200 +#define DISABLE_PREEMPT()
57201 +#define ENABLE_PREEMPT()
57202 +#endif
57203 +
57204 +#define BEGIN_LOCKS(x) \
57205 + DISABLE_PREEMPT(); \
57206 + rcu_read_lock(); \
57207 + read_lock(&tasklist_lock); \
57208 + read_lock(&grsec_exec_file_lock); \
57209 + if (x != GR_DO_AUDIT) \
57210 + spin_lock(&grsec_alert_lock); \
57211 + else \
57212 + spin_lock(&grsec_audit_lock)
57213 +
57214 +#define END_LOCKS(x) \
57215 + if (x != GR_DO_AUDIT) \
57216 + spin_unlock(&grsec_alert_lock); \
57217 + else \
57218 + spin_unlock(&grsec_audit_lock); \
57219 + read_unlock(&grsec_exec_file_lock); \
57220 + read_unlock(&tasklist_lock); \
57221 + rcu_read_unlock(); \
57222 + ENABLE_PREEMPT(); \
57223 + if (x == GR_DONT_AUDIT) \
57224 + gr_handle_alertkill(current)
57225 +
57226 +enum {
57227 + FLOODING,
57228 + NO_FLOODING
57229 +};
57230 +
57231 +extern char *gr_alert_log_fmt;
57232 +extern char *gr_audit_log_fmt;
57233 +extern char *gr_alert_log_buf;
57234 +extern char *gr_audit_log_buf;
57235 +
57236 +static int gr_log_start(int audit)
57237 +{
57238 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57239 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57240 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57241 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57242 + unsigned long curr_secs = get_seconds();
57243 +
57244 + if (audit == GR_DO_AUDIT)
57245 + goto set_fmt;
57246 +
57247 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57248 + grsec_alert_wtime = curr_secs;
57249 + grsec_alert_fyet = 0;
57250 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57251 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
57252 + grsec_alert_fyet++;
57253 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
57254 + grsec_alert_wtime = curr_secs;
57255 + grsec_alert_fyet++;
57256 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57257 + return FLOODING;
57258 + }
57259 + else return FLOODING;
57260 +
57261 +set_fmt:
57262 +#endif
57263 + memset(buf, 0, PAGE_SIZE);
57264 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
57265 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
57266 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57267 + } else if (current->signal->curr_ip) {
57268 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
57269 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
57270 + } else if (gr_acl_is_enabled()) {
57271 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
57272 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57273 + } else {
57274 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
57275 + strcpy(buf, fmt);
57276 + }
57277 +
57278 + return NO_FLOODING;
57279 +}
57280 +
57281 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57282 + __attribute__ ((format (printf, 2, 0)));
57283 +
57284 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57285 +{
57286 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57287 + unsigned int len = strlen(buf);
57288 +
57289 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57290 +
57291 + return;
57292 +}
57293 +
57294 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57295 + __attribute__ ((format (printf, 2, 3)));
57296 +
57297 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57298 +{
57299 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57300 + unsigned int len = strlen(buf);
57301 + va_list ap;
57302 +
57303 + va_start(ap, msg);
57304 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57305 + va_end(ap);
57306 +
57307 + return;
57308 +}
57309 +
57310 +static void gr_log_end(int audit, int append_default)
57311 +{
57312 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57313 +
57314 + if (append_default) {
57315 + unsigned int len = strlen(buf);
57316 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
57317 + }
57318 +
57319 + printk("%s\n", buf);
57320 +
57321 + return;
57322 +}
57323 +
57324 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
57325 +{
57326 + int logtype;
57327 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
57328 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
57329 + void *voidptr = NULL;
57330 + int num1 = 0, num2 = 0;
57331 + unsigned long ulong1 = 0, ulong2 = 0;
57332 + struct dentry *dentry = NULL;
57333 + struct vfsmount *mnt = NULL;
57334 + struct file *file = NULL;
57335 + struct task_struct *task = NULL;
57336 + const struct cred *cred, *pcred;
57337 + va_list ap;
57338 +
57339 + BEGIN_LOCKS(audit);
57340 + logtype = gr_log_start(audit);
57341 + if (logtype == FLOODING) {
57342 + END_LOCKS(audit);
57343 + return;
57344 + }
57345 + va_start(ap, argtypes);
57346 + switch (argtypes) {
57347 + case GR_TTYSNIFF:
57348 + task = va_arg(ap, struct task_struct *);
57349 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
57350 + break;
57351 + case GR_SYSCTL_HIDDEN:
57352 + str1 = va_arg(ap, char *);
57353 + gr_log_middle_varargs(audit, msg, result, str1);
57354 + break;
57355 + case GR_RBAC:
57356 + dentry = va_arg(ap, struct dentry *);
57357 + mnt = va_arg(ap, struct vfsmount *);
57358 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
57359 + break;
57360 + case GR_RBAC_STR:
57361 + dentry = va_arg(ap, struct dentry *);
57362 + mnt = va_arg(ap, struct vfsmount *);
57363 + str1 = va_arg(ap, char *);
57364 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
57365 + break;
57366 + case GR_STR_RBAC:
57367 + str1 = va_arg(ap, char *);
57368 + dentry = va_arg(ap, struct dentry *);
57369 + mnt = va_arg(ap, struct vfsmount *);
57370 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
57371 + break;
57372 + case GR_RBAC_MODE2:
57373 + dentry = va_arg(ap, struct dentry *);
57374 + mnt = va_arg(ap, struct vfsmount *);
57375 + str1 = va_arg(ap, char *);
57376 + str2 = va_arg(ap, char *);
57377 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
57378 + break;
57379 + case GR_RBAC_MODE3:
57380 + dentry = va_arg(ap, struct dentry *);
57381 + mnt = va_arg(ap, struct vfsmount *);
57382 + str1 = va_arg(ap, char *);
57383 + str2 = va_arg(ap, char *);
57384 + str3 = va_arg(ap, char *);
57385 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
57386 + break;
57387 + case GR_FILENAME:
57388 + dentry = va_arg(ap, struct dentry *);
57389 + mnt = va_arg(ap, struct vfsmount *);
57390 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
57391 + break;
57392 + case GR_STR_FILENAME:
57393 + str1 = va_arg(ap, char *);
57394 + dentry = va_arg(ap, struct dentry *);
57395 + mnt = va_arg(ap, struct vfsmount *);
57396 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
57397 + break;
57398 + case GR_FILENAME_STR:
57399 + dentry = va_arg(ap, struct dentry *);
57400 + mnt = va_arg(ap, struct vfsmount *);
57401 + str1 = va_arg(ap, char *);
57402 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
57403 + break;
57404 + case GR_FILENAME_TWO_INT:
57405 + dentry = va_arg(ap, struct dentry *);
57406 + mnt = va_arg(ap, struct vfsmount *);
57407 + num1 = va_arg(ap, int);
57408 + num2 = va_arg(ap, int);
57409 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
57410 + break;
57411 + case GR_FILENAME_TWO_INT_STR:
57412 + dentry = va_arg(ap, struct dentry *);
57413 + mnt = va_arg(ap, struct vfsmount *);
57414 + num1 = va_arg(ap, int);
57415 + num2 = va_arg(ap, int);
57416 + str1 = va_arg(ap, char *);
57417 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
57418 + break;
57419 + case GR_TEXTREL:
57420 + file = va_arg(ap, struct file *);
57421 + ulong1 = va_arg(ap, unsigned long);
57422 + ulong2 = va_arg(ap, unsigned long);
57423 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
57424 + break;
57425 + case GR_PTRACE:
57426 + task = va_arg(ap, struct task_struct *);
57427 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
57428 + break;
57429 + case GR_RESOURCE:
57430 + task = va_arg(ap, struct task_struct *);
57431 + cred = __task_cred(task);
57432 + pcred = __task_cred(task->real_parent);
57433 + ulong1 = va_arg(ap, unsigned long);
57434 + str1 = va_arg(ap, char *);
57435 + ulong2 = va_arg(ap, unsigned long);
57436 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57437 + break;
57438 + case GR_CAP:
57439 + task = va_arg(ap, struct task_struct *);
57440 + cred = __task_cred(task);
57441 + pcred = __task_cred(task->real_parent);
57442 + str1 = va_arg(ap, char *);
57443 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57444 + break;
57445 + case GR_SIG:
57446 + str1 = va_arg(ap, char *);
57447 + voidptr = va_arg(ap, void *);
57448 + gr_log_middle_varargs(audit, msg, str1, voidptr);
57449 + break;
57450 + case GR_SIG2:
57451 + task = va_arg(ap, struct task_struct *);
57452 + cred = __task_cred(task);
57453 + pcred = __task_cred(task->real_parent);
57454 + num1 = va_arg(ap, int);
57455 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57456 + break;
57457 + case GR_CRASH1:
57458 + task = va_arg(ap, struct task_struct *);
57459 + cred = __task_cred(task);
57460 + pcred = __task_cred(task->real_parent);
57461 + ulong1 = va_arg(ap, unsigned long);
57462 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
57463 + break;
57464 + case GR_CRASH2:
57465 + task = va_arg(ap, struct task_struct *);
57466 + cred = __task_cred(task);
57467 + pcred = __task_cred(task->real_parent);
57468 + ulong1 = va_arg(ap, unsigned long);
57469 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
57470 + break;
57471 + case GR_RWXMAP:
57472 + file = va_arg(ap, struct file *);
57473 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
57474 + break;
57475 + case GR_PSACCT:
57476 + {
57477 + unsigned int wday, cday;
57478 + __u8 whr, chr;
57479 + __u8 wmin, cmin;
57480 + __u8 wsec, csec;
57481 + char cur_tty[64] = { 0 };
57482 + char parent_tty[64] = { 0 };
57483 +
57484 + task = va_arg(ap, struct task_struct *);
57485 + wday = va_arg(ap, unsigned int);
57486 + cday = va_arg(ap, unsigned int);
57487 + whr = va_arg(ap, int);
57488 + chr = va_arg(ap, int);
57489 + wmin = va_arg(ap, int);
57490 + cmin = va_arg(ap, int);
57491 + wsec = va_arg(ap, int);
57492 + csec = va_arg(ap, int);
57493 + ulong1 = va_arg(ap, unsigned long);
57494 + cred = __task_cred(task);
57495 + pcred = __task_cred(task->real_parent);
57496 +
57497 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57498 + }
57499 + break;
57500 + default:
57501 + gr_log_middle(audit, msg, ap);
57502 + }
57503 + va_end(ap);
57504 + // these don't need DEFAULTSECARGS printed on the end
57505 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
57506 + gr_log_end(audit, 0);
57507 + else
57508 + gr_log_end(audit, 1);
57509 + END_LOCKS(audit);
57510 +}
57511 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
57512 new file mode 100644
57513 index 0000000..f536303
57514 --- /dev/null
57515 +++ b/grsecurity/grsec_mem.c
57516 @@ -0,0 +1,40 @@
57517 +#include <linux/kernel.h>
57518 +#include <linux/sched.h>
57519 +#include <linux/mm.h>
57520 +#include <linux/mman.h>
57521 +#include <linux/grinternal.h>
57522 +
57523 +void
57524 +gr_handle_ioperm(void)
57525 +{
57526 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
57527 + return;
57528 +}
57529 +
57530 +void
57531 +gr_handle_iopl(void)
57532 +{
57533 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
57534 + return;
57535 +}
57536 +
57537 +void
57538 +gr_handle_mem_readwrite(u64 from, u64 to)
57539 +{
57540 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
57541 + return;
57542 +}
57543 +
57544 +void
57545 +gr_handle_vm86(void)
57546 +{
57547 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
57548 + return;
57549 +}
57550 +
57551 +void
57552 +gr_log_badprocpid(const char *entry)
57553 +{
57554 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
57555 + return;
57556 +}
57557 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
57558 new file mode 100644
57559 index 0000000..2131422
57560 --- /dev/null
57561 +++ b/grsecurity/grsec_mount.c
57562 @@ -0,0 +1,62 @@
57563 +#include <linux/kernel.h>
57564 +#include <linux/sched.h>
57565 +#include <linux/mount.h>
57566 +#include <linux/grsecurity.h>
57567 +#include <linux/grinternal.h>
57568 +
57569 +void
57570 +gr_log_remount(const char *devname, const int retval)
57571 +{
57572 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57573 + if (grsec_enable_mount && (retval >= 0))
57574 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
57575 +#endif
57576 + return;
57577 +}
57578 +
57579 +void
57580 +gr_log_unmount(const char *devname, const int retval)
57581 +{
57582 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57583 + if (grsec_enable_mount && (retval >= 0))
57584 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
57585 +#endif
57586 + return;
57587 +}
57588 +
57589 +void
57590 +gr_log_mount(const char *from, const char *to, const int retval)
57591 +{
57592 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57593 + if (grsec_enable_mount && (retval >= 0))
57594 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
57595 +#endif
57596 + return;
57597 +}
57598 +
57599 +int
57600 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
57601 +{
57602 +#ifdef CONFIG_GRKERNSEC_ROFS
57603 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
57604 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
57605 + return -EPERM;
57606 + } else
57607 + return 0;
57608 +#endif
57609 + return 0;
57610 +}
57611 +
57612 +int
57613 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
57614 +{
57615 +#ifdef CONFIG_GRKERNSEC_ROFS
57616 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
57617 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
57618 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
57619 + return -EPERM;
57620 + } else
57621 + return 0;
57622 +#endif
57623 + return 0;
57624 +}
57625 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
57626 new file mode 100644
57627 index 0000000..a3b12a0
57628 --- /dev/null
57629 +++ b/grsecurity/grsec_pax.c
57630 @@ -0,0 +1,36 @@
57631 +#include <linux/kernel.h>
57632 +#include <linux/sched.h>
57633 +#include <linux/mm.h>
57634 +#include <linux/file.h>
57635 +#include <linux/grinternal.h>
57636 +#include <linux/grsecurity.h>
57637 +
57638 +void
57639 +gr_log_textrel(struct vm_area_struct * vma)
57640 +{
57641 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57642 + if (grsec_enable_audit_textrel)
57643 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
57644 +#endif
57645 + return;
57646 +}
57647 +
57648 +void
57649 +gr_log_rwxmmap(struct file *file)
57650 +{
57651 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57652 + if (grsec_enable_log_rwxmaps)
57653 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
57654 +#endif
57655 + return;
57656 +}
57657 +
57658 +void
57659 +gr_log_rwxmprotect(struct file *file)
57660 +{
57661 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57662 + if (grsec_enable_log_rwxmaps)
57663 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
57664 +#endif
57665 + return;
57666 +}
57667 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
57668 new file mode 100644
57669 index 0000000..f7f29aa
57670 --- /dev/null
57671 +++ b/grsecurity/grsec_ptrace.c
57672 @@ -0,0 +1,30 @@
57673 +#include <linux/kernel.h>
57674 +#include <linux/sched.h>
57675 +#include <linux/grinternal.h>
57676 +#include <linux/security.h>
57677 +
57678 +void
57679 +gr_audit_ptrace(struct task_struct *task)
57680 +{
57681 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57682 + if (grsec_enable_audit_ptrace)
57683 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
57684 +#endif
57685 + return;
57686 +}
57687 +
57688 +int
57689 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
57690 +{
57691 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57692 + const struct dentry *dentry = file->f_path.dentry;
57693 + const struct vfsmount *mnt = file->f_path.mnt;
57694 +
57695 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
57696 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
57697 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
57698 + return -EACCES;
57699 + }
57700 +#endif
57701 + return 0;
57702 +}
57703 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
57704 new file mode 100644
57705 index 0000000..7a5b2de
57706 --- /dev/null
57707 +++ b/grsecurity/grsec_sig.c
57708 @@ -0,0 +1,207 @@
57709 +#include <linux/kernel.h>
57710 +#include <linux/sched.h>
57711 +#include <linux/delay.h>
57712 +#include <linux/grsecurity.h>
57713 +#include <linux/grinternal.h>
57714 +#include <linux/hardirq.h>
57715 +
57716 +char *signames[] = {
57717 + [SIGSEGV] = "Segmentation fault",
57718 + [SIGILL] = "Illegal instruction",
57719 + [SIGABRT] = "Abort",
57720 + [SIGBUS] = "Invalid alignment/Bus error"
57721 +};
57722 +
57723 +void
57724 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
57725 +{
57726 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57727 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
57728 + (sig == SIGABRT) || (sig == SIGBUS))) {
57729 + if (t->pid == current->pid) {
57730 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
57731 + } else {
57732 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
57733 + }
57734 + }
57735 +#endif
57736 + return;
57737 +}
57738 +
57739 +int
57740 +gr_handle_signal(const struct task_struct *p, const int sig)
57741 +{
57742 +#ifdef CONFIG_GRKERNSEC
57743 + /* ignore the 0 signal for protected task checks */
57744 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
57745 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
57746 + return -EPERM;
57747 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
57748 + return -EPERM;
57749 + }
57750 +#endif
57751 + return 0;
57752 +}
57753 +
57754 +#ifdef CONFIG_GRKERNSEC
57755 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
57756 +
57757 +int gr_fake_force_sig(int sig, struct task_struct *t)
57758 +{
57759 + unsigned long int flags;
57760 + int ret, blocked, ignored;
57761 + struct k_sigaction *action;
57762 +
57763 + spin_lock_irqsave(&t->sighand->siglock, flags);
57764 + action = &t->sighand->action[sig-1];
57765 + ignored = action->sa.sa_handler == SIG_IGN;
57766 + blocked = sigismember(&t->blocked, sig);
57767 + if (blocked || ignored) {
57768 + action->sa.sa_handler = SIG_DFL;
57769 + if (blocked) {
57770 + sigdelset(&t->blocked, sig);
57771 + recalc_sigpending_and_wake(t);
57772 + }
57773 + }
57774 + if (action->sa.sa_handler == SIG_DFL)
57775 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
57776 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
57777 +
57778 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
57779 +
57780 + return ret;
57781 +}
57782 +#endif
57783 +
57784 +#ifdef CONFIG_GRKERNSEC_BRUTE
57785 +#define GR_USER_BAN_TIME (15 * 60)
57786 +
57787 +static int __get_dumpable(unsigned long mm_flags)
57788 +{
57789 + int ret;
57790 +
57791 + ret = mm_flags & MMF_DUMPABLE_MASK;
57792 + return (ret >= 2) ? 2 : ret;
57793 +}
57794 +#endif
57795 +
57796 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
57797 +{
57798 +#ifdef CONFIG_GRKERNSEC_BRUTE
57799 + uid_t uid = 0;
57800 +
57801 + if (!grsec_enable_brute)
57802 + return;
57803 +
57804 + rcu_read_lock();
57805 + read_lock(&tasklist_lock);
57806 + read_lock(&grsec_exec_file_lock);
57807 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
57808 + p->real_parent->brute = 1;
57809 + else {
57810 + const struct cred *cred = __task_cred(p), *cred2;
57811 + struct task_struct *tsk, *tsk2;
57812 +
57813 + if (!__get_dumpable(mm_flags) && cred->uid) {
57814 + struct user_struct *user;
57815 +
57816 + uid = cred->uid;
57817 +
57818 + /* this is put upon execution past expiration */
57819 + user = find_user(uid);
57820 + if (user == NULL)
57821 + goto unlock;
57822 + user->banned = 1;
57823 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
57824 + if (user->ban_expires == ~0UL)
57825 + user->ban_expires--;
57826 +
57827 + do_each_thread(tsk2, tsk) {
57828 + cred2 = __task_cred(tsk);
57829 + if (tsk != p && cred2->uid == uid)
57830 + gr_fake_force_sig(SIGKILL, tsk);
57831 + } while_each_thread(tsk2, tsk);
57832 + }
57833 + }
57834 +unlock:
57835 + read_unlock(&grsec_exec_file_lock);
57836 + read_unlock(&tasklist_lock);
57837 + rcu_read_unlock();
57838 +
57839 + if (uid)
57840 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
57841 +
57842 +#endif
57843 + return;
57844 +}
57845 +
57846 +void gr_handle_brute_check(void)
57847 +{
57848 +#ifdef CONFIG_GRKERNSEC_BRUTE
57849 + if (current->brute)
57850 + msleep(30 * 1000);
57851 +#endif
57852 + return;
57853 +}
57854 +
57855 +void gr_handle_kernel_exploit(void)
57856 +{
57857 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
57858 + const struct cred *cred;
57859 + struct task_struct *tsk, *tsk2;
57860 + struct user_struct *user;
57861 + uid_t uid;
57862 +
57863 + if (in_irq() || in_serving_softirq() || in_nmi())
57864 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
57865 +
57866 + uid = current_uid();
57867 +
57868 + if (uid == 0)
57869 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
57870 + else {
57871 + /* kill all the processes of this user, hold a reference
57872 + to their creds struct, and prevent them from creating
57873 + another process until system reset
57874 + */
57875 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
57876 + /* we intentionally leak this ref */
57877 + user = get_uid(current->cred->user);
57878 + if (user) {
57879 + user->banned = 1;
57880 + user->ban_expires = ~0UL;
57881 + }
57882 +
57883 + read_lock(&tasklist_lock);
57884 + do_each_thread(tsk2, tsk) {
57885 + cred = __task_cred(tsk);
57886 + if (cred->uid == uid)
57887 + gr_fake_force_sig(SIGKILL, tsk);
57888 + } while_each_thread(tsk2, tsk);
57889 + read_unlock(&tasklist_lock);
57890 + }
57891 +#endif
57892 +}
57893 +
57894 +int __gr_process_user_ban(struct user_struct *user)
57895 +{
57896 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57897 + if (unlikely(user->banned)) {
57898 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
57899 + user->banned = 0;
57900 + user->ban_expires = 0;
57901 + free_uid(user);
57902 + } else
57903 + return -EPERM;
57904 + }
57905 +#endif
57906 + return 0;
57907 +}
57908 +
57909 +int gr_process_user_ban(void)
57910 +{
57911 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57912 + return __gr_process_user_ban(current->cred->user);
57913 +#endif
57914 + return 0;
57915 +}
57916 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
57917 new file mode 100644
57918 index 0000000..4030d57
57919 --- /dev/null
57920 +++ b/grsecurity/grsec_sock.c
57921 @@ -0,0 +1,244 @@
57922 +#include <linux/kernel.h>
57923 +#include <linux/module.h>
57924 +#include <linux/sched.h>
57925 +#include <linux/file.h>
57926 +#include <linux/net.h>
57927 +#include <linux/in.h>
57928 +#include <linux/ip.h>
57929 +#include <net/sock.h>
57930 +#include <net/inet_sock.h>
57931 +#include <linux/grsecurity.h>
57932 +#include <linux/grinternal.h>
57933 +#include <linux/gracl.h>
57934 +
57935 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
57936 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
57937 +
57938 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
57939 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
57940 +
57941 +#ifdef CONFIG_UNIX_MODULE
57942 +EXPORT_SYMBOL(gr_acl_handle_unix);
57943 +EXPORT_SYMBOL(gr_acl_handle_mknod);
57944 +EXPORT_SYMBOL(gr_handle_chroot_unix);
57945 +EXPORT_SYMBOL(gr_handle_create);
57946 +#endif
57947 +
57948 +#ifdef CONFIG_GRKERNSEC
57949 +#define gr_conn_table_size 32749
57950 +struct conn_table_entry {
57951 + struct conn_table_entry *next;
57952 + struct signal_struct *sig;
57953 +};
57954 +
57955 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
57956 +DEFINE_SPINLOCK(gr_conn_table_lock);
57957 +
57958 +extern const char * gr_socktype_to_name(unsigned char type);
57959 +extern const char * gr_proto_to_name(unsigned char proto);
57960 +extern const char * gr_sockfamily_to_name(unsigned char family);
57961 +
57962 +static __inline__ int
57963 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
57964 +{
57965 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
57966 +}
57967 +
57968 +static __inline__ int
57969 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
57970 + __u16 sport, __u16 dport)
57971 +{
57972 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
57973 + sig->gr_sport == sport && sig->gr_dport == dport))
57974 + return 1;
57975 + else
57976 + return 0;
57977 +}
57978 +
57979 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
57980 +{
57981 + struct conn_table_entry **match;
57982 + unsigned int index;
57983 +
57984 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
57985 + sig->gr_sport, sig->gr_dport,
57986 + gr_conn_table_size);
57987 +
57988 + newent->sig = sig;
57989 +
57990 + match = &gr_conn_table[index];
57991 + newent->next = *match;
57992 + *match = newent;
57993 +
57994 + return;
57995 +}
57996 +
57997 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
57998 +{
57999 + struct conn_table_entry *match, *last = NULL;
58000 + unsigned int index;
58001 +
58002 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58003 + sig->gr_sport, sig->gr_dport,
58004 + gr_conn_table_size);
58005 +
58006 + match = gr_conn_table[index];
58007 + while (match && !conn_match(match->sig,
58008 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
58009 + sig->gr_dport)) {
58010 + last = match;
58011 + match = match->next;
58012 + }
58013 +
58014 + if (match) {
58015 + if (last)
58016 + last->next = match->next;
58017 + else
58018 + gr_conn_table[index] = NULL;
58019 + kfree(match);
58020 + }
58021 +
58022 + return;
58023 +}
58024 +
58025 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
58026 + __u16 sport, __u16 dport)
58027 +{
58028 + struct conn_table_entry *match;
58029 + unsigned int index;
58030 +
58031 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
58032 +
58033 + match = gr_conn_table[index];
58034 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
58035 + match = match->next;
58036 +
58037 + if (match)
58038 + return match->sig;
58039 + else
58040 + return NULL;
58041 +}
58042 +
58043 +#endif
58044 +
58045 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
58046 +{
58047 +#ifdef CONFIG_GRKERNSEC
58048 + struct signal_struct *sig = task->signal;
58049 + struct conn_table_entry *newent;
58050 +
58051 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58052 + if (newent == NULL)
58053 + return;
58054 + /* no bh lock needed since we are called with bh disabled */
58055 + spin_lock(&gr_conn_table_lock);
58056 + gr_del_task_from_ip_table_nolock(sig);
58057 + sig->gr_saddr = inet->inet_rcv_saddr;
58058 + sig->gr_daddr = inet->inet_daddr;
58059 + sig->gr_sport = inet->inet_sport;
58060 + sig->gr_dport = inet->inet_dport;
58061 + gr_add_to_task_ip_table_nolock(sig, newent);
58062 + spin_unlock(&gr_conn_table_lock);
58063 +#endif
58064 + return;
58065 +}
58066 +
58067 +void gr_del_task_from_ip_table(struct task_struct *task)
58068 +{
58069 +#ifdef CONFIG_GRKERNSEC
58070 + spin_lock_bh(&gr_conn_table_lock);
58071 + gr_del_task_from_ip_table_nolock(task->signal);
58072 + spin_unlock_bh(&gr_conn_table_lock);
58073 +#endif
58074 + return;
58075 +}
58076 +
58077 +void
58078 +gr_attach_curr_ip(const struct sock *sk)
58079 +{
58080 +#ifdef CONFIG_GRKERNSEC
58081 + struct signal_struct *p, *set;
58082 + const struct inet_sock *inet = inet_sk(sk);
58083 +
58084 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58085 + return;
58086 +
58087 + set = current->signal;
58088 +
58089 + spin_lock_bh(&gr_conn_table_lock);
58090 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
58091 + inet->inet_dport, inet->inet_sport);
58092 + if (unlikely(p != NULL)) {
58093 + set->curr_ip = p->curr_ip;
58094 + set->used_accept = 1;
58095 + gr_del_task_from_ip_table_nolock(p);
58096 + spin_unlock_bh(&gr_conn_table_lock);
58097 + return;
58098 + }
58099 + spin_unlock_bh(&gr_conn_table_lock);
58100 +
58101 + set->curr_ip = inet->inet_daddr;
58102 + set->used_accept = 1;
58103 +#endif
58104 + return;
58105 +}
58106 +
58107 +int
58108 +gr_handle_sock_all(const int family, const int type, const int protocol)
58109 +{
58110 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58111 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
58112 + (family != AF_UNIX)) {
58113 + if (family == AF_INET)
58114 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
58115 + else
58116 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58117 + return -EACCES;
58118 + }
58119 +#endif
58120 + return 0;
58121 +}
58122 +
58123 +int
58124 +gr_handle_sock_server(const struct sockaddr *sck)
58125 +{
58126 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58127 + if (grsec_enable_socket_server &&
58128 + in_group_p(grsec_socket_server_gid) &&
58129 + sck && (sck->sa_family != AF_UNIX) &&
58130 + (sck->sa_family != AF_LOCAL)) {
58131 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58132 + return -EACCES;
58133 + }
58134 +#endif
58135 + return 0;
58136 +}
58137 +
58138 +int
58139 +gr_handle_sock_server_other(const struct sock *sck)
58140 +{
58141 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58142 + if (grsec_enable_socket_server &&
58143 + in_group_p(grsec_socket_server_gid) &&
58144 + sck && (sck->sk_family != AF_UNIX) &&
58145 + (sck->sk_family != AF_LOCAL)) {
58146 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58147 + return -EACCES;
58148 + }
58149 +#endif
58150 + return 0;
58151 +}
58152 +
58153 +int
58154 +gr_handle_sock_client(const struct sockaddr *sck)
58155 +{
58156 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58157 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58158 + sck && (sck->sa_family != AF_UNIX) &&
58159 + (sck->sa_family != AF_LOCAL)) {
58160 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58161 + return -EACCES;
58162 + }
58163 +#endif
58164 + return 0;
58165 +}
58166 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
58167 new file mode 100644
58168 index 0000000..8316f6f
58169 --- /dev/null
58170 +++ b/grsecurity/grsec_sysctl.c
58171 @@ -0,0 +1,453 @@
58172 +#include <linux/kernel.h>
58173 +#include <linux/sched.h>
58174 +#include <linux/sysctl.h>
58175 +#include <linux/grsecurity.h>
58176 +#include <linux/grinternal.h>
58177 +
58178 +int
58179 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58180 +{
58181 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58182 + if (dirname == NULL || name == NULL)
58183 + return 0;
58184 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58185 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58186 + return -EACCES;
58187 + }
58188 +#endif
58189 + return 0;
58190 +}
58191 +
58192 +#ifdef CONFIG_GRKERNSEC_ROFS
58193 +static int __maybe_unused one = 1;
58194 +#endif
58195 +
58196 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58197 +struct ctl_table grsecurity_table[] = {
58198 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58199 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58200 +#ifdef CONFIG_GRKERNSEC_IO
58201 + {
58202 + .procname = "disable_priv_io",
58203 + .data = &grsec_disable_privio,
58204 + .maxlen = sizeof(int),
58205 + .mode = 0600,
58206 + .proc_handler = &proc_dointvec,
58207 + },
58208 +#endif
58209 +#endif
58210 +#ifdef CONFIG_GRKERNSEC_LINK
58211 + {
58212 + .procname = "linking_restrictions",
58213 + .data = &grsec_enable_link,
58214 + .maxlen = sizeof(int),
58215 + .mode = 0600,
58216 + .proc_handler = &proc_dointvec,
58217 + },
58218 +#endif
58219 +#ifdef CONFIG_GRKERNSEC_BRUTE
58220 + {
58221 + .procname = "deter_bruteforce",
58222 + .data = &grsec_enable_brute,
58223 + .maxlen = sizeof(int),
58224 + .mode = 0600,
58225 + .proc_handler = &proc_dointvec,
58226 + },
58227 +#endif
58228 +#ifdef CONFIG_GRKERNSEC_FIFO
58229 + {
58230 + .procname = "fifo_restrictions",
58231 + .data = &grsec_enable_fifo,
58232 + .maxlen = sizeof(int),
58233 + .mode = 0600,
58234 + .proc_handler = &proc_dointvec,
58235 + },
58236 +#endif
58237 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58238 + {
58239 + .procname = "ptrace_readexec",
58240 + .data = &grsec_enable_ptrace_readexec,
58241 + .maxlen = sizeof(int),
58242 + .mode = 0600,
58243 + .proc_handler = &proc_dointvec,
58244 + },
58245 +#endif
58246 +#ifdef CONFIG_GRKERNSEC_SETXID
58247 + {
58248 + .procname = "consistent_setxid",
58249 + .data = &grsec_enable_setxid,
58250 + .maxlen = sizeof(int),
58251 + .mode = 0600,
58252 + .proc_handler = &proc_dointvec,
58253 + },
58254 +#endif
58255 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58256 + {
58257 + .procname = "ip_blackhole",
58258 + .data = &grsec_enable_blackhole,
58259 + .maxlen = sizeof(int),
58260 + .mode = 0600,
58261 + .proc_handler = &proc_dointvec,
58262 + },
58263 + {
58264 + .procname = "lastack_retries",
58265 + .data = &grsec_lastack_retries,
58266 + .maxlen = sizeof(int),
58267 + .mode = 0600,
58268 + .proc_handler = &proc_dointvec,
58269 + },
58270 +#endif
58271 +#ifdef CONFIG_GRKERNSEC_EXECLOG
58272 + {
58273 + .procname = "exec_logging",
58274 + .data = &grsec_enable_execlog,
58275 + .maxlen = sizeof(int),
58276 + .mode = 0600,
58277 + .proc_handler = &proc_dointvec,
58278 + },
58279 +#endif
58280 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58281 + {
58282 + .procname = "rwxmap_logging",
58283 + .data = &grsec_enable_log_rwxmaps,
58284 + .maxlen = sizeof(int),
58285 + .mode = 0600,
58286 + .proc_handler = &proc_dointvec,
58287 + },
58288 +#endif
58289 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58290 + {
58291 + .procname = "signal_logging",
58292 + .data = &grsec_enable_signal,
58293 + .maxlen = sizeof(int),
58294 + .mode = 0600,
58295 + .proc_handler = &proc_dointvec,
58296 + },
58297 +#endif
58298 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
58299 + {
58300 + .procname = "forkfail_logging",
58301 + .data = &grsec_enable_forkfail,
58302 + .maxlen = sizeof(int),
58303 + .mode = 0600,
58304 + .proc_handler = &proc_dointvec,
58305 + },
58306 +#endif
58307 +#ifdef CONFIG_GRKERNSEC_TIME
58308 + {
58309 + .procname = "timechange_logging",
58310 + .data = &grsec_enable_time,
58311 + .maxlen = sizeof(int),
58312 + .mode = 0600,
58313 + .proc_handler = &proc_dointvec,
58314 + },
58315 +#endif
58316 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58317 + {
58318 + .procname = "chroot_deny_shmat",
58319 + .data = &grsec_enable_chroot_shmat,
58320 + .maxlen = sizeof(int),
58321 + .mode = 0600,
58322 + .proc_handler = &proc_dointvec,
58323 + },
58324 +#endif
58325 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58326 + {
58327 + .procname = "chroot_deny_unix",
58328 + .data = &grsec_enable_chroot_unix,
58329 + .maxlen = sizeof(int),
58330 + .mode = 0600,
58331 + .proc_handler = &proc_dointvec,
58332 + },
58333 +#endif
58334 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58335 + {
58336 + .procname = "chroot_deny_mount",
58337 + .data = &grsec_enable_chroot_mount,
58338 + .maxlen = sizeof(int),
58339 + .mode = 0600,
58340 + .proc_handler = &proc_dointvec,
58341 + },
58342 +#endif
58343 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58344 + {
58345 + .procname = "chroot_deny_fchdir",
58346 + .data = &grsec_enable_chroot_fchdir,
58347 + .maxlen = sizeof(int),
58348 + .mode = 0600,
58349 + .proc_handler = &proc_dointvec,
58350 + },
58351 +#endif
58352 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58353 + {
58354 + .procname = "chroot_deny_chroot",
58355 + .data = &grsec_enable_chroot_double,
58356 + .maxlen = sizeof(int),
58357 + .mode = 0600,
58358 + .proc_handler = &proc_dointvec,
58359 + },
58360 +#endif
58361 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58362 + {
58363 + .procname = "chroot_deny_pivot",
58364 + .data = &grsec_enable_chroot_pivot,
58365 + .maxlen = sizeof(int),
58366 + .mode = 0600,
58367 + .proc_handler = &proc_dointvec,
58368 + },
58369 +#endif
58370 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58371 + {
58372 + .procname = "chroot_enforce_chdir",
58373 + .data = &grsec_enable_chroot_chdir,
58374 + .maxlen = sizeof(int),
58375 + .mode = 0600,
58376 + .proc_handler = &proc_dointvec,
58377 + },
58378 +#endif
58379 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58380 + {
58381 + .procname = "chroot_deny_chmod",
58382 + .data = &grsec_enable_chroot_chmod,
58383 + .maxlen = sizeof(int),
58384 + .mode = 0600,
58385 + .proc_handler = &proc_dointvec,
58386 + },
58387 +#endif
58388 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58389 + {
58390 + .procname = "chroot_deny_mknod",
58391 + .data = &grsec_enable_chroot_mknod,
58392 + .maxlen = sizeof(int),
58393 + .mode = 0600,
58394 + .proc_handler = &proc_dointvec,
58395 + },
58396 +#endif
58397 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58398 + {
58399 + .procname = "chroot_restrict_nice",
58400 + .data = &grsec_enable_chroot_nice,
58401 + .maxlen = sizeof(int),
58402 + .mode = 0600,
58403 + .proc_handler = &proc_dointvec,
58404 + },
58405 +#endif
58406 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58407 + {
58408 + .procname = "chroot_execlog",
58409 + .data = &grsec_enable_chroot_execlog,
58410 + .maxlen = sizeof(int),
58411 + .mode = 0600,
58412 + .proc_handler = &proc_dointvec,
58413 + },
58414 +#endif
58415 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58416 + {
58417 + .procname = "chroot_caps",
58418 + .data = &grsec_enable_chroot_caps,
58419 + .maxlen = sizeof(int),
58420 + .mode = 0600,
58421 + .proc_handler = &proc_dointvec,
58422 + },
58423 +#endif
58424 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58425 + {
58426 + .procname = "chroot_deny_sysctl",
58427 + .data = &grsec_enable_chroot_sysctl,
58428 + .maxlen = sizeof(int),
58429 + .mode = 0600,
58430 + .proc_handler = &proc_dointvec,
58431 + },
58432 +#endif
58433 +#ifdef CONFIG_GRKERNSEC_TPE
58434 + {
58435 + .procname = "tpe",
58436 + .data = &grsec_enable_tpe,
58437 + .maxlen = sizeof(int),
58438 + .mode = 0600,
58439 + .proc_handler = &proc_dointvec,
58440 + },
58441 + {
58442 + .procname = "tpe_gid",
58443 + .data = &grsec_tpe_gid,
58444 + .maxlen = sizeof(int),
58445 + .mode = 0600,
58446 + .proc_handler = &proc_dointvec,
58447 + },
58448 +#endif
58449 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58450 + {
58451 + .procname = "tpe_invert",
58452 + .data = &grsec_enable_tpe_invert,
58453 + .maxlen = sizeof(int),
58454 + .mode = 0600,
58455 + .proc_handler = &proc_dointvec,
58456 + },
58457 +#endif
58458 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58459 + {
58460 + .procname = "tpe_restrict_all",
58461 + .data = &grsec_enable_tpe_all,
58462 + .maxlen = sizeof(int),
58463 + .mode = 0600,
58464 + .proc_handler = &proc_dointvec,
58465 + },
58466 +#endif
58467 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58468 + {
58469 + .procname = "socket_all",
58470 + .data = &grsec_enable_socket_all,
58471 + .maxlen = sizeof(int),
58472 + .mode = 0600,
58473 + .proc_handler = &proc_dointvec,
58474 + },
58475 + {
58476 + .procname = "socket_all_gid",
58477 + .data = &grsec_socket_all_gid,
58478 + .maxlen = sizeof(int),
58479 + .mode = 0600,
58480 + .proc_handler = &proc_dointvec,
58481 + },
58482 +#endif
58483 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58484 + {
58485 + .procname = "socket_client",
58486 + .data = &grsec_enable_socket_client,
58487 + .maxlen = sizeof(int),
58488 + .mode = 0600,
58489 + .proc_handler = &proc_dointvec,
58490 + },
58491 + {
58492 + .procname = "socket_client_gid",
58493 + .data = &grsec_socket_client_gid,
58494 + .maxlen = sizeof(int),
58495 + .mode = 0600,
58496 + .proc_handler = &proc_dointvec,
58497 + },
58498 +#endif
58499 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58500 + {
58501 + .procname = "socket_server",
58502 + .data = &grsec_enable_socket_server,
58503 + .maxlen = sizeof(int),
58504 + .mode = 0600,
58505 + .proc_handler = &proc_dointvec,
58506 + },
58507 + {
58508 + .procname = "socket_server_gid",
58509 + .data = &grsec_socket_server_gid,
58510 + .maxlen = sizeof(int),
58511 + .mode = 0600,
58512 + .proc_handler = &proc_dointvec,
58513 + },
58514 +#endif
58515 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58516 + {
58517 + .procname = "audit_group",
58518 + .data = &grsec_enable_group,
58519 + .maxlen = sizeof(int),
58520 + .mode = 0600,
58521 + .proc_handler = &proc_dointvec,
58522 + },
58523 + {
58524 + .procname = "audit_gid",
58525 + .data = &grsec_audit_gid,
58526 + .maxlen = sizeof(int),
58527 + .mode = 0600,
58528 + .proc_handler = &proc_dointvec,
58529 + },
58530 +#endif
58531 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58532 + {
58533 + .procname = "audit_chdir",
58534 + .data = &grsec_enable_chdir,
58535 + .maxlen = sizeof(int),
58536 + .mode = 0600,
58537 + .proc_handler = &proc_dointvec,
58538 + },
58539 +#endif
58540 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58541 + {
58542 + .procname = "audit_mount",
58543 + .data = &grsec_enable_mount,
58544 + .maxlen = sizeof(int),
58545 + .mode = 0600,
58546 + .proc_handler = &proc_dointvec,
58547 + },
58548 +#endif
58549 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58550 + {
58551 + .procname = "audit_textrel",
58552 + .data = &grsec_enable_audit_textrel,
58553 + .maxlen = sizeof(int),
58554 + .mode = 0600,
58555 + .proc_handler = &proc_dointvec,
58556 + },
58557 +#endif
58558 +#ifdef CONFIG_GRKERNSEC_DMESG
58559 + {
58560 + .procname = "dmesg",
58561 + .data = &grsec_enable_dmesg,
58562 + .maxlen = sizeof(int),
58563 + .mode = 0600,
58564 + .proc_handler = &proc_dointvec,
58565 + },
58566 +#endif
58567 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58568 + {
58569 + .procname = "chroot_findtask",
58570 + .data = &grsec_enable_chroot_findtask,
58571 + .maxlen = sizeof(int),
58572 + .mode = 0600,
58573 + .proc_handler = &proc_dointvec,
58574 + },
58575 +#endif
58576 +#ifdef CONFIG_GRKERNSEC_RESLOG
58577 + {
58578 + .procname = "resource_logging",
58579 + .data = &grsec_resource_logging,
58580 + .maxlen = sizeof(int),
58581 + .mode = 0600,
58582 + .proc_handler = &proc_dointvec,
58583 + },
58584 +#endif
58585 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58586 + {
58587 + .procname = "audit_ptrace",
58588 + .data = &grsec_enable_audit_ptrace,
58589 + .maxlen = sizeof(int),
58590 + .mode = 0600,
58591 + .proc_handler = &proc_dointvec,
58592 + },
58593 +#endif
58594 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58595 + {
58596 + .procname = "harden_ptrace",
58597 + .data = &grsec_enable_harden_ptrace,
58598 + .maxlen = sizeof(int),
58599 + .mode = 0600,
58600 + .proc_handler = &proc_dointvec,
58601 + },
58602 +#endif
58603 + {
58604 + .procname = "grsec_lock",
58605 + .data = &grsec_lock,
58606 + .maxlen = sizeof(int),
58607 + .mode = 0600,
58608 + .proc_handler = &proc_dointvec,
58609 + },
58610 +#endif
58611 +#ifdef CONFIG_GRKERNSEC_ROFS
58612 + {
58613 + .procname = "romount_protect",
58614 + .data = &grsec_enable_rofs,
58615 + .maxlen = sizeof(int),
58616 + .mode = 0600,
58617 + .proc_handler = &proc_dointvec_minmax,
58618 + .extra1 = &one,
58619 + .extra2 = &one,
58620 + },
58621 +#endif
58622 + { }
58623 +};
58624 +#endif
58625 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
58626 new file mode 100644
58627 index 0000000..0dc13c3
58628 --- /dev/null
58629 +++ b/grsecurity/grsec_time.c
58630 @@ -0,0 +1,16 @@
58631 +#include <linux/kernel.h>
58632 +#include <linux/sched.h>
58633 +#include <linux/grinternal.h>
58634 +#include <linux/module.h>
58635 +
58636 +void
58637 +gr_log_timechange(void)
58638 +{
58639 +#ifdef CONFIG_GRKERNSEC_TIME
58640 + if (grsec_enable_time)
58641 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
58642 +#endif
58643 + return;
58644 +}
58645 +
58646 +EXPORT_SYMBOL(gr_log_timechange);
58647 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
58648 new file mode 100644
58649 index 0000000..07e0dc0
58650 --- /dev/null
58651 +++ b/grsecurity/grsec_tpe.c
58652 @@ -0,0 +1,73 @@
58653 +#include <linux/kernel.h>
58654 +#include <linux/sched.h>
58655 +#include <linux/file.h>
58656 +#include <linux/fs.h>
58657 +#include <linux/grinternal.h>
58658 +
58659 +extern int gr_acl_tpe_check(void);
58660 +
58661 +int
58662 +gr_tpe_allow(const struct file *file)
58663 +{
58664 +#ifdef CONFIG_GRKERNSEC
58665 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
58666 + const struct cred *cred = current_cred();
58667 + char *msg = NULL;
58668 + char *msg2 = NULL;
58669 +
58670 + // never restrict root
58671 + if (!cred->uid)
58672 + return 1;
58673 +
58674 + if (grsec_enable_tpe) {
58675 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58676 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
58677 + msg = "not being in trusted group";
58678 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
58679 + msg = "being in untrusted group";
58680 +#else
58681 + if (in_group_p(grsec_tpe_gid))
58682 + msg = "being in untrusted group";
58683 +#endif
58684 + }
58685 + if (!msg && gr_acl_tpe_check())
58686 + msg = "being in untrusted role";
58687 +
58688 + // not in any affected group/role
58689 + if (!msg)
58690 + goto next_check;
58691 +
58692 + if (inode->i_uid)
58693 + msg2 = "file in non-root-owned directory";
58694 + else if (inode->i_mode & S_IWOTH)
58695 + msg2 = "file in world-writable directory";
58696 + else if (inode->i_mode & S_IWGRP)
58697 + msg2 = "file in group-writable directory";
58698 +
58699 + if (msg && msg2) {
58700 + char fullmsg[70] = {0};
58701 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
58702 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
58703 + return 0;
58704 + }
58705 + msg = NULL;
58706 +next_check:
58707 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58708 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
58709 + return 1;
58710 +
58711 + if (inode->i_uid && (inode->i_uid != cred->uid))
58712 + msg = "directory not owned by user";
58713 + else if (inode->i_mode & S_IWOTH)
58714 + msg = "file in world-writable directory";
58715 + else if (inode->i_mode & S_IWGRP)
58716 + msg = "file in group-writable directory";
58717 +
58718 + if (msg) {
58719 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
58720 + return 0;
58721 + }
58722 +#endif
58723 +#endif
58724 + return 1;
58725 +}
58726 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
58727 new file mode 100644
58728 index 0000000..9f7b1ac
58729 --- /dev/null
58730 +++ b/grsecurity/grsum.c
58731 @@ -0,0 +1,61 @@
58732 +#include <linux/err.h>
58733 +#include <linux/kernel.h>
58734 +#include <linux/sched.h>
58735 +#include <linux/mm.h>
58736 +#include <linux/scatterlist.h>
58737 +#include <linux/crypto.h>
58738 +#include <linux/gracl.h>
58739 +
58740 +
58741 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
58742 +#error "crypto and sha256 must be built into the kernel"
58743 +#endif
58744 +
58745 +int
58746 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
58747 +{
58748 + char *p;
58749 + struct crypto_hash *tfm;
58750 + struct hash_desc desc;
58751 + struct scatterlist sg;
58752 + unsigned char temp_sum[GR_SHA_LEN];
58753 + volatile int retval = 0;
58754 + volatile int dummy = 0;
58755 + unsigned int i;
58756 +
58757 + sg_init_table(&sg, 1);
58758 +
58759 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
58760 + if (IS_ERR(tfm)) {
58761 + /* should never happen, since sha256 should be built in */
58762 + return 1;
58763 + }
58764 +
58765 + desc.tfm = tfm;
58766 + desc.flags = 0;
58767 +
58768 + crypto_hash_init(&desc);
58769 +
58770 + p = salt;
58771 + sg_set_buf(&sg, p, GR_SALT_LEN);
58772 + crypto_hash_update(&desc, &sg, sg.length);
58773 +
58774 + p = entry->pw;
58775 + sg_set_buf(&sg, p, strlen(p));
58776 +
58777 + crypto_hash_update(&desc, &sg, sg.length);
58778 +
58779 + crypto_hash_final(&desc, temp_sum);
58780 +
58781 + memset(entry->pw, 0, GR_PW_LEN);
58782 +
58783 + for (i = 0; i < GR_SHA_LEN; i++)
58784 + if (sum[i] != temp_sum[i])
58785 + retval = 1;
58786 + else
58787 + dummy = 1; // waste a cycle
58788 +
58789 + crypto_free_hash(tfm);
58790 +
58791 + return retval;
58792 +}
58793 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
58794 index f1c8ca6..b5c1cc7 100644
58795 --- a/include/acpi/acpi_bus.h
58796 +++ b/include/acpi/acpi_bus.h
58797 @@ -107,7 +107,7 @@ struct acpi_device_ops {
58798 acpi_op_bind bind;
58799 acpi_op_unbind unbind;
58800 acpi_op_notify notify;
58801 -};
58802 +} __no_const;
58803
58804 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
58805
58806 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
58807 index b7babf0..71e4e74 100644
58808 --- a/include/asm-generic/atomic-long.h
58809 +++ b/include/asm-generic/atomic-long.h
58810 @@ -22,6 +22,12 @@
58811
58812 typedef atomic64_t atomic_long_t;
58813
58814 +#ifdef CONFIG_PAX_REFCOUNT
58815 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
58816 +#else
58817 +typedef atomic64_t atomic_long_unchecked_t;
58818 +#endif
58819 +
58820 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
58821
58822 static inline long atomic_long_read(atomic_long_t *l)
58823 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58824 return (long)atomic64_read(v);
58825 }
58826
58827 +#ifdef CONFIG_PAX_REFCOUNT
58828 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58829 +{
58830 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58831 +
58832 + return (long)atomic64_read_unchecked(v);
58833 +}
58834 +#endif
58835 +
58836 static inline void atomic_long_set(atomic_long_t *l, long i)
58837 {
58838 atomic64_t *v = (atomic64_t *)l;
58839 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
58840 atomic64_set(v, i);
58841 }
58842
58843 +#ifdef CONFIG_PAX_REFCOUNT
58844 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58845 +{
58846 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58847 +
58848 + atomic64_set_unchecked(v, i);
58849 +}
58850 +#endif
58851 +
58852 static inline void atomic_long_inc(atomic_long_t *l)
58853 {
58854 atomic64_t *v = (atomic64_t *)l;
58855 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
58856 atomic64_inc(v);
58857 }
58858
58859 +#ifdef CONFIG_PAX_REFCOUNT
58860 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58861 +{
58862 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58863 +
58864 + atomic64_inc_unchecked(v);
58865 +}
58866 +#endif
58867 +
58868 static inline void atomic_long_dec(atomic_long_t *l)
58869 {
58870 atomic64_t *v = (atomic64_t *)l;
58871 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
58872 atomic64_dec(v);
58873 }
58874
58875 +#ifdef CONFIG_PAX_REFCOUNT
58876 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
58877 +{
58878 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58879 +
58880 + atomic64_dec_unchecked(v);
58881 +}
58882 +#endif
58883 +
58884 static inline void atomic_long_add(long i, atomic_long_t *l)
58885 {
58886 atomic64_t *v = (atomic64_t *)l;
58887 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
58888 atomic64_add(i, v);
58889 }
58890
58891 +#ifdef CONFIG_PAX_REFCOUNT
58892 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58893 +{
58894 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58895 +
58896 + atomic64_add_unchecked(i, v);
58897 +}
58898 +#endif
58899 +
58900 static inline void atomic_long_sub(long i, atomic_long_t *l)
58901 {
58902 atomic64_t *v = (atomic64_t *)l;
58903 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
58904 atomic64_sub(i, v);
58905 }
58906
58907 +#ifdef CONFIG_PAX_REFCOUNT
58908 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
58909 +{
58910 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58911 +
58912 + atomic64_sub_unchecked(i, v);
58913 +}
58914 +#endif
58915 +
58916 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
58917 {
58918 atomic64_t *v = (atomic64_t *)l;
58919 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
58920 return (long)atomic64_inc_return(v);
58921 }
58922
58923 +#ifdef CONFIG_PAX_REFCOUNT
58924 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
58925 +{
58926 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58927 +
58928 + return (long)atomic64_inc_return_unchecked(v);
58929 +}
58930 +#endif
58931 +
58932 static inline long atomic_long_dec_return(atomic_long_t *l)
58933 {
58934 atomic64_t *v = (atomic64_t *)l;
58935 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
58936
58937 typedef atomic_t atomic_long_t;
58938
58939 +#ifdef CONFIG_PAX_REFCOUNT
58940 +typedef atomic_unchecked_t atomic_long_unchecked_t;
58941 +#else
58942 +typedef atomic_t atomic_long_unchecked_t;
58943 +#endif
58944 +
58945 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
58946 static inline long atomic_long_read(atomic_long_t *l)
58947 {
58948 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58949 return (long)atomic_read(v);
58950 }
58951
58952 +#ifdef CONFIG_PAX_REFCOUNT
58953 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58954 +{
58955 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58956 +
58957 + return (long)atomic_read_unchecked(v);
58958 +}
58959 +#endif
58960 +
58961 static inline void atomic_long_set(atomic_long_t *l, long i)
58962 {
58963 atomic_t *v = (atomic_t *)l;
58964 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
58965 atomic_set(v, i);
58966 }
58967
58968 +#ifdef CONFIG_PAX_REFCOUNT
58969 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58970 +{
58971 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58972 +
58973 + atomic_set_unchecked(v, i);
58974 +}
58975 +#endif
58976 +
58977 static inline void atomic_long_inc(atomic_long_t *l)
58978 {
58979 atomic_t *v = (atomic_t *)l;
58980 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
58981 atomic_inc(v);
58982 }
58983
58984 +#ifdef CONFIG_PAX_REFCOUNT
58985 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58986 +{
58987 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58988 +
58989 + atomic_inc_unchecked(v);
58990 +}
58991 +#endif
58992 +
58993 static inline void atomic_long_dec(atomic_long_t *l)
58994 {
58995 atomic_t *v = (atomic_t *)l;
58996 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
58997 atomic_dec(v);
58998 }
58999
59000 +#ifdef CONFIG_PAX_REFCOUNT
59001 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59002 +{
59003 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59004 +
59005 + atomic_dec_unchecked(v);
59006 +}
59007 +#endif
59008 +
59009 static inline void atomic_long_add(long i, atomic_long_t *l)
59010 {
59011 atomic_t *v = (atomic_t *)l;
59012 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59013 atomic_add(i, v);
59014 }
59015
59016 +#ifdef CONFIG_PAX_REFCOUNT
59017 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59018 +{
59019 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59020 +
59021 + atomic_add_unchecked(i, v);
59022 +}
59023 +#endif
59024 +
59025 static inline void atomic_long_sub(long i, atomic_long_t *l)
59026 {
59027 atomic_t *v = (atomic_t *)l;
59028 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59029 atomic_sub(i, v);
59030 }
59031
59032 +#ifdef CONFIG_PAX_REFCOUNT
59033 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59034 +{
59035 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59036 +
59037 + atomic_sub_unchecked(i, v);
59038 +}
59039 +#endif
59040 +
59041 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59042 {
59043 atomic_t *v = (atomic_t *)l;
59044 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59045 return (long)atomic_inc_return(v);
59046 }
59047
59048 +#ifdef CONFIG_PAX_REFCOUNT
59049 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59050 +{
59051 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59052 +
59053 + return (long)atomic_inc_return_unchecked(v);
59054 +}
59055 +#endif
59056 +
59057 static inline long atomic_long_dec_return(atomic_long_t *l)
59058 {
59059 atomic_t *v = (atomic_t *)l;
59060 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59061
59062 #endif /* BITS_PER_LONG == 64 */
59063
59064 +#ifdef CONFIG_PAX_REFCOUNT
59065 +static inline void pax_refcount_needs_these_functions(void)
59066 +{
59067 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
59068 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
59069 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
59070 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
59071 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
59072 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
59073 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
59074 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
59075 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
59076 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
59077 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
59078 +
59079 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
59080 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
59081 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
59082 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
59083 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
59084 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
59085 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
59086 +}
59087 +#else
59088 +#define atomic_read_unchecked(v) atomic_read(v)
59089 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
59090 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
59091 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
59092 +#define atomic_inc_unchecked(v) atomic_inc(v)
59093 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
59094 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
59095 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
59096 +#define atomic_dec_unchecked(v) atomic_dec(v)
59097 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
59098 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
59099 +
59100 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
59101 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
59102 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
59103 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
59104 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
59105 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
59106 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
59107 +#endif
59108 +
59109 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
59110 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
59111 index b18ce4f..2ee2843 100644
59112 --- a/include/asm-generic/atomic64.h
59113 +++ b/include/asm-generic/atomic64.h
59114 @@ -16,6 +16,8 @@ typedef struct {
59115 long long counter;
59116 } atomic64_t;
59117
59118 +typedef atomic64_t atomic64_unchecked_t;
59119 +
59120 #define ATOMIC64_INIT(i) { (i) }
59121
59122 extern long long atomic64_read(const atomic64_t *v);
59123 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
59124 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
59125 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
59126
59127 +#define atomic64_read_unchecked(v) atomic64_read(v)
59128 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
59129 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
59130 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
59131 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
59132 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
59133 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
59134 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
59135 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
59136 +
59137 #endif /* _ASM_GENERIC_ATOMIC64_H */
59138 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59139 index 1bfcfe5..e04c5c9 100644
59140 --- a/include/asm-generic/cache.h
59141 +++ b/include/asm-generic/cache.h
59142 @@ -6,7 +6,7 @@
59143 * cache lines need to provide their own cache.h.
59144 */
59145
59146 -#define L1_CACHE_SHIFT 5
59147 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
59148 +#define L1_CACHE_SHIFT 5UL
59149 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
59150
59151 #endif /* __ASM_GENERIC_CACHE_H */
59152 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
59153 index 0d68a1e..b74a761 100644
59154 --- a/include/asm-generic/emergency-restart.h
59155 +++ b/include/asm-generic/emergency-restart.h
59156 @@ -1,7 +1,7 @@
59157 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
59158 #define _ASM_GENERIC_EMERGENCY_RESTART_H
59159
59160 -static inline void machine_emergency_restart(void)
59161 +static inline __noreturn void machine_emergency_restart(void)
59162 {
59163 machine_restart(NULL);
59164 }
59165 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
59166 index 0232ccb..13d9165 100644
59167 --- a/include/asm-generic/kmap_types.h
59168 +++ b/include/asm-generic/kmap_types.h
59169 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
59170 KMAP_D(17) KM_NMI,
59171 KMAP_D(18) KM_NMI_PTE,
59172 KMAP_D(19) KM_KDB,
59173 +KMAP_D(20) KM_CLEARPAGE,
59174 /*
59175 * Remember to update debug_kmap_atomic() when adding new kmap types!
59176 */
59177 -KMAP_D(20) KM_TYPE_NR
59178 +KMAP_D(21) KM_TYPE_NR
59179 };
59180
59181 #undef KMAP_D
59182 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
59183 index 9ceb03b..2efbcbd 100644
59184 --- a/include/asm-generic/local.h
59185 +++ b/include/asm-generic/local.h
59186 @@ -39,6 +39,7 @@ typedef struct
59187 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
59188 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
59189 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
59190 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
59191
59192 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
59193 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
59194 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
59195 index 725612b..9cc513a 100644
59196 --- a/include/asm-generic/pgtable-nopmd.h
59197 +++ b/include/asm-generic/pgtable-nopmd.h
59198 @@ -1,14 +1,19 @@
59199 #ifndef _PGTABLE_NOPMD_H
59200 #define _PGTABLE_NOPMD_H
59201
59202 -#ifndef __ASSEMBLY__
59203 -
59204 #include <asm-generic/pgtable-nopud.h>
59205
59206 -struct mm_struct;
59207 -
59208 #define __PAGETABLE_PMD_FOLDED
59209
59210 +#define PMD_SHIFT PUD_SHIFT
59211 +#define PTRS_PER_PMD 1
59212 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
59213 +#define PMD_MASK (~(PMD_SIZE-1))
59214 +
59215 +#ifndef __ASSEMBLY__
59216 +
59217 +struct mm_struct;
59218 +
59219 /*
59220 * Having the pmd type consist of a pud gets the size right, and allows
59221 * us to conceptually access the pud entry that this pmd is folded into
59222 @@ -16,11 +21,6 @@ struct mm_struct;
59223 */
59224 typedef struct { pud_t pud; } pmd_t;
59225
59226 -#define PMD_SHIFT PUD_SHIFT
59227 -#define PTRS_PER_PMD 1
59228 -#define PMD_SIZE (1UL << PMD_SHIFT)
59229 -#define PMD_MASK (~(PMD_SIZE-1))
59230 -
59231 /*
59232 * The "pud_xxx()" functions here are trivial for a folded two-level
59233 * setup: the pmd is never bad, and a pmd always exists (as it's folded
59234 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
59235 index 810431d..0ec4804f 100644
59236 --- a/include/asm-generic/pgtable-nopud.h
59237 +++ b/include/asm-generic/pgtable-nopud.h
59238 @@ -1,10 +1,15 @@
59239 #ifndef _PGTABLE_NOPUD_H
59240 #define _PGTABLE_NOPUD_H
59241
59242 -#ifndef __ASSEMBLY__
59243 -
59244 #define __PAGETABLE_PUD_FOLDED
59245
59246 +#define PUD_SHIFT PGDIR_SHIFT
59247 +#define PTRS_PER_PUD 1
59248 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
59249 +#define PUD_MASK (~(PUD_SIZE-1))
59250 +
59251 +#ifndef __ASSEMBLY__
59252 +
59253 /*
59254 * Having the pud type consist of a pgd gets the size right, and allows
59255 * us to conceptually access the pgd entry that this pud is folded into
59256 @@ -12,11 +17,6 @@
59257 */
59258 typedef struct { pgd_t pgd; } pud_t;
59259
59260 -#define PUD_SHIFT PGDIR_SHIFT
59261 -#define PTRS_PER_PUD 1
59262 -#define PUD_SIZE (1UL << PUD_SHIFT)
59263 -#define PUD_MASK (~(PUD_SIZE-1))
59264 -
59265 /*
59266 * The "pgd_xxx()" functions here are trivial for a folded two-level
59267 * setup: the pud is never bad, and a pud always exists (as it's folded
59268 @@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
59269 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
59270
59271 #define pgd_populate(mm, pgd, pud) do { } while (0)
59272 +#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
59273 /*
59274 * (puds are folded into pgds so this doesn't get actually called,
59275 * but the define is needed for a generic inline function.)
59276 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
59277 index 125c54e..e95c18e 100644
59278 --- a/include/asm-generic/pgtable.h
59279 +++ b/include/asm-generic/pgtable.h
59280 @@ -446,6 +446,18 @@ static inline int pmd_write(pmd_t pmd)
59281 #endif /* __HAVE_ARCH_PMD_WRITE */
59282 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
59283
59284 +#ifndef __HAVE_ARCH_READ_PMD_ATOMIC
59285 +static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
59286 +{
59287 + /*
59288 + * Depend on compiler for an atomic pmd read. NOTE: this is
59289 + * only going to work, if the pmdval_t isn't larger than
59290 + * an unsigned long.
59291 + */
59292 + return *pmdp;
59293 +}
59294 +#endif /* __HAVE_ARCH_READ_PMD_ATOMIC */
59295 +
59296 /*
59297 * This function is meant to be used by sites walking pagetables with
59298 * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
59299 @@ -459,11 +471,17 @@ static inline int pmd_write(pmd_t pmd)
59300 * undefined so behaving like if the pmd was none is safe (because it
59301 * can return none anyway). The compiler level barrier() is critically
59302 * important to compute the two checks atomically on the same pmdval.
59303 + *
59304 + * For 32bit kernels with a 64bit large pmd_t this automatically takes
59305 + * care of reading the pmd atomically to avoid SMP race conditions
59306 + * against pmd_populate() when the mmap_sem is hold for reading by the
59307 + * caller (a special atomic read not done by "gcc" as in the generic
59308 + * version above, is also needed when THP is disabled because the page
59309 + * fault can populate the pmd from under us).
59310 */
59311 static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
59312 {
59313 - /* depend on compiler for an atomic pmd read */
59314 - pmd_t pmdval = *pmd;
59315 + pmd_t pmdval = read_pmd_atomic(pmd);
59316 /*
59317 * The barrier will stabilize the pmdval in a register or on
59318 * the stack so that it will stop changing under the code.
59319 @@ -503,6 +521,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
59320 #endif
59321 }
59322
59323 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
59324 +static inline unsigned long pax_open_kernel(void) { return 0; }
59325 +#endif
59326 +
59327 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
59328 +static inline unsigned long pax_close_kernel(void) { return 0; }
59329 +#endif
59330 +
59331 #endif /* CONFIG_MMU */
59332
59333 #endif /* !__ASSEMBLY__ */
59334 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
59335 index 8aeadf6..f1dc019 100644
59336 --- a/include/asm-generic/vmlinux.lds.h
59337 +++ b/include/asm-generic/vmlinux.lds.h
59338 @@ -218,6 +218,7 @@
59339 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
59340 VMLINUX_SYMBOL(__start_rodata) = .; \
59341 *(.rodata) *(.rodata.*) \
59342 + *(.data..read_only) \
59343 *(__vermagic) /* Kernel version magic */ \
59344 . = ALIGN(8); \
59345 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
59346 @@ -716,17 +717,18 @@
59347 * section in the linker script will go there too. @phdr should have
59348 * a leading colon.
59349 *
59350 - * Note that this macros defines __per_cpu_load as an absolute symbol.
59351 + * Note that this macros defines per_cpu_load as an absolute symbol.
59352 * If there is no need to put the percpu section at a predetermined
59353 * address, use PERCPU_SECTION.
59354 */
59355 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
59356 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
59357 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
59358 + per_cpu_load = .; \
59359 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
59360 - LOAD_OFFSET) { \
59361 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
59362 PERCPU_INPUT(cacheline) \
59363 } phdr \
59364 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
59365 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
59366
59367 /**
59368 * PERCPU_SECTION - define output section for percpu area, simple version
59369 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
59370 index dd73104..fde86bd 100644
59371 --- a/include/drm/drmP.h
59372 +++ b/include/drm/drmP.h
59373 @@ -72,6 +72,7 @@
59374 #include <linux/workqueue.h>
59375 #include <linux/poll.h>
59376 #include <asm/pgalloc.h>
59377 +#include <asm/local.h>
59378 #include "drm.h"
59379
59380 #include <linux/idr.h>
59381 @@ -1074,7 +1075,7 @@ struct drm_device {
59382
59383 /** \name Usage Counters */
59384 /*@{ */
59385 - int open_count; /**< Outstanding files open */
59386 + local_t open_count; /**< Outstanding files open */
59387 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
59388 atomic_t vma_count; /**< Outstanding vma areas open */
59389 int buf_use; /**< Buffers in use -- cannot alloc */
59390 @@ -1085,7 +1086,7 @@ struct drm_device {
59391 /*@{ */
59392 unsigned long counters;
59393 enum drm_stat_type types[15];
59394 - atomic_t counts[15];
59395 + atomic_unchecked_t counts[15];
59396 /*@} */
59397
59398 struct list_head filelist;
59399 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
59400 index 37515d1..34fa8b0 100644
59401 --- a/include/drm/drm_crtc_helper.h
59402 +++ b/include/drm/drm_crtc_helper.h
59403 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
59404
59405 /* disable crtc when not in use - more explicit than dpms off */
59406 void (*disable)(struct drm_crtc *crtc);
59407 -};
59408 +} __no_const;
59409
59410 struct drm_encoder_helper_funcs {
59411 void (*dpms)(struct drm_encoder *encoder, int mode);
59412 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
59413 struct drm_connector *connector);
59414 /* disable encoder when not in use - more explicit than dpms off */
59415 void (*disable)(struct drm_encoder *encoder);
59416 -};
59417 +} __no_const;
59418
59419 struct drm_connector_helper_funcs {
59420 int (*get_modes)(struct drm_connector *connector);
59421 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
59422 index d6d1da4..fdd1ac5 100644
59423 --- a/include/drm/ttm/ttm_memory.h
59424 +++ b/include/drm/ttm/ttm_memory.h
59425 @@ -48,7 +48,7 @@
59426
59427 struct ttm_mem_shrink {
59428 int (*do_shrink) (struct ttm_mem_shrink *);
59429 -};
59430 +} __no_const;
59431
59432 /**
59433 * struct ttm_mem_global - Global memory accounting structure.
59434 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
59435 index e86dfca..40cc55f 100644
59436 --- a/include/linux/a.out.h
59437 +++ b/include/linux/a.out.h
59438 @@ -39,6 +39,14 @@ enum machine_type {
59439 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
59440 };
59441
59442 +/* Constants for the N_FLAGS field */
59443 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59444 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
59445 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
59446 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
59447 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59448 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59449 +
59450 #if !defined (N_MAGIC)
59451 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
59452 #endif
59453 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
59454 index 06fd4bb..1caec0d 100644
59455 --- a/include/linux/atmdev.h
59456 +++ b/include/linux/atmdev.h
59457 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
59458 #endif
59459
59460 struct k_atm_aal_stats {
59461 -#define __HANDLE_ITEM(i) atomic_t i
59462 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
59463 __AAL_STAT_ITEMS
59464 #undef __HANDLE_ITEM
59465 };
59466 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
59467 index 366422b..1fa7f84 100644
59468 --- a/include/linux/binfmts.h
59469 +++ b/include/linux/binfmts.h
59470 @@ -89,6 +89,7 @@ struct linux_binfmt {
59471 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
59472 int (*load_shlib)(struct file *);
59473 int (*core_dump)(struct coredump_params *cprm);
59474 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
59475 unsigned long min_coredump; /* minimal dump size */
59476 };
59477
59478 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
59479 index 4d4ac24..2c3ccce 100644
59480 --- a/include/linux/blkdev.h
59481 +++ b/include/linux/blkdev.h
59482 @@ -1376,7 +1376,7 @@ struct block_device_operations {
59483 /* this callback is with swap_lock and sometimes page table lock held */
59484 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
59485 struct module *owner;
59486 -};
59487 +} __do_const;
59488
59489 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
59490 unsigned long);
59491 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
59492 index 4d1a074..88f929a 100644
59493 --- a/include/linux/blktrace_api.h
59494 +++ b/include/linux/blktrace_api.h
59495 @@ -162,7 +162,7 @@ struct blk_trace {
59496 struct dentry *dir;
59497 struct dentry *dropped_file;
59498 struct dentry *msg_file;
59499 - atomic_t dropped;
59500 + atomic_unchecked_t dropped;
59501 };
59502
59503 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
59504 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
59505 index 83195fb..0b0f77d 100644
59506 --- a/include/linux/byteorder/little_endian.h
59507 +++ b/include/linux/byteorder/little_endian.h
59508 @@ -42,51 +42,51 @@
59509
59510 static inline __le64 __cpu_to_le64p(const __u64 *p)
59511 {
59512 - return (__force __le64)*p;
59513 + return (__force const __le64)*p;
59514 }
59515 static inline __u64 __le64_to_cpup(const __le64 *p)
59516 {
59517 - return (__force __u64)*p;
59518 + return (__force const __u64)*p;
59519 }
59520 static inline __le32 __cpu_to_le32p(const __u32 *p)
59521 {
59522 - return (__force __le32)*p;
59523 + return (__force const __le32)*p;
59524 }
59525 static inline __u32 __le32_to_cpup(const __le32 *p)
59526 {
59527 - return (__force __u32)*p;
59528 + return (__force const __u32)*p;
59529 }
59530 static inline __le16 __cpu_to_le16p(const __u16 *p)
59531 {
59532 - return (__force __le16)*p;
59533 + return (__force const __le16)*p;
59534 }
59535 static inline __u16 __le16_to_cpup(const __le16 *p)
59536 {
59537 - return (__force __u16)*p;
59538 + return (__force const __u16)*p;
59539 }
59540 static inline __be64 __cpu_to_be64p(const __u64 *p)
59541 {
59542 - return (__force __be64)__swab64p(p);
59543 + return (__force const __be64)__swab64p(p);
59544 }
59545 static inline __u64 __be64_to_cpup(const __be64 *p)
59546 {
59547 - return __swab64p((__u64 *)p);
59548 + return __swab64p((const __u64 *)p);
59549 }
59550 static inline __be32 __cpu_to_be32p(const __u32 *p)
59551 {
59552 - return (__force __be32)__swab32p(p);
59553 + return (__force const __be32)__swab32p(p);
59554 }
59555 static inline __u32 __be32_to_cpup(const __be32 *p)
59556 {
59557 - return __swab32p((__u32 *)p);
59558 + return __swab32p((const __u32 *)p);
59559 }
59560 static inline __be16 __cpu_to_be16p(const __u16 *p)
59561 {
59562 - return (__force __be16)__swab16p(p);
59563 + return (__force const __be16)__swab16p(p);
59564 }
59565 static inline __u16 __be16_to_cpup(const __be16 *p)
59566 {
59567 - return __swab16p((__u16 *)p);
59568 + return __swab16p((const __u16 *)p);
59569 }
59570 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
59571 #define __le64_to_cpus(x) do { (void)(x); } while (0)
59572 diff --git a/include/linux/cache.h b/include/linux/cache.h
59573 index 4c57065..4307975 100644
59574 --- a/include/linux/cache.h
59575 +++ b/include/linux/cache.h
59576 @@ -16,6 +16,10 @@
59577 #define __read_mostly
59578 #endif
59579
59580 +#ifndef __read_only
59581 +#define __read_only __read_mostly
59582 +#endif
59583 +
59584 #ifndef ____cacheline_aligned
59585 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
59586 #endif
59587 diff --git a/include/linux/capability.h b/include/linux/capability.h
59588 index 12d52de..b5f7fa7 100644
59589 --- a/include/linux/capability.h
59590 +++ b/include/linux/capability.h
59591 @@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
59592 extern bool capable(int cap);
59593 extern bool ns_capable(struct user_namespace *ns, int cap);
59594 extern bool nsown_capable(int cap);
59595 +extern bool capable_nolog(int cap);
59596 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
59597
59598 /* audit system wants to get cap info from files as well */
59599 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
59600 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
59601 index 42e55de..1cd0e66 100644
59602 --- a/include/linux/cleancache.h
59603 +++ b/include/linux/cleancache.h
59604 @@ -31,7 +31,7 @@ struct cleancache_ops {
59605 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
59606 void (*invalidate_inode)(int, struct cleancache_filekey);
59607 void (*invalidate_fs)(int);
59608 -};
59609 +} __no_const;
59610
59611 extern struct cleancache_ops
59612 cleancache_register_ops(struct cleancache_ops *ops);
59613 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
59614 index 2f40791..a62d196 100644
59615 --- a/include/linux/compiler-gcc4.h
59616 +++ b/include/linux/compiler-gcc4.h
59617 @@ -32,6 +32,16 @@
59618 #define __linktime_error(message) __attribute__((__error__(message)))
59619
59620 #if __GNUC_MINOR__ >= 5
59621 +
59622 +#ifdef CONSTIFY_PLUGIN
59623 +#define __no_const __attribute__((no_const))
59624 +#define __do_const __attribute__((do_const))
59625 +#endif
59626 +
59627 +#ifdef SIZE_OVERFLOW_PLUGIN
59628 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
59629 +#endif
59630 +
59631 /*
59632 * Mark a position in code as unreachable. This can be used to
59633 * suppress control flow warnings after asm blocks that transfer
59634 @@ -47,6 +57,11 @@
59635 #define __noclone __attribute__((__noclone__))
59636
59637 #endif
59638 +
59639 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
59640 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
59641 +#define __bos0(ptr) __bos((ptr), 0)
59642 +#define __bos1(ptr) __bos((ptr), 1)
59643 #endif
59644
59645 #if __GNUC_MINOR__ > 0
59646 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
59647 index 923d093..726c17f 100644
59648 --- a/include/linux/compiler.h
59649 +++ b/include/linux/compiler.h
59650 @@ -5,31 +5,62 @@
59651
59652 #ifdef __CHECKER__
59653 # define __user __attribute__((noderef, address_space(1)))
59654 +# define __force_user __force __user
59655 # define __kernel __attribute__((address_space(0)))
59656 +# define __force_kernel __force __kernel
59657 # define __safe __attribute__((safe))
59658 # define __force __attribute__((force))
59659 # define __nocast __attribute__((nocast))
59660 # define __iomem __attribute__((noderef, address_space(2)))
59661 +# define __force_iomem __force __iomem
59662 # define __acquires(x) __attribute__((context(x,0,1)))
59663 # define __releases(x) __attribute__((context(x,1,0)))
59664 # define __acquire(x) __context__(x,1)
59665 # define __release(x) __context__(x,-1)
59666 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
59667 # define __percpu __attribute__((noderef, address_space(3)))
59668 +# define __force_percpu __force __percpu
59669 #ifdef CONFIG_SPARSE_RCU_POINTER
59670 # define __rcu __attribute__((noderef, address_space(4)))
59671 +# define __force_rcu __force __rcu
59672 #else
59673 # define __rcu
59674 +# define __force_rcu
59675 #endif
59676 extern void __chk_user_ptr(const volatile void __user *);
59677 extern void __chk_io_ptr(const volatile void __iomem *);
59678 +#elif defined(CHECKER_PLUGIN)
59679 +//# define __user
59680 +//# define __force_user
59681 +//# define __kernel
59682 +//# define __force_kernel
59683 +# define __safe
59684 +# define __force
59685 +# define __nocast
59686 +# define __iomem
59687 +# define __force_iomem
59688 +# define __chk_user_ptr(x) (void)0
59689 +# define __chk_io_ptr(x) (void)0
59690 +# define __builtin_warning(x, y...) (1)
59691 +# define __acquires(x)
59692 +# define __releases(x)
59693 +# define __acquire(x) (void)0
59694 +# define __release(x) (void)0
59695 +# define __cond_lock(x,c) (c)
59696 +# define __percpu
59697 +# define __force_percpu
59698 +# define __rcu
59699 +# define __force_rcu
59700 #else
59701 # define __user
59702 +# define __force_user
59703 # define __kernel
59704 +# define __force_kernel
59705 # define __safe
59706 # define __force
59707 # define __nocast
59708 # define __iomem
59709 +# define __force_iomem
59710 # define __chk_user_ptr(x) (void)0
59711 # define __chk_io_ptr(x) (void)0
59712 # define __builtin_warning(x, y...) (1)
59713 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
59714 # define __release(x) (void)0
59715 # define __cond_lock(x,c) (c)
59716 # define __percpu
59717 +# define __force_percpu
59718 # define __rcu
59719 +# define __force_rcu
59720 #endif
59721
59722 #ifdef __KERNEL__
59723 @@ -264,6 +297,18 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59724 # define __attribute_const__ /* unimplemented */
59725 #endif
59726
59727 +#ifndef __no_const
59728 +# define __no_const
59729 +#endif
59730 +
59731 +#ifndef __do_const
59732 +# define __do_const
59733 +#endif
59734 +
59735 +#ifndef __size_overflow
59736 +# define __size_overflow(...)
59737 +#endif
59738 +
59739 /*
59740 * Tell gcc if a function is cold. The compiler will assume any path
59741 * directly leading to the call is unlikely.
59742 @@ -273,6 +318,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59743 #define __cold
59744 #endif
59745
59746 +#ifndef __alloc_size
59747 +#define __alloc_size(...)
59748 +#endif
59749 +
59750 +#ifndef __bos
59751 +#define __bos(ptr, arg)
59752 +#endif
59753 +
59754 +#ifndef __bos0
59755 +#define __bos0(ptr)
59756 +#endif
59757 +
59758 +#ifndef __bos1
59759 +#define __bos1(ptr)
59760 +#endif
59761 +
59762 /* Simple shorthand for a section definition */
59763 #ifndef __section
59764 # define __section(S) __attribute__ ((__section__(#S)))
59765 @@ -308,6 +369,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59766 * use is to mediate communication between process-level code and irq/NMI
59767 * handlers, all running on the same CPU.
59768 */
59769 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
59770 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
59771 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
59772
59773 #endif /* __LINUX_COMPILER_H */
59774 diff --git a/include/linux/cred.h b/include/linux/cred.h
59775 index adadf71..6af5560 100644
59776 --- a/include/linux/cred.h
59777 +++ b/include/linux/cred.h
59778 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
59779 static inline void validate_process_creds(void)
59780 {
59781 }
59782 +static inline void validate_task_creds(struct task_struct *task)
59783 +{
59784 +}
59785 #endif
59786
59787 /**
59788 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
59789 index b92eadf..b4ecdc1 100644
59790 --- a/include/linux/crypto.h
59791 +++ b/include/linux/crypto.h
59792 @@ -373,7 +373,7 @@ struct cipher_tfm {
59793 const u8 *key, unsigned int keylen);
59794 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59795 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59796 -};
59797 +} __no_const;
59798
59799 struct hash_tfm {
59800 int (*init)(struct hash_desc *desc);
59801 @@ -394,13 +394,13 @@ struct compress_tfm {
59802 int (*cot_decompress)(struct crypto_tfm *tfm,
59803 const u8 *src, unsigned int slen,
59804 u8 *dst, unsigned int *dlen);
59805 -};
59806 +} __no_const;
59807
59808 struct rng_tfm {
59809 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
59810 unsigned int dlen);
59811 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
59812 -};
59813 +} __no_const;
59814
59815 #define crt_ablkcipher crt_u.ablkcipher
59816 #define crt_aead crt_u.aead
59817 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
59818 index 7925bf0..d5143d2 100644
59819 --- a/include/linux/decompress/mm.h
59820 +++ b/include/linux/decompress/mm.h
59821 @@ -77,7 +77,7 @@ static void free(void *where)
59822 * warnings when not needed (indeed large_malloc / large_free are not
59823 * needed by inflate */
59824
59825 -#define malloc(a) kmalloc(a, GFP_KERNEL)
59826 +#define malloc(a) kmalloc((a), GFP_KERNEL)
59827 #define free(a) kfree(a)
59828
59829 #define large_malloc(a) vmalloc(a)
59830 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
59831 index dfc099e..e583e66 100644
59832 --- a/include/linux/dma-mapping.h
59833 +++ b/include/linux/dma-mapping.h
59834 @@ -51,7 +51,7 @@ struct dma_map_ops {
59835 u64 (*get_required_mask)(struct device *dev);
59836 #endif
59837 int is_phys;
59838 -};
59839 +} __do_const;
59840
59841 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
59842
59843 diff --git a/include/linux/efi.h b/include/linux/efi.h
59844 index ec45ccd..9923c32 100644
59845 --- a/include/linux/efi.h
59846 +++ b/include/linux/efi.h
59847 @@ -635,7 +635,7 @@ struct efivar_operations {
59848 efi_get_variable_t *get_variable;
59849 efi_get_next_variable_t *get_next_variable;
59850 efi_set_variable_t *set_variable;
59851 -};
59852 +} __no_const;
59853
59854 struct efivars {
59855 /*
59856 diff --git a/include/linux/elf.h b/include/linux/elf.h
59857 index 999b4f5..57753b4 100644
59858 --- a/include/linux/elf.h
59859 +++ b/include/linux/elf.h
59860 @@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
59861 #define PT_GNU_EH_FRAME 0x6474e550
59862
59863 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
59864 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
59865 +
59866 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
59867 +
59868 +/* Constants for the e_flags field */
59869 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59870 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
59871 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
59872 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
59873 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59874 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59875
59876 /*
59877 * Extended Numbering
59878 @@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
59879 #define DT_DEBUG 21
59880 #define DT_TEXTREL 22
59881 #define DT_JMPREL 23
59882 +#define DT_FLAGS 30
59883 + #define DF_TEXTREL 0x00000004
59884 #define DT_ENCODING 32
59885 #define OLD_DT_LOOS 0x60000000
59886 #define DT_LOOS 0x6000000d
59887 @@ -243,6 +256,19 @@ typedef struct elf64_hdr {
59888 #define PF_W 0x2
59889 #define PF_X 0x1
59890
59891 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
59892 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
59893 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
59894 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
59895 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
59896 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
59897 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
59898 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
59899 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
59900 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
59901 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
59902 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
59903 +
59904 typedef struct elf32_phdr{
59905 Elf32_Word p_type;
59906 Elf32_Off p_offset;
59907 @@ -335,6 +361,8 @@ typedef struct elf64_shdr {
59908 #define EI_OSABI 7
59909 #define EI_PAD 8
59910
59911 +#define EI_PAX 14
59912 +
59913 #define ELFMAG0 0x7f /* EI_MAG */
59914 #define ELFMAG1 'E'
59915 #define ELFMAG2 'L'
59916 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
59917 #define elf_note elf32_note
59918 #define elf_addr_t Elf32_Off
59919 #define Elf_Half Elf32_Half
59920 +#define elf_dyn Elf32_Dyn
59921
59922 #else
59923
59924 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
59925 #define elf_note elf64_note
59926 #define elf_addr_t Elf64_Off
59927 #define Elf_Half Elf64_Half
59928 +#define elf_dyn Elf64_Dyn
59929
59930 #endif
59931
59932 diff --git a/include/linux/filter.h b/include/linux/filter.h
59933 index 8eeb205..d59bfa2 100644
59934 --- a/include/linux/filter.h
59935 +++ b/include/linux/filter.h
59936 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
59937
59938 struct sk_buff;
59939 struct sock;
59940 +struct bpf_jit_work;
59941
59942 struct sk_filter
59943 {
59944 @@ -141,6 +142,9 @@ struct sk_filter
59945 unsigned int len; /* Number of filter blocks */
59946 unsigned int (*bpf_func)(const struct sk_buff *skb,
59947 const struct sock_filter *filter);
59948 +#ifdef CONFIG_BPF_JIT
59949 + struct bpf_jit_work *work;
59950 +#endif
59951 struct rcu_head rcu;
59952 struct sock_filter insns[0];
59953 };
59954 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
59955 index cdc9b71..ce69fb5 100644
59956 --- a/include/linux/firewire.h
59957 +++ b/include/linux/firewire.h
59958 @@ -413,7 +413,7 @@ struct fw_iso_context {
59959 union {
59960 fw_iso_callback_t sc;
59961 fw_iso_mc_callback_t mc;
59962 - } callback;
59963 + } __no_const callback;
59964 void *callback_data;
59965 };
59966
59967 diff --git a/include/linux/fs.h b/include/linux/fs.h
59968 index 25c40b9..1bfd4f4 100644
59969 --- a/include/linux/fs.h
59970 +++ b/include/linux/fs.h
59971 @@ -1634,7 +1634,8 @@ struct file_operations {
59972 int (*setlease)(struct file *, long, struct file_lock **);
59973 long (*fallocate)(struct file *file, int mode, loff_t offset,
59974 loff_t len);
59975 -};
59976 +} __do_const;
59977 +typedef struct file_operations __no_const file_operations_no_const;
59978
59979 struct inode_operations {
59980 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
59981 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
59982 index 003dc0f..3c4ea97 100644
59983 --- a/include/linux/fs_struct.h
59984 +++ b/include/linux/fs_struct.h
59985 @@ -6,7 +6,7 @@
59986 #include <linux/seqlock.h>
59987
59988 struct fs_struct {
59989 - int users;
59990 + atomic_t users;
59991 spinlock_t lock;
59992 seqcount_t seq;
59993 int umask;
59994 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
59995 index ce31408..b1ad003 100644
59996 --- a/include/linux/fscache-cache.h
59997 +++ b/include/linux/fscache-cache.h
59998 @@ -102,7 +102,7 @@ struct fscache_operation {
59999 fscache_operation_release_t release;
60000 };
60001
60002 -extern atomic_t fscache_op_debug_id;
60003 +extern atomic_unchecked_t fscache_op_debug_id;
60004 extern void fscache_op_work_func(struct work_struct *work);
60005
60006 extern void fscache_enqueue_operation(struct fscache_operation *);
60007 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
60008 {
60009 INIT_WORK(&op->work, fscache_op_work_func);
60010 atomic_set(&op->usage, 1);
60011 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
60012 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60013 op->processor = processor;
60014 op->release = release;
60015 INIT_LIST_HEAD(&op->pend_link);
60016 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
60017 index a6dfe69..569586df 100644
60018 --- a/include/linux/fsnotify.h
60019 +++ b/include/linux/fsnotify.h
60020 @@ -315,7 +315,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
60021 */
60022 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
60023 {
60024 - return kstrdup(name, GFP_KERNEL);
60025 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
60026 }
60027
60028 /*
60029 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
60030 index 91d0e0a3..035666b 100644
60031 --- a/include/linux/fsnotify_backend.h
60032 +++ b/include/linux/fsnotify_backend.h
60033 @@ -105,6 +105,7 @@ struct fsnotify_ops {
60034 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
60035 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
60036 };
60037 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
60038
60039 /*
60040 * A group is a "thing" that wants to receive notification about filesystem
60041 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
60042 index 176a939..1462211 100644
60043 --- a/include/linux/ftrace_event.h
60044 +++ b/include/linux/ftrace_event.h
60045 @@ -97,7 +97,7 @@ struct trace_event_functions {
60046 trace_print_func raw;
60047 trace_print_func hex;
60048 trace_print_func binary;
60049 -};
60050 +} __no_const;
60051
60052 struct trace_event {
60053 struct hlist_node node;
60054 @@ -263,7 +263,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
60055 extern int trace_add_event_call(struct ftrace_event_call *call);
60056 extern void trace_remove_event_call(struct ftrace_event_call *call);
60057
60058 -#define is_signed_type(type) (((type)(-1)) < 0)
60059 +#define is_signed_type(type) (((type)(-1)) < (type)1)
60060
60061 int trace_set_clr_event(const char *system, const char *event, int set);
60062
60063 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
60064 index 017a7fb..33a8507 100644
60065 --- a/include/linux/genhd.h
60066 +++ b/include/linux/genhd.h
60067 @@ -185,7 +185,7 @@ struct gendisk {
60068 struct kobject *slave_dir;
60069
60070 struct timer_rand_state *random;
60071 - atomic_t sync_io; /* RAID */
60072 + atomic_unchecked_t sync_io; /* RAID */
60073 struct disk_events *ev;
60074 #ifdef CONFIG_BLK_DEV_INTEGRITY
60075 struct blk_integrity *integrity;
60076 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
60077 new file mode 100644
60078 index 0000000..c938b1f
60079 --- /dev/null
60080 +++ b/include/linux/gracl.h
60081 @@ -0,0 +1,319 @@
60082 +#ifndef GR_ACL_H
60083 +#define GR_ACL_H
60084 +
60085 +#include <linux/grdefs.h>
60086 +#include <linux/resource.h>
60087 +#include <linux/capability.h>
60088 +#include <linux/dcache.h>
60089 +#include <asm/resource.h>
60090 +
60091 +/* Major status information */
60092 +
60093 +#define GR_VERSION "grsecurity 2.9.1"
60094 +#define GRSECURITY_VERSION 0x2901
60095 +
60096 +enum {
60097 + GR_SHUTDOWN = 0,
60098 + GR_ENABLE = 1,
60099 + GR_SPROLE = 2,
60100 + GR_RELOAD = 3,
60101 + GR_SEGVMOD = 4,
60102 + GR_STATUS = 5,
60103 + GR_UNSPROLE = 6,
60104 + GR_PASSSET = 7,
60105 + GR_SPROLEPAM = 8,
60106 +};
60107 +
60108 +/* Password setup definitions
60109 + * kernel/grhash.c */
60110 +enum {
60111 + GR_PW_LEN = 128,
60112 + GR_SALT_LEN = 16,
60113 + GR_SHA_LEN = 32,
60114 +};
60115 +
60116 +enum {
60117 + GR_SPROLE_LEN = 64,
60118 +};
60119 +
60120 +enum {
60121 + GR_NO_GLOB = 0,
60122 + GR_REG_GLOB,
60123 + GR_CREATE_GLOB
60124 +};
60125 +
60126 +#define GR_NLIMITS 32
60127 +
60128 +/* Begin Data Structures */
60129 +
60130 +struct sprole_pw {
60131 + unsigned char *rolename;
60132 + unsigned char salt[GR_SALT_LEN];
60133 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
60134 +};
60135 +
60136 +struct name_entry {
60137 + __u32 key;
60138 + ino_t inode;
60139 + dev_t device;
60140 + char *name;
60141 + __u16 len;
60142 + __u8 deleted;
60143 + struct name_entry *prev;
60144 + struct name_entry *next;
60145 +};
60146 +
60147 +struct inodev_entry {
60148 + struct name_entry *nentry;
60149 + struct inodev_entry *prev;
60150 + struct inodev_entry *next;
60151 +};
60152 +
60153 +struct acl_role_db {
60154 + struct acl_role_label **r_hash;
60155 + __u32 r_size;
60156 +};
60157 +
60158 +struct inodev_db {
60159 + struct inodev_entry **i_hash;
60160 + __u32 i_size;
60161 +};
60162 +
60163 +struct name_db {
60164 + struct name_entry **n_hash;
60165 + __u32 n_size;
60166 +};
60167 +
60168 +struct crash_uid {
60169 + uid_t uid;
60170 + unsigned long expires;
60171 +};
60172 +
60173 +struct gr_hash_struct {
60174 + void **table;
60175 + void **nametable;
60176 + void *first;
60177 + __u32 table_size;
60178 + __u32 used_size;
60179 + int type;
60180 +};
60181 +
60182 +/* Userspace Grsecurity ACL data structures */
60183 +
60184 +struct acl_subject_label {
60185 + char *filename;
60186 + ino_t inode;
60187 + dev_t device;
60188 + __u32 mode;
60189 + kernel_cap_t cap_mask;
60190 + kernel_cap_t cap_lower;
60191 + kernel_cap_t cap_invert_audit;
60192 +
60193 + struct rlimit res[GR_NLIMITS];
60194 + __u32 resmask;
60195 +
60196 + __u8 user_trans_type;
60197 + __u8 group_trans_type;
60198 + uid_t *user_transitions;
60199 + gid_t *group_transitions;
60200 + __u16 user_trans_num;
60201 + __u16 group_trans_num;
60202 +
60203 + __u32 sock_families[2];
60204 + __u32 ip_proto[8];
60205 + __u32 ip_type;
60206 + struct acl_ip_label **ips;
60207 + __u32 ip_num;
60208 + __u32 inaddr_any_override;
60209 +
60210 + __u32 crashes;
60211 + unsigned long expires;
60212 +
60213 + struct acl_subject_label *parent_subject;
60214 + struct gr_hash_struct *hash;
60215 + struct acl_subject_label *prev;
60216 + struct acl_subject_label *next;
60217 +
60218 + struct acl_object_label **obj_hash;
60219 + __u32 obj_hash_size;
60220 + __u16 pax_flags;
60221 +};
60222 +
60223 +struct role_allowed_ip {
60224 + __u32 addr;
60225 + __u32 netmask;
60226 +
60227 + struct role_allowed_ip *prev;
60228 + struct role_allowed_ip *next;
60229 +};
60230 +
60231 +struct role_transition {
60232 + char *rolename;
60233 +
60234 + struct role_transition *prev;
60235 + struct role_transition *next;
60236 +};
60237 +
60238 +struct acl_role_label {
60239 + char *rolename;
60240 + uid_t uidgid;
60241 + __u16 roletype;
60242 +
60243 + __u16 auth_attempts;
60244 + unsigned long expires;
60245 +
60246 + struct acl_subject_label *root_label;
60247 + struct gr_hash_struct *hash;
60248 +
60249 + struct acl_role_label *prev;
60250 + struct acl_role_label *next;
60251 +
60252 + struct role_transition *transitions;
60253 + struct role_allowed_ip *allowed_ips;
60254 + uid_t *domain_children;
60255 + __u16 domain_child_num;
60256 +
60257 + umode_t umask;
60258 +
60259 + struct acl_subject_label **subj_hash;
60260 + __u32 subj_hash_size;
60261 +};
60262 +
60263 +struct user_acl_role_db {
60264 + struct acl_role_label **r_table;
60265 + __u32 num_pointers; /* Number of allocations to track */
60266 + __u32 num_roles; /* Number of roles */
60267 + __u32 num_domain_children; /* Number of domain children */
60268 + __u32 num_subjects; /* Number of subjects */
60269 + __u32 num_objects; /* Number of objects */
60270 +};
60271 +
60272 +struct acl_object_label {
60273 + char *filename;
60274 + ino_t inode;
60275 + dev_t device;
60276 + __u32 mode;
60277 +
60278 + struct acl_subject_label *nested;
60279 + struct acl_object_label *globbed;
60280 +
60281 + /* next two structures not used */
60282 +
60283 + struct acl_object_label *prev;
60284 + struct acl_object_label *next;
60285 +};
60286 +
60287 +struct acl_ip_label {
60288 + char *iface;
60289 + __u32 addr;
60290 + __u32 netmask;
60291 + __u16 low, high;
60292 + __u8 mode;
60293 + __u32 type;
60294 + __u32 proto[8];
60295 +
60296 + /* next two structures not used */
60297 +
60298 + struct acl_ip_label *prev;
60299 + struct acl_ip_label *next;
60300 +};
60301 +
60302 +struct gr_arg {
60303 + struct user_acl_role_db role_db;
60304 + unsigned char pw[GR_PW_LEN];
60305 + unsigned char salt[GR_SALT_LEN];
60306 + unsigned char sum[GR_SHA_LEN];
60307 + unsigned char sp_role[GR_SPROLE_LEN];
60308 + struct sprole_pw *sprole_pws;
60309 + dev_t segv_device;
60310 + ino_t segv_inode;
60311 + uid_t segv_uid;
60312 + __u16 num_sprole_pws;
60313 + __u16 mode;
60314 +};
60315 +
60316 +struct gr_arg_wrapper {
60317 + struct gr_arg *arg;
60318 + __u32 version;
60319 + __u32 size;
60320 +};
60321 +
60322 +struct subject_map {
60323 + struct acl_subject_label *user;
60324 + struct acl_subject_label *kernel;
60325 + struct subject_map *prev;
60326 + struct subject_map *next;
60327 +};
60328 +
60329 +struct acl_subj_map_db {
60330 + struct subject_map **s_hash;
60331 + __u32 s_size;
60332 +};
60333 +
60334 +/* End Data Structures Section */
60335 +
60336 +/* Hash functions generated by empirical testing by Brad Spengler
60337 + Makes good use of the low bits of the inode. Generally 0-1 times
60338 + in loop for successful match. 0-3 for unsuccessful match.
60339 + Shift/add algorithm with modulus of table size and an XOR*/
60340 +
60341 +static __inline__ unsigned int
60342 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
60343 +{
60344 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
60345 +}
60346 +
60347 + static __inline__ unsigned int
60348 +shash(const struct acl_subject_label *userp, const unsigned int sz)
60349 +{
60350 + return ((const unsigned long)userp % sz);
60351 +}
60352 +
60353 +static __inline__ unsigned int
60354 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
60355 +{
60356 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
60357 +}
60358 +
60359 +static __inline__ unsigned int
60360 +nhash(const char *name, const __u16 len, const unsigned int sz)
60361 +{
60362 + return full_name_hash((const unsigned char *)name, len) % sz;
60363 +}
60364 +
60365 +#define FOR_EACH_ROLE_START(role) \
60366 + role = role_list; \
60367 + while (role) {
60368 +
60369 +#define FOR_EACH_ROLE_END(role) \
60370 + role = role->prev; \
60371 + }
60372 +
60373 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
60374 + subj = NULL; \
60375 + iter = 0; \
60376 + while (iter < role->subj_hash_size) { \
60377 + if (subj == NULL) \
60378 + subj = role->subj_hash[iter]; \
60379 + if (subj == NULL) { \
60380 + iter++; \
60381 + continue; \
60382 + }
60383 +
60384 +#define FOR_EACH_SUBJECT_END(subj,iter) \
60385 + subj = subj->next; \
60386 + if (subj == NULL) \
60387 + iter++; \
60388 + }
60389 +
60390 +
60391 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
60392 + subj = role->hash->first; \
60393 + while (subj != NULL) {
60394 +
60395 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
60396 + subj = subj->next; \
60397 + }
60398 +
60399 +#endif
60400 +
60401 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
60402 new file mode 100644
60403 index 0000000..323ecf2
60404 --- /dev/null
60405 +++ b/include/linux/gralloc.h
60406 @@ -0,0 +1,9 @@
60407 +#ifndef __GRALLOC_H
60408 +#define __GRALLOC_H
60409 +
60410 +void acl_free_all(void);
60411 +int acl_alloc_stack_init(unsigned long size);
60412 +void *acl_alloc(unsigned long len);
60413 +void *acl_alloc_num(unsigned long num, unsigned long len);
60414 +
60415 +#endif
60416 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
60417 new file mode 100644
60418 index 0000000..b30e9bc
60419 --- /dev/null
60420 +++ b/include/linux/grdefs.h
60421 @@ -0,0 +1,140 @@
60422 +#ifndef GRDEFS_H
60423 +#define GRDEFS_H
60424 +
60425 +/* Begin grsecurity status declarations */
60426 +
60427 +enum {
60428 + GR_READY = 0x01,
60429 + GR_STATUS_INIT = 0x00 // disabled state
60430 +};
60431 +
60432 +/* Begin ACL declarations */
60433 +
60434 +/* Role flags */
60435 +
60436 +enum {
60437 + GR_ROLE_USER = 0x0001,
60438 + GR_ROLE_GROUP = 0x0002,
60439 + GR_ROLE_DEFAULT = 0x0004,
60440 + GR_ROLE_SPECIAL = 0x0008,
60441 + GR_ROLE_AUTH = 0x0010,
60442 + GR_ROLE_NOPW = 0x0020,
60443 + GR_ROLE_GOD = 0x0040,
60444 + GR_ROLE_LEARN = 0x0080,
60445 + GR_ROLE_TPE = 0x0100,
60446 + GR_ROLE_DOMAIN = 0x0200,
60447 + GR_ROLE_PAM = 0x0400,
60448 + GR_ROLE_PERSIST = 0x0800
60449 +};
60450 +
60451 +/* ACL Subject and Object mode flags */
60452 +enum {
60453 + GR_DELETED = 0x80000000
60454 +};
60455 +
60456 +/* ACL Object-only mode flags */
60457 +enum {
60458 + GR_READ = 0x00000001,
60459 + GR_APPEND = 0x00000002,
60460 + GR_WRITE = 0x00000004,
60461 + GR_EXEC = 0x00000008,
60462 + GR_FIND = 0x00000010,
60463 + GR_INHERIT = 0x00000020,
60464 + GR_SETID = 0x00000040,
60465 + GR_CREATE = 0x00000080,
60466 + GR_DELETE = 0x00000100,
60467 + GR_LINK = 0x00000200,
60468 + GR_AUDIT_READ = 0x00000400,
60469 + GR_AUDIT_APPEND = 0x00000800,
60470 + GR_AUDIT_WRITE = 0x00001000,
60471 + GR_AUDIT_EXEC = 0x00002000,
60472 + GR_AUDIT_FIND = 0x00004000,
60473 + GR_AUDIT_INHERIT= 0x00008000,
60474 + GR_AUDIT_SETID = 0x00010000,
60475 + GR_AUDIT_CREATE = 0x00020000,
60476 + GR_AUDIT_DELETE = 0x00040000,
60477 + GR_AUDIT_LINK = 0x00080000,
60478 + GR_PTRACERD = 0x00100000,
60479 + GR_NOPTRACE = 0x00200000,
60480 + GR_SUPPRESS = 0x00400000,
60481 + GR_NOLEARN = 0x00800000,
60482 + GR_INIT_TRANSFER= 0x01000000
60483 +};
60484 +
60485 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
60486 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
60487 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
60488 +
60489 +/* ACL subject-only mode flags */
60490 +enum {
60491 + GR_KILL = 0x00000001,
60492 + GR_VIEW = 0x00000002,
60493 + GR_PROTECTED = 0x00000004,
60494 + GR_LEARN = 0x00000008,
60495 + GR_OVERRIDE = 0x00000010,
60496 + /* just a placeholder, this mode is only used in userspace */
60497 + GR_DUMMY = 0x00000020,
60498 + GR_PROTSHM = 0x00000040,
60499 + GR_KILLPROC = 0x00000080,
60500 + GR_KILLIPPROC = 0x00000100,
60501 + /* just a placeholder, this mode is only used in userspace */
60502 + GR_NOTROJAN = 0x00000200,
60503 + GR_PROTPROCFD = 0x00000400,
60504 + GR_PROCACCT = 0x00000800,
60505 + GR_RELAXPTRACE = 0x00001000,
60506 + GR_NESTED = 0x00002000,
60507 + GR_INHERITLEARN = 0x00004000,
60508 + GR_PROCFIND = 0x00008000,
60509 + GR_POVERRIDE = 0x00010000,
60510 + GR_KERNELAUTH = 0x00020000,
60511 + GR_ATSECURE = 0x00040000,
60512 + GR_SHMEXEC = 0x00080000
60513 +};
60514 +
60515 +enum {
60516 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
60517 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
60518 + GR_PAX_ENABLE_MPROTECT = 0x0004,
60519 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
60520 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
60521 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
60522 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
60523 + GR_PAX_DISABLE_MPROTECT = 0x0400,
60524 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
60525 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
60526 +};
60527 +
60528 +enum {
60529 + GR_ID_USER = 0x01,
60530 + GR_ID_GROUP = 0x02,
60531 +};
60532 +
60533 +enum {
60534 + GR_ID_ALLOW = 0x01,
60535 + GR_ID_DENY = 0x02,
60536 +};
60537 +
60538 +#define GR_CRASH_RES 31
60539 +#define GR_UIDTABLE_MAX 500
60540 +
60541 +/* begin resource learning section */
60542 +enum {
60543 + GR_RLIM_CPU_BUMP = 60,
60544 + GR_RLIM_FSIZE_BUMP = 50000,
60545 + GR_RLIM_DATA_BUMP = 10000,
60546 + GR_RLIM_STACK_BUMP = 1000,
60547 + GR_RLIM_CORE_BUMP = 10000,
60548 + GR_RLIM_RSS_BUMP = 500000,
60549 + GR_RLIM_NPROC_BUMP = 1,
60550 + GR_RLIM_NOFILE_BUMP = 5,
60551 + GR_RLIM_MEMLOCK_BUMP = 50000,
60552 + GR_RLIM_AS_BUMP = 500000,
60553 + GR_RLIM_LOCKS_BUMP = 2,
60554 + GR_RLIM_SIGPENDING_BUMP = 5,
60555 + GR_RLIM_MSGQUEUE_BUMP = 10000,
60556 + GR_RLIM_NICE_BUMP = 1,
60557 + GR_RLIM_RTPRIO_BUMP = 1,
60558 + GR_RLIM_RTTIME_BUMP = 1000000
60559 +};
60560 +
60561 +#endif
60562 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
60563 new file mode 100644
60564 index 0000000..da390f1
60565 --- /dev/null
60566 +++ b/include/linux/grinternal.h
60567 @@ -0,0 +1,221 @@
60568 +#ifndef __GRINTERNAL_H
60569 +#define __GRINTERNAL_H
60570 +
60571 +#ifdef CONFIG_GRKERNSEC
60572 +
60573 +#include <linux/fs.h>
60574 +#include <linux/mnt_namespace.h>
60575 +#include <linux/nsproxy.h>
60576 +#include <linux/gracl.h>
60577 +#include <linux/grdefs.h>
60578 +#include <linux/grmsg.h>
60579 +
60580 +void gr_add_learn_entry(const char *fmt, ...)
60581 + __attribute__ ((format (printf, 1, 2)));
60582 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
60583 + const struct vfsmount *mnt);
60584 +__u32 gr_check_create(const struct dentry *new_dentry,
60585 + const struct dentry *parent,
60586 + const struct vfsmount *mnt, const __u32 mode);
60587 +int gr_check_protected_task(const struct task_struct *task);
60588 +__u32 to_gr_audit(const __u32 reqmode);
60589 +int gr_set_acls(const int type);
60590 +int gr_apply_subject_to_task(struct task_struct *task);
60591 +int gr_acl_is_enabled(void);
60592 +char gr_roletype_to_char(void);
60593 +
60594 +void gr_handle_alertkill(struct task_struct *task);
60595 +char *gr_to_filename(const struct dentry *dentry,
60596 + const struct vfsmount *mnt);
60597 +char *gr_to_filename1(const struct dentry *dentry,
60598 + const struct vfsmount *mnt);
60599 +char *gr_to_filename2(const struct dentry *dentry,
60600 + const struct vfsmount *mnt);
60601 +char *gr_to_filename3(const struct dentry *dentry,
60602 + const struct vfsmount *mnt);
60603 +
60604 +extern int grsec_enable_ptrace_readexec;
60605 +extern int grsec_enable_harden_ptrace;
60606 +extern int grsec_enable_link;
60607 +extern int grsec_enable_fifo;
60608 +extern int grsec_enable_execve;
60609 +extern int grsec_enable_shm;
60610 +extern int grsec_enable_execlog;
60611 +extern int grsec_enable_signal;
60612 +extern int grsec_enable_audit_ptrace;
60613 +extern int grsec_enable_forkfail;
60614 +extern int grsec_enable_time;
60615 +extern int grsec_enable_rofs;
60616 +extern int grsec_enable_chroot_shmat;
60617 +extern int grsec_enable_chroot_mount;
60618 +extern int grsec_enable_chroot_double;
60619 +extern int grsec_enable_chroot_pivot;
60620 +extern int grsec_enable_chroot_chdir;
60621 +extern int grsec_enable_chroot_chmod;
60622 +extern int grsec_enable_chroot_mknod;
60623 +extern int grsec_enable_chroot_fchdir;
60624 +extern int grsec_enable_chroot_nice;
60625 +extern int grsec_enable_chroot_execlog;
60626 +extern int grsec_enable_chroot_caps;
60627 +extern int grsec_enable_chroot_sysctl;
60628 +extern int grsec_enable_chroot_unix;
60629 +extern int grsec_enable_tpe;
60630 +extern int grsec_tpe_gid;
60631 +extern int grsec_enable_tpe_all;
60632 +extern int grsec_enable_tpe_invert;
60633 +extern int grsec_enable_socket_all;
60634 +extern int grsec_socket_all_gid;
60635 +extern int grsec_enable_socket_client;
60636 +extern int grsec_socket_client_gid;
60637 +extern int grsec_enable_socket_server;
60638 +extern int grsec_socket_server_gid;
60639 +extern int grsec_audit_gid;
60640 +extern int grsec_enable_group;
60641 +extern int grsec_enable_audit_textrel;
60642 +extern int grsec_enable_log_rwxmaps;
60643 +extern int grsec_enable_mount;
60644 +extern int grsec_enable_chdir;
60645 +extern int grsec_resource_logging;
60646 +extern int grsec_enable_blackhole;
60647 +extern int grsec_lastack_retries;
60648 +extern int grsec_enable_brute;
60649 +extern int grsec_lock;
60650 +
60651 +extern spinlock_t grsec_alert_lock;
60652 +extern unsigned long grsec_alert_wtime;
60653 +extern unsigned long grsec_alert_fyet;
60654 +
60655 +extern spinlock_t grsec_audit_lock;
60656 +
60657 +extern rwlock_t grsec_exec_file_lock;
60658 +
60659 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
60660 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
60661 + (tsk)->exec_file->f_vfsmnt) : "/")
60662 +
60663 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
60664 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
60665 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60666 +
60667 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
60668 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
60669 + (tsk)->exec_file->f_vfsmnt) : "/")
60670 +
60671 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
60672 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
60673 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60674 +
60675 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
60676 +
60677 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
60678 +
60679 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
60680 + (task)->pid, (cred)->uid, \
60681 + (cred)->euid, (cred)->gid, (cred)->egid, \
60682 + gr_parent_task_fullpath(task), \
60683 + (task)->real_parent->comm, (task)->real_parent->pid, \
60684 + (pcred)->uid, (pcred)->euid, \
60685 + (pcred)->gid, (pcred)->egid
60686 +
60687 +#define GR_CHROOT_CAPS {{ \
60688 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
60689 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
60690 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
60691 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
60692 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
60693 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
60694 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
60695 +
60696 +#define security_learn(normal_msg,args...) \
60697 +({ \
60698 + read_lock(&grsec_exec_file_lock); \
60699 + gr_add_learn_entry(normal_msg "\n", ## args); \
60700 + read_unlock(&grsec_exec_file_lock); \
60701 +})
60702 +
60703 +enum {
60704 + GR_DO_AUDIT,
60705 + GR_DONT_AUDIT,
60706 + /* used for non-audit messages that we shouldn't kill the task on */
60707 + GR_DONT_AUDIT_GOOD
60708 +};
60709 +
60710 +enum {
60711 + GR_TTYSNIFF,
60712 + GR_RBAC,
60713 + GR_RBAC_STR,
60714 + GR_STR_RBAC,
60715 + GR_RBAC_MODE2,
60716 + GR_RBAC_MODE3,
60717 + GR_FILENAME,
60718 + GR_SYSCTL_HIDDEN,
60719 + GR_NOARGS,
60720 + GR_ONE_INT,
60721 + GR_ONE_INT_TWO_STR,
60722 + GR_ONE_STR,
60723 + GR_STR_INT,
60724 + GR_TWO_STR_INT,
60725 + GR_TWO_INT,
60726 + GR_TWO_U64,
60727 + GR_THREE_INT,
60728 + GR_FIVE_INT_TWO_STR,
60729 + GR_TWO_STR,
60730 + GR_THREE_STR,
60731 + GR_FOUR_STR,
60732 + GR_STR_FILENAME,
60733 + GR_FILENAME_STR,
60734 + GR_FILENAME_TWO_INT,
60735 + GR_FILENAME_TWO_INT_STR,
60736 + GR_TEXTREL,
60737 + GR_PTRACE,
60738 + GR_RESOURCE,
60739 + GR_CAP,
60740 + GR_SIG,
60741 + GR_SIG2,
60742 + GR_CRASH1,
60743 + GR_CRASH2,
60744 + GR_PSACCT,
60745 + GR_RWXMAP
60746 +};
60747 +
60748 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
60749 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
60750 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
60751 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
60752 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
60753 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
60754 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
60755 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
60756 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
60757 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
60758 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
60759 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
60760 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
60761 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
60762 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
60763 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
60764 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
60765 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
60766 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
60767 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
60768 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
60769 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
60770 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
60771 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
60772 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
60773 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
60774 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
60775 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
60776 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
60777 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
60778 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
60779 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
60780 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
60781 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
60782 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
60783 +
60784 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
60785 +
60786 +#endif
60787 +
60788 +#endif
60789 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
60790 new file mode 100644
60791 index 0000000..ae576a1
60792 --- /dev/null
60793 +++ b/include/linux/grmsg.h
60794 @@ -0,0 +1,109 @@
60795 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
60796 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
60797 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
60798 +#define GR_STOPMOD_MSG "denied modification of module state by "
60799 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
60800 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
60801 +#define GR_IOPERM_MSG "denied use of ioperm() by "
60802 +#define GR_IOPL_MSG "denied use of iopl() by "
60803 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
60804 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
60805 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
60806 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
60807 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
60808 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
60809 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
60810 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
60811 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
60812 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
60813 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
60814 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
60815 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
60816 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
60817 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
60818 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
60819 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
60820 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
60821 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
60822 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
60823 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
60824 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
60825 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
60826 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
60827 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
60828 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
60829 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
60830 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
60831 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
60832 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
60833 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
60834 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
60835 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
60836 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
60837 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
60838 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
60839 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
60840 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
60841 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
60842 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
60843 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
60844 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
60845 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
60846 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
60847 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
60848 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
60849 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
60850 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
60851 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
60852 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
60853 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
60854 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
60855 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
60856 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
60857 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
60858 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
60859 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
60860 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
60861 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
60862 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
60863 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
60864 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
60865 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
60866 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
60867 +#define GR_NICE_CHROOT_MSG "denied priority change by "
60868 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
60869 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
60870 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
60871 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
60872 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
60873 +#define GR_TIME_MSG "time set by "
60874 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
60875 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
60876 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
60877 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
60878 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
60879 +#define GR_BIND_MSG "denied bind() by "
60880 +#define GR_CONNECT_MSG "denied connect() by "
60881 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
60882 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
60883 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
60884 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
60885 +#define GR_CAP_ACL_MSG "use of %s denied for "
60886 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
60887 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
60888 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
60889 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
60890 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
60891 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
60892 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
60893 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
60894 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
60895 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
60896 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
60897 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
60898 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
60899 +#define GR_VM86_MSG "denied use of vm86 by "
60900 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
60901 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
60902 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
60903 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
60904 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
60905 new file mode 100644
60906 index 0000000..acd05db
60907 --- /dev/null
60908 +++ b/include/linux/grsecurity.h
60909 @@ -0,0 +1,232 @@
60910 +#ifndef GR_SECURITY_H
60911 +#define GR_SECURITY_H
60912 +#include <linux/fs.h>
60913 +#include <linux/fs_struct.h>
60914 +#include <linux/binfmts.h>
60915 +#include <linux/gracl.h>
60916 +
60917 +/* notify of brain-dead configs */
60918 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60919 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
60920 +#endif
60921 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
60922 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
60923 +#endif
60924 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
60925 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
60926 +#endif
60927 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
60928 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
60929 +#endif
60930 +
60931 +#include <linux/compat.h>
60932 +
60933 +struct user_arg_ptr {
60934 +#ifdef CONFIG_COMPAT
60935 + bool is_compat;
60936 +#endif
60937 + union {
60938 + const char __user *const __user *native;
60939 +#ifdef CONFIG_COMPAT
60940 + compat_uptr_t __user *compat;
60941 +#endif
60942 + } ptr;
60943 +};
60944 +
60945 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
60946 +void gr_handle_brute_check(void);
60947 +void gr_handle_kernel_exploit(void);
60948 +int gr_process_user_ban(void);
60949 +
60950 +char gr_roletype_to_char(void);
60951 +
60952 +int gr_acl_enable_at_secure(void);
60953 +
60954 +int gr_check_user_change(int real, int effective, int fs);
60955 +int gr_check_group_change(int real, int effective, int fs);
60956 +
60957 +void gr_del_task_from_ip_table(struct task_struct *p);
60958 +
60959 +int gr_pid_is_chrooted(struct task_struct *p);
60960 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
60961 +int gr_handle_chroot_nice(void);
60962 +int gr_handle_chroot_sysctl(const int op);
60963 +int gr_handle_chroot_setpriority(struct task_struct *p,
60964 + const int niceval);
60965 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
60966 +int gr_handle_chroot_chroot(const struct dentry *dentry,
60967 + const struct vfsmount *mnt);
60968 +void gr_handle_chroot_chdir(struct path *path);
60969 +int gr_handle_chroot_chmod(const struct dentry *dentry,
60970 + const struct vfsmount *mnt, const int mode);
60971 +int gr_handle_chroot_mknod(const struct dentry *dentry,
60972 + const struct vfsmount *mnt, const int mode);
60973 +int gr_handle_chroot_mount(const struct dentry *dentry,
60974 + const struct vfsmount *mnt,
60975 + const char *dev_name);
60976 +int gr_handle_chroot_pivot(void);
60977 +int gr_handle_chroot_unix(const pid_t pid);
60978 +
60979 +int gr_handle_rawio(const struct inode *inode);
60980 +
60981 +void gr_handle_ioperm(void);
60982 +void gr_handle_iopl(void);
60983 +
60984 +umode_t gr_acl_umask(void);
60985 +
60986 +int gr_tpe_allow(const struct file *file);
60987 +
60988 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
60989 +void gr_clear_chroot_entries(struct task_struct *task);
60990 +
60991 +void gr_log_forkfail(const int retval);
60992 +void gr_log_timechange(void);
60993 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
60994 +void gr_log_chdir(const struct dentry *dentry,
60995 + const struct vfsmount *mnt);
60996 +void gr_log_chroot_exec(const struct dentry *dentry,
60997 + const struct vfsmount *mnt);
60998 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
60999 +void gr_log_remount(const char *devname, const int retval);
61000 +void gr_log_unmount(const char *devname, const int retval);
61001 +void gr_log_mount(const char *from, const char *to, const int retval);
61002 +void gr_log_textrel(struct vm_area_struct *vma);
61003 +void gr_log_rwxmmap(struct file *file);
61004 +void gr_log_rwxmprotect(struct file *file);
61005 +
61006 +int gr_handle_follow_link(const struct inode *parent,
61007 + const struct inode *inode,
61008 + const struct dentry *dentry,
61009 + const struct vfsmount *mnt);
61010 +int gr_handle_fifo(const struct dentry *dentry,
61011 + const struct vfsmount *mnt,
61012 + const struct dentry *dir, const int flag,
61013 + const int acc_mode);
61014 +int gr_handle_hardlink(const struct dentry *dentry,
61015 + const struct vfsmount *mnt,
61016 + struct inode *inode,
61017 + const int mode, const char *to);
61018 +
61019 +int gr_is_capable(const int cap);
61020 +int gr_is_capable_nolog(const int cap);
61021 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
61022 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
61023 +
61024 +void gr_learn_resource(const struct task_struct *task, const int limit,
61025 + const unsigned long wanted, const int gt);
61026 +void gr_copy_label(struct task_struct *tsk);
61027 +void gr_handle_crash(struct task_struct *task, const int sig);
61028 +int gr_handle_signal(const struct task_struct *p, const int sig);
61029 +int gr_check_crash_uid(const uid_t uid);
61030 +int gr_check_protected_task(const struct task_struct *task);
61031 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
61032 +int gr_acl_handle_mmap(const struct file *file,
61033 + const unsigned long prot);
61034 +int gr_acl_handle_mprotect(const struct file *file,
61035 + const unsigned long prot);
61036 +int gr_check_hidden_task(const struct task_struct *tsk);
61037 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
61038 + const struct vfsmount *mnt);
61039 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
61040 + const struct vfsmount *mnt);
61041 +__u32 gr_acl_handle_access(const struct dentry *dentry,
61042 + const struct vfsmount *mnt, const int fmode);
61043 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
61044 + const struct vfsmount *mnt, umode_t *mode);
61045 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
61046 + const struct vfsmount *mnt);
61047 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
61048 + const struct vfsmount *mnt);
61049 +int gr_handle_ptrace(struct task_struct *task, const long request);
61050 +int gr_handle_proc_ptrace(struct task_struct *task);
61051 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
61052 + const struct vfsmount *mnt);
61053 +int gr_check_crash_exec(const struct file *filp);
61054 +int gr_acl_is_enabled(void);
61055 +void gr_set_kernel_label(struct task_struct *task);
61056 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
61057 + const gid_t gid);
61058 +int gr_set_proc_label(const struct dentry *dentry,
61059 + const struct vfsmount *mnt,
61060 + const int unsafe_flags);
61061 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
61062 + const struct vfsmount *mnt);
61063 +__u32 gr_acl_handle_open(const struct dentry *dentry,
61064 + const struct vfsmount *mnt, int acc_mode);
61065 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
61066 + const struct dentry *p_dentry,
61067 + const struct vfsmount *p_mnt,
61068 + int open_flags, int acc_mode, const int imode);
61069 +void gr_handle_create(const struct dentry *dentry,
61070 + const struct vfsmount *mnt);
61071 +void gr_handle_proc_create(const struct dentry *dentry,
61072 + const struct inode *inode);
61073 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
61074 + const struct dentry *parent_dentry,
61075 + const struct vfsmount *parent_mnt,
61076 + const int mode);
61077 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
61078 + const struct dentry *parent_dentry,
61079 + const struct vfsmount *parent_mnt);
61080 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
61081 + const struct vfsmount *mnt);
61082 +void gr_handle_delete(const ino_t ino, const dev_t dev);
61083 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
61084 + const struct vfsmount *mnt);
61085 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
61086 + const struct dentry *parent_dentry,
61087 + const struct vfsmount *parent_mnt,
61088 + const char *from);
61089 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
61090 + const struct dentry *parent_dentry,
61091 + const struct vfsmount *parent_mnt,
61092 + const struct dentry *old_dentry,
61093 + const struct vfsmount *old_mnt, const char *to);
61094 +int gr_acl_handle_rename(struct dentry *new_dentry,
61095 + struct dentry *parent_dentry,
61096 + const struct vfsmount *parent_mnt,
61097 + struct dentry *old_dentry,
61098 + struct inode *old_parent_inode,
61099 + struct vfsmount *old_mnt, const char *newname);
61100 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61101 + struct dentry *old_dentry,
61102 + struct dentry *new_dentry,
61103 + struct vfsmount *mnt, const __u8 replace);
61104 +__u32 gr_check_link(const struct dentry *new_dentry,
61105 + const struct dentry *parent_dentry,
61106 + const struct vfsmount *parent_mnt,
61107 + const struct dentry *old_dentry,
61108 + const struct vfsmount *old_mnt);
61109 +int gr_acl_handle_filldir(const struct file *file, const char *name,
61110 + const unsigned int namelen, const ino_t ino);
61111 +
61112 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
61113 + const struct vfsmount *mnt);
61114 +void gr_acl_handle_exit(void);
61115 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
61116 +int gr_acl_handle_procpidmem(const struct task_struct *task);
61117 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
61118 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
61119 +void gr_audit_ptrace(struct task_struct *task);
61120 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
61121 +
61122 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
61123 +
61124 +#ifdef CONFIG_GRKERNSEC
61125 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
61126 +void gr_handle_vm86(void);
61127 +void gr_handle_mem_readwrite(u64 from, u64 to);
61128 +
61129 +void gr_log_badprocpid(const char *entry);
61130 +
61131 +extern int grsec_enable_dmesg;
61132 +extern int grsec_disable_privio;
61133 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61134 +extern int grsec_enable_chroot_findtask;
61135 +#endif
61136 +#ifdef CONFIG_GRKERNSEC_SETXID
61137 +extern int grsec_enable_setxid;
61138 +#endif
61139 +#endif
61140 +
61141 +#endif
61142 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
61143 new file mode 100644
61144 index 0000000..e7ffaaf
61145 --- /dev/null
61146 +++ b/include/linux/grsock.h
61147 @@ -0,0 +1,19 @@
61148 +#ifndef __GRSOCK_H
61149 +#define __GRSOCK_H
61150 +
61151 +extern void gr_attach_curr_ip(const struct sock *sk);
61152 +extern int gr_handle_sock_all(const int family, const int type,
61153 + const int protocol);
61154 +extern int gr_handle_sock_server(const struct sockaddr *sck);
61155 +extern int gr_handle_sock_server_other(const struct sock *sck);
61156 +extern int gr_handle_sock_client(const struct sockaddr *sck);
61157 +extern int gr_search_connect(struct socket * sock,
61158 + struct sockaddr_in * addr);
61159 +extern int gr_search_bind(struct socket * sock,
61160 + struct sockaddr_in * addr);
61161 +extern int gr_search_listen(struct socket * sock);
61162 +extern int gr_search_accept(struct socket * sock);
61163 +extern int gr_search_socket(const int domain, const int type,
61164 + const int protocol);
61165 +
61166 +#endif
61167 diff --git a/include/linux/hid.h b/include/linux/hid.h
61168 index 3a95da6..51986f1 100644
61169 --- a/include/linux/hid.h
61170 +++ b/include/linux/hid.h
61171 @@ -696,7 +696,7 @@ struct hid_ll_driver {
61172 unsigned int code, int value);
61173
61174 int (*parse)(struct hid_device *hdev);
61175 -};
61176 +} __no_const;
61177
61178 #define PM_HINT_FULLON 1<<5
61179 #define PM_HINT_NORMAL 1<<1
61180 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
61181 index d3999b4..1304cb4 100644
61182 --- a/include/linux/highmem.h
61183 +++ b/include/linux/highmem.h
61184 @@ -221,6 +221,18 @@ static inline void clear_highpage(struct page *page)
61185 kunmap_atomic(kaddr);
61186 }
61187
61188 +static inline void sanitize_highpage(struct page *page)
61189 +{
61190 + void *kaddr;
61191 + unsigned long flags;
61192 +
61193 + local_irq_save(flags);
61194 + kaddr = kmap_atomic(page);
61195 + clear_page(kaddr);
61196 + kunmap_atomic(kaddr);
61197 + local_irq_restore(flags);
61198 +}
61199 +
61200 static inline void zero_user_segments(struct page *page,
61201 unsigned start1, unsigned end1,
61202 unsigned start2, unsigned end2)
61203 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
61204 index 195d8b3..e20cfab 100644
61205 --- a/include/linux/i2c.h
61206 +++ b/include/linux/i2c.h
61207 @@ -365,6 +365,7 @@ struct i2c_algorithm {
61208 /* To determine what the adapter supports */
61209 u32 (*functionality) (struct i2c_adapter *);
61210 };
61211 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
61212
61213 /*
61214 * i2c_adapter is the structure used to identify a physical i2c bus along
61215 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
61216 index d23c3c2..eb63c81 100644
61217 --- a/include/linux/i2o.h
61218 +++ b/include/linux/i2o.h
61219 @@ -565,7 +565,7 @@ struct i2o_controller {
61220 struct i2o_device *exec; /* Executive */
61221 #if BITS_PER_LONG == 64
61222 spinlock_t context_list_lock; /* lock for context_list */
61223 - atomic_t context_list_counter; /* needed for unique contexts */
61224 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
61225 struct list_head context_list; /* list of context id's
61226 and pointers */
61227 #endif
61228 diff --git a/include/linux/if_team.h b/include/linux/if_team.h
61229 index 58404b0..439ed95 100644
61230 --- a/include/linux/if_team.h
61231 +++ b/include/linux/if_team.h
61232 @@ -64,6 +64,7 @@ struct team_mode_ops {
61233 void (*port_leave)(struct team *team, struct team_port *port);
61234 void (*port_change_mac)(struct team *team, struct team_port *port);
61235 };
61236 +typedef struct team_mode_ops __no_const team_mode_ops_no_const;
61237
61238 enum team_option_type {
61239 TEAM_OPTION_TYPE_U32,
61240 @@ -112,7 +113,7 @@ struct team {
61241 struct list_head option_list;
61242
61243 const struct team_mode *mode;
61244 - struct team_mode_ops ops;
61245 + team_mode_ops_no_const ops;
61246 long mode_priv[TEAM_MODE_PRIV_LONGS];
61247 };
61248
61249 diff --git a/include/linux/init.h b/include/linux/init.h
61250 index 6b95109..4aca62c 100644
61251 --- a/include/linux/init.h
61252 +++ b/include/linux/init.h
61253 @@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
61254
61255 /* Each module must use one module_init(). */
61256 #define module_init(initfn) \
61257 - static inline initcall_t __inittest(void) \
61258 + static inline __used initcall_t __inittest(void) \
61259 { return initfn; } \
61260 int init_module(void) __attribute__((alias(#initfn)));
61261
61262 /* This is only required if you want to be unloadable. */
61263 #define module_exit(exitfn) \
61264 - static inline exitcall_t __exittest(void) \
61265 + static inline __used exitcall_t __exittest(void) \
61266 { return exitfn; } \
61267 void cleanup_module(void) __attribute__((alias(#exitfn)));
61268
61269 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
61270 index e4baff5..83bb175 100644
61271 --- a/include/linux/init_task.h
61272 +++ b/include/linux/init_task.h
61273 @@ -134,6 +134,12 @@ extern struct cred init_cred;
61274
61275 #define INIT_TASK_COMM "swapper"
61276
61277 +#ifdef CONFIG_X86
61278 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
61279 +#else
61280 +#define INIT_TASK_THREAD_INFO
61281 +#endif
61282 +
61283 /*
61284 * INIT_TASK is used to set up the first task table, touch at
61285 * your own risk!. Base=0, limit=0x1fffff (=2MB)
61286 @@ -172,6 +178,7 @@ extern struct cred init_cred;
61287 RCU_INIT_POINTER(.cred, &init_cred), \
61288 .comm = INIT_TASK_COMM, \
61289 .thread = INIT_THREAD, \
61290 + INIT_TASK_THREAD_INFO \
61291 .fs = &init_fs, \
61292 .files = &init_files, \
61293 .signal = &init_signals, \
61294 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
61295 index e6ca56d..8583707 100644
61296 --- a/include/linux/intel-iommu.h
61297 +++ b/include/linux/intel-iommu.h
61298 @@ -296,7 +296,7 @@ struct iommu_flush {
61299 u8 fm, u64 type);
61300 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
61301 unsigned int size_order, u64 type);
61302 -};
61303 +} __no_const;
61304
61305 enum {
61306 SR_DMAR_FECTL_REG,
61307 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
61308 index 2aea5d2..0b82f0c 100644
61309 --- a/include/linux/interrupt.h
61310 +++ b/include/linux/interrupt.h
61311 @@ -439,7 +439,7 @@ enum
61312 /* map softirq index to softirq name. update 'softirq_to_name' in
61313 * kernel/softirq.c when adding a new softirq.
61314 */
61315 -extern char *softirq_to_name[NR_SOFTIRQS];
61316 +extern const char * const softirq_to_name[NR_SOFTIRQS];
61317
61318 /* softirq mask and active fields moved to irq_cpustat_t in
61319 * asm/hardirq.h to get better cache usage. KAO
61320 @@ -447,12 +447,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
61321
61322 struct softirq_action
61323 {
61324 - void (*action)(struct softirq_action *);
61325 + void (*action)(void);
61326 };
61327
61328 asmlinkage void do_softirq(void);
61329 asmlinkage void __do_softirq(void);
61330 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
61331 +extern void open_softirq(int nr, void (*action)(void));
61332 extern void softirq_init(void);
61333 extern void __raise_softirq_irqoff(unsigned int nr);
61334
61335 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
61336 index 3875719..4cd454c 100644
61337 --- a/include/linux/kallsyms.h
61338 +++ b/include/linux/kallsyms.h
61339 @@ -15,7 +15,8 @@
61340
61341 struct module;
61342
61343 -#ifdef CONFIG_KALLSYMS
61344 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
61345 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
61346 /* Lookup the address for a symbol. Returns 0 if not found. */
61347 unsigned long kallsyms_lookup_name(const char *name);
61348
61349 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
61350 /* Stupid that this does nothing, but I didn't create this mess. */
61351 #define __print_symbol(fmt, addr)
61352 #endif /*CONFIG_KALLSYMS*/
61353 +#else /* when included by kallsyms.c, vsnprintf.c, or
61354 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
61355 +extern void __print_symbol(const char *fmt, unsigned long address);
61356 +extern int sprint_backtrace(char *buffer, unsigned long address);
61357 +extern int sprint_symbol(char *buffer, unsigned long address);
61358 +const char *kallsyms_lookup(unsigned long addr,
61359 + unsigned long *symbolsize,
61360 + unsigned long *offset,
61361 + char **modname, char *namebuf);
61362 +#endif
61363
61364 /* This macro allows us to keep printk typechecking */
61365 static __printf(1, 2)
61366 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
61367 index c4d2fc1..5df9c19 100644
61368 --- a/include/linux/kgdb.h
61369 +++ b/include/linux/kgdb.h
61370 @@ -53,7 +53,7 @@ extern int kgdb_connected;
61371 extern int kgdb_io_module_registered;
61372
61373 extern atomic_t kgdb_setting_breakpoint;
61374 -extern atomic_t kgdb_cpu_doing_single_step;
61375 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
61376
61377 extern struct task_struct *kgdb_usethread;
61378 extern struct task_struct *kgdb_contthread;
61379 @@ -252,7 +252,7 @@ struct kgdb_arch {
61380 void (*disable_hw_break)(struct pt_regs *regs);
61381 void (*remove_all_hw_break)(void);
61382 void (*correct_hw_break)(void);
61383 -};
61384 +} __do_const;
61385
61386 /**
61387 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
61388 @@ -277,7 +277,7 @@ struct kgdb_io {
61389 void (*pre_exception) (void);
61390 void (*post_exception) (void);
61391 int is_console;
61392 -};
61393 +} __do_const;
61394
61395 extern struct kgdb_arch arch_kgdb_ops;
61396
61397 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
61398 index dd99c32..da06047 100644
61399 --- a/include/linux/kmod.h
61400 +++ b/include/linux/kmod.h
61401 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
61402 * usually useless though. */
61403 extern __printf(2, 3)
61404 int __request_module(bool wait, const char *name, ...);
61405 +extern __printf(3, 4)
61406 +int ___request_module(bool wait, char *param_name, const char *name, ...);
61407 #define request_module(mod...) __request_module(true, mod)
61408 #define request_module_nowait(mod...) __request_module(false, mod)
61409 #define try_then_request_module(x, mod...) \
61410 diff --git a/include/linux/kref.h b/include/linux/kref.h
61411 index 9c07dce..a92fa71 100644
61412 --- a/include/linux/kref.h
61413 +++ b/include/linux/kref.h
61414 @@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
61415 static inline int kref_sub(struct kref *kref, unsigned int count,
61416 void (*release)(struct kref *kref))
61417 {
61418 - WARN_ON(release == NULL);
61419 + BUG_ON(release == NULL);
61420
61421 if (atomic_sub_and_test((int) count, &kref->refcount)) {
61422 release(kref);
61423 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
61424 index 72cbf08..dd0201d 100644
61425 --- a/include/linux/kvm_host.h
61426 +++ b/include/linux/kvm_host.h
61427 @@ -322,7 +322,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
61428 void vcpu_load(struct kvm_vcpu *vcpu);
61429 void vcpu_put(struct kvm_vcpu *vcpu);
61430
61431 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61432 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61433 struct module *module);
61434 void kvm_exit(void);
61435
61436 @@ -486,7 +486,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
61437 struct kvm_guest_debug *dbg);
61438 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
61439
61440 -int kvm_arch_init(void *opaque);
61441 +int kvm_arch_init(const void *opaque);
61442 void kvm_arch_exit(void);
61443
61444 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
61445 diff --git a/include/linux/libata.h b/include/linux/libata.h
61446 index e926df7..1713bd8 100644
61447 --- a/include/linux/libata.h
61448 +++ b/include/linux/libata.h
61449 @@ -909,7 +909,7 @@ struct ata_port_operations {
61450 * fields must be pointers.
61451 */
61452 const struct ata_port_operations *inherits;
61453 -};
61454 +} __do_const;
61455
61456 struct ata_port_info {
61457 unsigned long flags;
61458 diff --git a/include/linux/mca.h b/include/linux/mca.h
61459 index 3797270..7765ede 100644
61460 --- a/include/linux/mca.h
61461 +++ b/include/linux/mca.h
61462 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
61463 int region);
61464 void * (*mca_transform_memory)(struct mca_device *,
61465 void *memory);
61466 -};
61467 +} __no_const;
61468
61469 struct mca_bus {
61470 u64 default_dma_mask;
61471 diff --git a/include/linux/memory.h b/include/linux/memory.h
61472 index 1ac7f6e..a5794d0 100644
61473 --- a/include/linux/memory.h
61474 +++ b/include/linux/memory.h
61475 @@ -143,7 +143,7 @@ struct memory_accessor {
61476 size_t count);
61477 ssize_t (*write)(struct memory_accessor *, const char *buf,
61478 off_t offset, size_t count);
61479 -};
61480 +} __no_const;
61481
61482 /*
61483 * Kernel text modification mutex, used for code patching. Users of this lock
61484 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
61485 index ee96cd5..7823c3a 100644
61486 --- a/include/linux/mfd/abx500.h
61487 +++ b/include/linux/mfd/abx500.h
61488 @@ -455,6 +455,7 @@ struct abx500_ops {
61489 int (*event_registers_startup_state_get) (struct device *, u8 *);
61490 int (*startup_irq_enabled) (struct device *, unsigned int);
61491 };
61492 +typedef struct abx500_ops __no_const abx500_ops_no_const;
61493
61494 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
61495 void abx500_remove_ops(struct device *dev);
61496 diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
61497 index 9b07725..3d55001 100644
61498 --- a/include/linux/mfd/abx500/ux500_chargalg.h
61499 +++ b/include/linux/mfd/abx500/ux500_chargalg.h
61500 @@ -19,7 +19,7 @@ struct ux500_charger_ops {
61501 int (*enable) (struct ux500_charger *, int, int, int);
61502 int (*kick_wd) (struct ux500_charger *);
61503 int (*update_curr) (struct ux500_charger *, int);
61504 -};
61505 +} __no_const;
61506
61507 /**
61508 * struct ux500_charger - power supply ux500 charger sub class
61509 diff --git a/include/linux/mm.h b/include/linux/mm.h
61510 index 74aa71b..4ae97ba 100644
61511 --- a/include/linux/mm.h
61512 +++ b/include/linux/mm.h
61513 @@ -116,7 +116,14 @@ extern unsigned int kobjsize(const void *objp);
61514
61515 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
61516 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
61517 +
61518 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61519 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
61520 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
61521 +#else
61522 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
61523 +#endif
61524 +
61525 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
61526 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
61527
61528 @@ -1013,34 +1020,6 @@ int set_page_dirty(struct page *page);
61529 int set_page_dirty_lock(struct page *page);
61530 int clear_page_dirty_for_io(struct page *page);
61531
61532 -/* Is the vma a continuation of the stack vma above it? */
61533 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
61534 -{
61535 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
61536 -}
61537 -
61538 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
61539 - unsigned long addr)
61540 -{
61541 - return (vma->vm_flags & VM_GROWSDOWN) &&
61542 - (vma->vm_start == addr) &&
61543 - !vma_growsdown(vma->vm_prev, addr);
61544 -}
61545 -
61546 -/* Is the vma a continuation of the stack vma below it? */
61547 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
61548 -{
61549 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
61550 -}
61551 -
61552 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
61553 - unsigned long addr)
61554 -{
61555 - return (vma->vm_flags & VM_GROWSUP) &&
61556 - (vma->vm_end == addr) &&
61557 - !vma_growsup(vma->vm_next, addr);
61558 -}
61559 -
61560 extern pid_t
61561 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
61562
61563 @@ -1139,6 +1118,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
61564 }
61565 #endif
61566
61567 +#ifdef CONFIG_MMU
61568 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
61569 +#else
61570 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61571 +{
61572 + return __pgprot(0);
61573 +}
61574 +#endif
61575 +
61576 int vma_wants_writenotify(struct vm_area_struct *vma);
61577
61578 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
61579 @@ -1157,8 +1145,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
61580 {
61581 return 0;
61582 }
61583 +
61584 +static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
61585 + unsigned long address)
61586 +{
61587 + return 0;
61588 +}
61589 #else
61590 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
61591 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
61592 #endif
61593
61594 #ifdef __PAGETABLE_PMD_FOLDED
61595 @@ -1167,8 +1162,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
61596 {
61597 return 0;
61598 }
61599 +
61600 +static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
61601 + unsigned long address)
61602 +{
61603 + return 0;
61604 +}
61605 #else
61606 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
61607 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
61608 #endif
61609
61610 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
61611 @@ -1186,11 +1188,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
61612 NULL: pud_offset(pgd, address);
61613 }
61614
61615 +static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
61616 +{
61617 + return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
61618 + NULL: pud_offset(pgd, address);
61619 +}
61620 +
61621 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
61622 {
61623 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
61624 NULL: pmd_offset(pud, address);
61625 }
61626 +
61627 +static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
61628 +{
61629 + return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
61630 + NULL: pmd_offset(pud, address);
61631 +}
61632 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
61633
61634 #if USE_SPLIT_PTLOCKS
61635 @@ -1400,6 +1414,7 @@ extern unsigned long do_mmap(struct file *, unsigned long,
61636 unsigned long, unsigned long,
61637 unsigned long, unsigned long);
61638 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
61639 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
61640
61641 /* These take the mm semaphore themselves */
61642 extern unsigned long vm_brk(unsigned long, unsigned long);
61643 @@ -1462,6 +1477,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
61644 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
61645 struct vm_area_struct **pprev);
61646
61647 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
61648 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
61649 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
61650 +
61651 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
61652 NULL if none. Assume start_addr < end_addr. */
61653 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
61654 @@ -1490,15 +1509,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
61655 return vma;
61656 }
61657
61658 -#ifdef CONFIG_MMU
61659 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
61660 -#else
61661 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
61662 -{
61663 - return __pgprot(0);
61664 -}
61665 -#endif
61666 -
61667 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
61668 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
61669 unsigned long pfn, unsigned long size, pgprot_t);
61670 @@ -1602,7 +1612,7 @@ extern int unpoison_memory(unsigned long pfn);
61671 extern int sysctl_memory_failure_early_kill;
61672 extern int sysctl_memory_failure_recovery;
61673 extern void shake_page(struct page *p, int access);
61674 -extern atomic_long_t mce_bad_pages;
61675 +extern atomic_long_unchecked_t mce_bad_pages;
61676 extern int soft_offline_page(struct page *page, int flags);
61677
61678 extern void dump_page(struct page *page);
61679 @@ -1633,5 +1643,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
61680 static inline bool page_is_guard(struct page *page) { return false; }
61681 #endif /* CONFIG_DEBUG_PAGEALLOC */
61682
61683 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
61684 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
61685 +#else
61686 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
61687 +#endif
61688 +
61689 #endif /* __KERNEL__ */
61690 #endif /* _LINUX_MM_H */
61691 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
61692 index 3cc3062..efeaeb7 100644
61693 --- a/include/linux/mm_types.h
61694 +++ b/include/linux/mm_types.h
61695 @@ -252,6 +252,8 @@ struct vm_area_struct {
61696 #ifdef CONFIG_NUMA
61697 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
61698 #endif
61699 +
61700 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
61701 };
61702
61703 struct core_thread {
61704 @@ -326,7 +328,7 @@ struct mm_struct {
61705 unsigned long def_flags;
61706 unsigned long nr_ptes; /* Page table pages */
61707 unsigned long start_code, end_code, start_data, end_data;
61708 - unsigned long start_brk, brk, start_stack;
61709 + unsigned long brk_gap, start_brk, brk, start_stack;
61710 unsigned long arg_start, arg_end, env_start, env_end;
61711
61712 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
61713 @@ -388,6 +390,24 @@ struct mm_struct {
61714 #ifdef CONFIG_CPUMASK_OFFSTACK
61715 struct cpumask cpumask_allocation;
61716 #endif
61717 +
61718 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) || defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS)
61719 + unsigned long pax_flags;
61720 +#endif
61721 +
61722 +#ifdef CONFIG_PAX_DLRESOLVE
61723 + unsigned long call_dl_resolve;
61724 +#endif
61725 +
61726 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
61727 + unsigned long call_syscall;
61728 +#endif
61729 +
61730 +#ifdef CONFIG_PAX_ASLR
61731 + unsigned long delta_mmap; /* randomized offset */
61732 + unsigned long delta_stack; /* randomized offset */
61733 +#endif
61734 +
61735 };
61736
61737 static inline void mm_init_cpumask(struct mm_struct *mm)
61738 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
61739 index 1d1b1e1..2a13c78 100644
61740 --- a/include/linux/mmu_notifier.h
61741 +++ b/include/linux/mmu_notifier.h
61742 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
61743 */
61744 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
61745 ({ \
61746 - pte_t __pte; \
61747 + pte_t ___pte; \
61748 struct vm_area_struct *___vma = __vma; \
61749 unsigned long ___address = __address; \
61750 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
61751 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
61752 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
61753 - __pte; \
61754 + ___pte; \
61755 })
61756
61757 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
61758 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
61759 index dff7115..0e001c8 100644
61760 --- a/include/linux/mmzone.h
61761 +++ b/include/linux/mmzone.h
61762 @@ -380,7 +380,7 @@ struct zone {
61763 unsigned long flags; /* zone flags, see below */
61764
61765 /* Zone statistics */
61766 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61767 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61768
61769 /*
61770 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
61771 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
61772 index 501da4c..ba79bb4 100644
61773 --- a/include/linux/mod_devicetable.h
61774 +++ b/include/linux/mod_devicetable.h
61775 @@ -12,7 +12,7 @@
61776 typedef unsigned long kernel_ulong_t;
61777 #endif
61778
61779 -#define PCI_ANY_ID (~0)
61780 +#define PCI_ANY_ID ((__u16)~0)
61781
61782 struct pci_device_id {
61783 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
61784 @@ -131,7 +131,7 @@ struct usb_device_id {
61785 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
61786 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
61787
61788 -#define HID_ANY_ID (~0)
61789 +#define HID_ANY_ID (~0U)
61790
61791 struct hid_device_id {
61792 __u16 bus;
61793 diff --git a/include/linux/module.h b/include/linux/module.h
61794 index fbcafe2..e5d9587 100644
61795 --- a/include/linux/module.h
61796 +++ b/include/linux/module.h
61797 @@ -17,6 +17,7 @@
61798 #include <linux/moduleparam.h>
61799 #include <linux/tracepoint.h>
61800 #include <linux/export.h>
61801 +#include <linux/fs.h>
61802
61803 #include <linux/percpu.h>
61804 #include <asm/module.h>
61805 @@ -273,19 +274,16 @@ struct module
61806 int (*init)(void);
61807
61808 /* If this is non-NULL, vfree after init() returns */
61809 - void *module_init;
61810 + void *module_init_rx, *module_init_rw;
61811
61812 /* Here is the actual code + data, vfree'd on unload. */
61813 - void *module_core;
61814 + void *module_core_rx, *module_core_rw;
61815
61816 /* Here are the sizes of the init and core sections */
61817 - unsigned int init_size, core_size;
61818 + unsigned int init_size_rw, core_size_rw;
61819
61820 /* The size of the executable code in each section. */
61821 - unsigned int init_text_size, core_text_size;
61822 -
61823 - /* Size of RO sections of the module (text+rodata) */
61824 - unsigned int init_ro_size, core_ro_size;
61825 + unsigned int init_size_rx, core_size_rx;
61826
61827 /* Arch-specific module values */
61828 struct mod_arch_specific arch;
61829 @@ -341,6 +339,10 @@ struct module
61830 #ifdef CONFIG_EVENT_TRACING
61831 struct ftrace_event_call **trace_events;
61832 unsigned int num_trace_events;
61833 + struct file_operations trace_id;
61834 + struct file_operations trace_enable;
61835 + struct file_operations trace_format;
61836 + struct file_operations trace_filter;
61837 #endif
61838 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
61839 unsigned int num_ftrace_callsites;
61840 @@ -388,16 +390,46 @@ bool is_module_address(unsigned long addr);
61841 bool is_module_percpu_address(unsigned long addr);
61842 bool is_module_text_address(unsigned long addr);
61843
61844 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
61845 +{
61846 +
61847 +#ifdef CONFIG_PAX_KERNEXEC
61848 + if (ktla_ktva(addr) >= (unsigned long)start &&
61849 + ktla_ktva(addr) < (unsigned long)start + size)
61850 + return 1;
61851 +#endif
61852 +
61853 + return ((void *)addr >= start && (void *)addr < start + size);
61854 +}
61855 +
61856 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
61857 +{
61858 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
61859 +}
61860 +
61861 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
61862 +{
61863 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
61864 +}
61865 +
61866 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
61867 +{
61868 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
61869 +}
61870 +
61871 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
61872 +{
61873 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
61874 +}
61875 +
61876 static inline int within_module_core(unsigned long addr, struct module *mod)
61877 {
61878 - return (unsigned long)mod->module_core <= addr &&
61879 - addr < (unsigned long)mod->module_core + mod->core_size;
61880 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
61881 }
61882
61883 static inline int within_module_init(unsigned long addr, struct module *mod)
61884 {
61885 - return (unsigned long)mod->module_init <= addr &&
61886 - addr < (unsigned long)mod->module_init + mod->init_size;
61887 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
61888 }
61889
61890 /* Search for module by name: must hold module_mutex. */
61891 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
61892 index b2be02e..72d2f78 100644
61893 --- a/include/linux/moduleloader.h
61894 +++ b/include/linux/moduleloader.h
61895 @@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
61896
61897 /* Allocator used for allocating struct module, core sections and init
61898 sections. Returns NULL on failure. */
61899 -void *module_alloc(unsigned long size);
61900 +void *module_alloc(unsigned long size) __size_overflow(1);
61901 +
61902 +#ifdef CONFIG_PAX_KERNEXEC
61903 +void *module_alloc_exec(unsigned long size) __size_overflow(1);
61904 +#else
61905 +#define module_alloc_exec(x) module_alloc(x)
61906 +#endif
61907
61908 /* Free memory returned from module_alloc. */
61909 void module_free(struct module *mod, void *module_region);
61910
61911 +#ifdef CONFIG_PAX_KERNEXEC
61912 +void module_free_exec(struct module *mod, void *module_region);
61913 +#else
61914 +#define module_free_exec(x, y) module_free((x), (y))
61915 +#endif
61916 +
61917 /* Apply the given relocation to the (simplified) ELF. Return -error
61918 or 0. */
61919 int apply_relocate(Elf_Shdr *sechdrs,
61920 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
61921 index ea36486..91e70f4 100644
61922 --- a/include/linux/moduleparam.h
61923 +++ b/include/linux/moduleparam.h
61924 @@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
61925 * @len is usually just sizeof(string).
61926 */
61927 #define module_param_string(name, string, len, perm) \
61928 - static const struct kparam_string __param_string_##name \
61929 + static const struct kparam_string __param_string_##name __used \
61930 = { len, string }; \
61931 __module_param_call(MODULE_PARAM_PREFIX, name, \
61932 &param_ops_string, \
61933 @@ -424,7 +424,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
61934 */
61935 #define module_param_array_named(name, array, type, nump, perm) \
61936 param_check_##type(name, &(array)[0]); \
61937 - static const struct kparam_array __param_arr_##name \
61938 + static const struct kparam_array __param_arr_##name __used \
61939 = { .max = ARRAY_SIZE(array), .num = nump, \
61940 .ops = &param_ops_##type, \
61941 .elemsize = sizeof(array[0]), .elem = array }; \
61942 diff --git a/include/linux/namei.h b/include/linux/namei.h
61943 index ffc0213..2c1f2cb 100644
61944 --- a/include/linux/namei.h
61945 +++ b/include/linux/namei.h
61946 @@ -24,7 +24,7 @@ struct nameidata {
61947 unsigned seq;
61948 int last_type;
61949 unsigned depth;
61950 - char *saved_names[MAX_NESTED_LINKS + 1];
61951 + const char *saved_names[MAX_NESTED_LINKS + 1];
61952
61953 /* Intent data */
61954 union {
61955 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
61956 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
61957 extern void unlock_rename(struct dentry *, struct dentry *);
61958
61959 -static inline void nd_set_link(struct nameidata *nd, char *path)
61960 +static inline void nd_set_link(struct nameidata *nd, const char *path)
61961 {
61962 nd->saved_names[nd->depth] = path;
61963 }
61964
61965 -static inline char *nd_get_link(struct nameidata *nd)
61966 +static inline const char *nd_get_link(const struct nameidata *nd)
61967 {
61968 return nd->saved_names[nd->depth];
61969 }
61970 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
61971 index 33900a5..2072000 100644
61972 --- a/include/linux/netdevice.h
61973 +++ b/include/linux/netdevice.h
61974 @@ -1003,6 +1003,7 @@ struct net_device_ops {
61975 int (*ndo_neigh_construct)(struct neighbour *n);
61976 void (*ndo_neigh_destroy)(struct neighbour *n);
61977 };
61978 +typedef struct net_device_ops __no_const net_device_ops_no_const;
61979
61980 /*
61981 * The DEVICE structure.
61982 @@ -1064,7 +1065,7 @@ struct net_device {
61983 int iflink;
61984
61985 struct net_device_stats stats;
61986 - atomic_long_t rx_dropped; /* dropped packets by core network
61987 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
61988 * Do not use this in drivers.
61989 */
61990
61991 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
61992 new file mode 100644
61993 index 0000000..33f4af8
61994 --- /dev/null
61995 +++ b/include/linux/netfilter/xt_gradm.h
61996 @@ -0,0 +1,9 @@
61997 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
61998 +#define _LINUX_NETFILTER_XT_GRADM_H 1
61999 +
62000 +struct xt_gradm_mtinfo {
62001 + __u16 flags;
62002 + __u16 invflags;
62003 +};
62004 +
62005 +#endif
62006 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
62007 index c65a18a..0c05f3a 100644
62008 --- a/include/linux/of_pdt.h
62009 +++ b/include/linux/of_pdt.h
62010 @@ -32,7 +32,7 @@ struct of_pdt_ops {
62011
62012 /* return 0 on success; fill in 'len' with number of bytes in path */
62013 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
62014 -};
62015 +} __no_const;
62016
62017 extern void *prom_early_alloc(unsigned long size);
62018
62019 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
62020 index a4c5624..79d6d88 100644
62021 --- a/include/linux/oprofile.h
62022 +++ b/include/linux/oprofile.h
62023 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
62024 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
62025 char const * name, ulong * val);
62026
62027 -/** Create a file for read-only access to an atomic_t. */
62028 +/** Create a file for read-only access to an atomic_unchecked_t. */
62029 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
62030 - char const * name, atomic_t * val);
62031 + char const * name, atomic_unchecked_t * val);
62032
62033 /** create a directory */
62034 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
62035 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
62036 index ddbb6a9..be1680e 100644
62037 --- a/include/linux/perf_event.h
62038 +++ b/include/linux/perf_event.h
62039 @@ -879,8 +879,8 @@ struct perf_event {
62040
62041 enum perf_event_active_state state;
62042 unsigned int attach_state;
62043 - local64_t count;
62044 - atomic64_t child_count;
62045 + local64_t count; /* PaX: fix it one day */
62046 + atomic64_unchecked_t child_count;
62047
62048 /*
62049 * These are the total time in nanoseconds that the event
62050 @@ -931,8 +931,8 @@ struct perf_event {
62051 * These accumulate total time (in nanoseconds) that children
62052 * events have been enabled and running, respectively.
62053 */
62054 - atomic64_t child_total_time_enabled;
62055 - atomic64_t child_total_time_running;
62056 + atomic64_unchecked_t child_total_time_enabled;
62057 + atomic64_unchecked_t child_total_time_running;
62058
62059 /*
62060 * Protect attach/detach and child_list:
62061 diff --git a/include/linux/personality.h b/include/linux/personality.h
62062 index 8fc7dd1a..c19d89e 100644
62063 --- a/include/linux/personality.h
62064 +++ b/include/linux/personality.h
62065 @@ -44,6 +44,7 @@ enum {
62066 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
62067 ADDR_NO_RANDOMIZE | \
62068 ADDR_COMPAT_LAYOUT | \
62069 + ADDR_LIMIT_3GB | \
62070 MMAP_PAGE_ZERO)
62071
62072 /*
62073 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
62074 index e1ac1ce..0675fed 100644
62075 --- a/include/linux/pipe_fs_i.h
62076 +++ b/include/linux/pipe_fs_i.h
62077 @@ -45,9 +45,9 @@ struct pipe_buffer {
62078 struct pipe_inode_info {
62079 wait_queue_head_t wait;
62080 unsigned int nrbufs, curbuf, buffers;
62081 - unsigned int readers;
62082 - unsigned int writers;
62083 - unsigned int waiting_writers;
62084 + atomic_t readers;
62085 + atomic_t writers;
62086 + atomic_t waiting_writers;
62087 unsigned int r_counter;
62088 unsigned int w_counter;
62089 struct page *tmp_page;
62090 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
62091 index 609daae..5392427 100644
62092 --- a/include/linux/pm_runtime.h
62093 +++ b/include/linux/pm_runtime.h
62094 @@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
62095
62096 static inline void pm_runtime_mark_last_busy(struct device *dev)
62097 {
62098 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
62099 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
62100 }
62101
62102 #else /* !CONFIG_PM_RUNTIME */
62103 diff --git a/include/linux/poison.h b/include/linux/poison.h
62104 index 2110a81..13a11bb 100644
62105 --- a/include/linux/poison.h
62106 +++ b/include/linux/poison.h
62107 @@ -19,8 +19,8 @@
62108 * under normal circumstances, used to verify that nobody uses
62109 * non-initialized list entries.
62110 */
62111 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
62112 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
62113 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
62114 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
62115
62116 /********** include/linux/timer.h **********/
62117 /*
62118 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
62119 index 5a710b9..0b0dab9 100644
62120 --- a/include/linux/preempt.h
62121 +++ b/include/linux/preempt.h
62122 @@ -126,7 +126,7 @@ struct preempt_ops {
62123 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
62124 void (*sched_out)(struct preempt_notifier *notifier,
62125 struct task_struct *next);
62126 -};
62127 +} __no_const;
62128
62129 /**
62130 * preempt_notifier - key for installing preemption notifiers
62131 diff --git a/include/linux/printk.h b/include/linux/printk.h
62132 index 0525927..a5388b6 100644
62133 --- a/include/linux/printk.h
62134 +++ b/include/linux/printk.h
62135 @@ -94,6 +94,8 @@ void early_printk(const char *fmt, ...);
62136 extern int printk_needs_cpu(int cpu);
62137 extern void printk_tick(void);
62138
62139 +extern int kptr_restrict;
62140 +
62141 #ifdef CONFIG_PRINTK
62142 asmlinkage __printf(1, 0)
62143 int vprintk(const char *fmt, va_list args);
62144 @@ -117,7 +119,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
62145
62146 extern int printk_delay_msec;
62147 extern int dmesg_restrict;
62148 -extern int kptr_restrict;
62149
62150 void log_buf_kexec_setup(void);
62151 void __init setup_log_buf(int early);
62152 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
62153 index 85c5073..51fac8b 100644
62154 --- a/include/linux/proc_fs.h
62155 +++ b/include/linux/proc_fs.h
62156 @@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
62157 return proc_create_data(name, mode, parent, proc_fops, NULL);
62158 }
62159
62160 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
62161 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
62162 +{
62163 +#ifdef CONFIG_GRKERNSEC_PROC_USER
62164 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
62165 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62166 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
62167 +#else
62168 + return proc_create_data(name, mode, parent, proc_fops, NULL);
62169 +#endif
62170 +}
62171 +
62172 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
62173 umode_t mode, struct proc_dir_entry *base,
62174 read_proc_t *read_proc, void * data)
62175 @@ -258,7 +270,7 @@ union proc_op {
62176 int (*proc_show)(struct seq_file *m,
62177 struct pid_namespace *ns, struct pid *pid,
62178 struct task_struct *task);
62179 -};
62180 +} __no_const;
62181
62182 struct ctl_table_header;
62183 struct ctl_table;
62184 diff --git a/include/linux/random.h b/include/linux/random.h
62185 index 8f74538..02a1012 100644
62186 --- a/include/linux/random.h
62187 +++ b/include/linux/random.h
62188 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
62189
62190 u32 prandom32(struct rnd_state *);
62191
62192 +static inline unsigned long pax_get_random_long(void)
62193 +{
62194 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
62195 +}
62196 +
62197 /*
62198 * Handle minimum values for seeds
62199 */
62200 static inline u32 __seed(u32 x, u32 m)
62201 {
62202 - return (x < m) ? x + m : x;
62203 + return (x <= m) ? x + m + 1 : x;
62204 }
62205
62206 /**
62207 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
62208 index e0879a7..a12f962 100644
62209 --- a/include/linux/reboot.h
62210 +++ b/include/linux/reboot.h
62211 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
62212 * Architecture-specific implementations of sys_reboot commands.
62213 */
62214
62215 -extern void machine_restart(char *cmd);
62216 -extern void machine_halt(void);
62217 -extern void machine_power_off(void);
62218 +extern void machine_restart(char *cmd) __noreturn;
62219 +extern void machine_halt(void) __noreturn;
62220 +extern void machine_power_off(void) __noreturn;
62221
62222 extern void machine_shutdown(void);
62223 struct pt_regs;
62224 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
62225 */
62226
62227 extern void kernel_restart_prepare(char *cmd);
62228 -extern void kernel_restart(char *cmd);
62229 -extern void kernel_halt(void);
62230 -extern void kernel_power_off(void);
62231 +extern void kernel_restart(char *cmd) __noreturn;
62232 +extern void kernel_halt(void) __noreturn;
62233 +extern void kernel_power_off(void) __noreturn;
62234
62235 extern int C_A_D; /* for sysctl */
62236 void ctrl_alt_del(void);
62237 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
62238 * Emergency restart, callable from an interrupt handler.
62239 */
62240
62241 -extern void emergency_restart(void);
62242 +extern void emergency_restart(void) __noreturn;
62243 #include <asm/emergency-restart.h>
62244
62245 #endif
62246 diff --git a/include/linux/relay.h b/include/linux/relay.h
62247 index 91cacc3..b55ff74 100644
62248 --- a/include/linux/relay.h
62249 +++ b/include/linux/relay.h
62250 @@ -160,7 +160,7 @@ struct rchan_callbacks
62251 * The callback should return 0 if successful, negative if not.
62252 */
62253 int (*remove_buf_file)(struct dentry *dentry);
62254 -};
62255 +} __no_const;
62256
62257 /*
62258 * CONFIG_RELAY kernel API, kernel/relay.c
62259 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
62260 index 6fdf027..ff72610 100644
62261 --- a/include/linux/rfkill.h
62262 +++ b/include/linux/rfkill.h
62263 @@ -147,6 +147,7 @@ struct rfkill_ops {
62264 void (*query)(struct rfkill *rfkill, void *data);
62265 int (*set_block)(void *data, bool blocked);
62266 };
62267 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
62268
62269 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
62270 /**
62271 diff --git a/include/linux/rio.h b/include/linux/rio.h
62272 index 4d50611..c6858a2 100644
62273 --- a/include/linux/rio.h
62274 +++ b/include/linux/rio.h
62275 @@ -315,7 +315,7 @@ struct rio_ops {
62276 int mbox, void *buffer, size_t len);
62277 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
62278 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
62279 -};
62280 +} __no_const;
62281
62282 #define RIO_RESOURCE_MEM 0x00000100
62283 #define RIO_RESOURCE_DOORBELL 0x00000200
62284 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
62285 index fd07c45..4676b8e 100644
62286 --- a/include/linux/rmap.h
62287 +++ b/include/linux/rmap.h
62288 @@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
62289 void anon_vma_init(void); /* create anon_vma_cachep */
62290 int anon_vma_prepare(struct vm_area_struct *);
62291 void unlink_anon_vmas(struct vm_area_struct *);
62292 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
62293 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
62294 void anon_vma_moveto_tail(struct vm_area_struct *);
62295 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
62296 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
62297
62298 static inline void anon_vma_merge(struct vm_area_struct *vma,
62299 struct vm_area_struct *next)
62300 diff --git a/include/linux/sched.h b/include/linux/sched.h
62301 index 81a173c..85ccd8f 100644
62302 --- a/include/linux/sched.h
62303 +++ b/include/linux/sched.h
62304 @@ -100,6 +100,7 @@ struct bio_list;
62305 struct fs_struct;
62306 struct perf_event_context;
62307 struct blk_plug;
62308 +struct linux_binprm;
62309
62310 /*
62311 * List of flags we want to share for kernel threads,
62312 @@ -382,10 +383,13 @@ struct user_namespace;
62313 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
62314
62315 extern int sysctl_max_map_count;
62316 +extern unsigned long sysctl_heap_stack_gap;
62317
62318 #include <linux/aio.h>
62319
62320 #ifdef CONFIG_MMU
62321 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
62322 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
62323 extern void arch_pick_mmap_layout(struct mm_struct *mm);
62324 extern unsigned long
62325 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
62326 @@ -643,6 +647,17 @@ struct signal_struct {
62327 #ifdef CONFIG_TASKSTATS
62328 struct taskstats *stats;
62329 #endif
62330 +
62331 +#ifdef CONFIG_GRKERNSEC
62332 + u32 curr_ip;
62333 + u32 saved_ip;
62334 + u32 gr_saddr;
62335 + u32 gr_daddr;
62336 + u16 gr_sport;
62337 + u16 gr_dport;
62338 + u8 used_accept:1;
62339 +#endif
62340 +
62341 #ifdef CONFIG_AUDIT
62342 unsigned audit_tty;
62343 struct tty_audit_buf *tty_audit_buf;
62344 @@ -726,6 +741,11 @@ struct user_struct {
62345 struct key *session_keyring; /* UID's default session keyring */
62346 #endif
62347
62348 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
62349 + unsigned int banned;
62350 + unsigned long ban_expires;
62351 +#endif
62352 +
62353 /* Hash table maintenance information */
62354 struct hlist_node uidhash_node;
62355 uid_t uid;
62356 @@ -1386,8 +1406,8 @@ struct task_struct {
62357 struct list_head thread_group;
62358
62359 struct completion *vfork_done; /* for vfork() */
62360 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
62361 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62362 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
62363 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62364
62365 cputime_t utime, stime, utimescaled, stimescaled;
62366 cputime_t gtime;
62367 @@ -1403,13 +1423,6 @@ struct task_struct {
62368 struct task_cputime cputime_expires;
62369 struct list_head cpu_timers[3];
62370
62371 -/* process credentials */
62372 - const struct cred __rcu *real_cred; /* objective and real subjective task
62373 - * credentials (COW) */
62374 - const struct cred __rcu *cred; /* effective (overridable) subjective task
62375 - * credentials (COW) */
62376 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62377 -
62378 char comm[TASK_COMM_LEN]; /* executable name excluding path
62379 - access with [gs]et_task_comm (which lock
62380 it with task_lock())
62381 @@ -1426,8 +1439,16 @@ struct task_struct {
62382 #endif
62383 /* CPU-specific state of this task */
62384 struct thread_struct thread;
62385 +/* thread_info moved to task_struct */
62386 +#ifdef CONFIG_X86
62387 + struct thread_info tinfo;
62388 +#endif
62389 /* filesystem information */
62390 struct fs_struct *fs;
62391 +
62392 + const struct cred __rcu *cred; /* effective (overridable) subjective task
62393 + * credentials (COW) */
62394 +
62395 /* open file information */
62396 struct files_struct *files;
62397 /* namespaces */
62398 @@ -1469,6 +1490,11 @@ struct task_struct {
62399 struct rt_mutex_waiter *pi_blocked_on;
62400 #endif
62401
62402 +/* process credentials */
62403 + const struct cred __rcu *real_cred; /* objective and real subjective task
62404 + * credentials (COW) */
62405 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62406 +
62407 #ifdef CONFIG_DEBUG_MUTEXES
62408 /* mutex deadlock detection */
62409 struct mutex_waiter *blocked_on;
62410 @@ -1585,6 +1611,27 @@ struct task_struct {
62411 unsigned long default_timer_slack_ns;
62412
62413 struct list_head *scm_work_list;
62414 +
62415 +#ifdef CONFIG_GRKERNSEC
62416 + /* grsecurity */
62417 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62418 + u64 exec_id;
62419 +#endif
62420 +#ifdef CONFIG_GRKERNSEC_SETXID
62421 + const struct cred *delayed_cred;
62422 +#endif
62423 + struct dentry *gr_chroot_dentry;
62424 + struct acl_subject_label *acl;
62425 + struct acl_role_label *role;
62426 + struct file *exec_file;
62427 + u16 acl_role_id;
62428 + /* is this the task that authenticated to the special role */
62429 + u8 acl_sp_role;
62430 + u8 is_writable;
62431 + u8 brute;
62432 + u8 gr_is_chrooted;
62433 +#endif
62434 +
62435 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
62436 /* Index of current stored address in ret_stack */
62437 int curr_ret_stack;
62438 @@ -1619,6 +1666,51 @@ struct task_struct {
62439 #endif
62440 };
62441
62442 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
62443 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
62444 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
62445 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
62446 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
62447 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
62448 +
62449 +#ifdef CONFIG_PAX_SOFTMODE
62450 +extern int pax_softmode;
62451 +#endif
62452 +
62453 +extern int pax_check_flags(unsigned long *);
62454 +
62455 +/* if tsk != current then task_lock must be held on it */
62456 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62457 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
62458 +{
62459 + if (likely(tsk->mm))
62460 + return tsk->mm->pax_flags;
62461 + else
62462 + return 0UL;
62463 +}
62464 +
62465 +/* if tsk != current then task_lock must be held on it */
62466 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
62467 +{
62468 + if (likely(tsk->mm)) {
62469 + tsk->mm->pax_flags = flags;
62470 + return 0;
62471 + }
62472 + return -EINVAL;
62473 +}
62474 +#endif
62475 +
62476 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62477 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
62478 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62479 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
62480 +#endif
62481 +
62482 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
62483 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
62484 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
62485 +extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
62486 +
62487 /* Future-safe accessor for struct task_struct's cpus_allowed. */
62488 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
62489
62490 @@ -2138,7 +2230,9 @@ void yield(void);
62491 extern struct exec_domain default_exec_domain;
62492
62493 union thread_union {
62494 +#ifndef CONFIG_X86
62495 struct thread_info thread_info;
62496 +#endif
62497 unsigned long stack[THREAD_SIZE/sizeof(long)];
62498 };
62499
62500 @@ -2171,6 +2265,7 @@ extern struct pid_namespace init_pid_ns;
62501 */
62502
62503 extern struct task_struct *find_task_by_vpid(pid_t nr);
62504 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
62505 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
62506 struct pid_namespace *ns);
62507
62508 @@ -2314,7 +2409,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
62509 extern void exit_itimers(struct signal_struct *);
62510 extern void flush_itimer_signals(void);
62511
62512 -extern void do_group_exit(int);
62513 +extern __noreturn void do_group_exit(int);
62514
62515 extern void daemonize(const char *, ...);
62516 extern int allow_signal(int);
62517 @@ -2515,13 +2610,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
62518
62519 #endif
62520
62521 -static inline int object_is_on_stack(void *obj)
62522 +static inline int object_starts_on_stack(void *obj)
62523 {
62524 - void *stack = task_stack_page(current);
62525 + const void *stack = task_stack_page(current);
62526
62527 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
62528 }
62529
62530 +#ifdef CONFIG_PAX_USERCOPY
62531 +extern int object_is_on_stack(const void *obj, unsigned long len);
62532 +#endif
62533 +
62534 extern void thread_info_cache_init(void);
62535
62536 #ifdef CONFIG_DEBUG_STACK_USAGE
62537 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
62538 index 899fbb4..1cb4138 100644
62539 --- a/include/linux/screen_info.h
62540 +++ b/include/linux/screen_info.h
62541 @@ -43,7 +43,8 @@ struct screen_info {
62542 __u16 pages; /* 0x32 */
62543 __u16 vesa_attributes; /* 0x34 */
62544 __u32 capabilities; /* 0x36 */
62545 - __u8 _reserved[6]; /* 0x3a */
62546 + __u16 vesapm_size; /* 0x3a */
62547 + __u8 _reserved[4]; /* 0x3c */
62548 } __attribute__((packed));
62549
62550 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
62551 diff --git a/include/linux/security.h b/include/linux/security.h
62552 index 673afbb..2b7454b 100644
62553 --- a/include/linux/security.h
62554 +++ b/include/linux/security.h
62555 @@ -26,6 +26,7 @@
62556 #include <linux/capability.h>
62557 #include <linux/slab.h>
62558 #include <linux/err.h>
62559 +#include <linux/grsecurity.h>
62560
62561 struct linux_binprm;
62562 struct cred;
62563 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
62564 index fc61854..d7c490b 100644
62565 --- a/include/linux/seq_file.h
62566 +++ b/include/linux/seq_file.h
62567 @@ -25,6 +25,9 @@ struct seq_file {
62568 struct mutex lock;
62569 const struct seq_operations *op;
62570 int poll_event;
62571 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62572 + u64 exec_id;
62573 +#endif
62574 void *private;
62575 };
62576
62577 @@ -34,6 +37,7 @@ struct seq_operations {
62578 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
62579 int (*show) (struct seq_file *m, void *v);
62580 };
62581 +typedef struct seq_operations __no_const seq_operations_no_const;
62582
62583 #define SEQ_SKIP 1
62584
62585 diff --git a/include/linux/shm.h b/include/linux/shm.h
62586 index 92808b8..c28cac4 100644
62587 --- a/include/linux/shm.h
62588 +++ b/include/linux/shm.h
62589 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
62590
62591 /* The task created the shm object. NULL if the task is dead. */
62592 struct task_struct *shm_creator;
62593 +#ifdef CONFIG_GRKERNSEC
62594 + time_t shm_createtime;
62595 + pid_t shm_lapid;
62596 +#endif
62597 };
62598
62599 /* shm_mode upper byte flags */
62600 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
62601 index c168907..c7756db 100644
62602 --- a/include/linux/skbuff.h
62603 +++ b/include/linux/skbuff.h
62604 @@ -666,7 +666,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
62605 */
62606 static inline int skb_queue_empty(const struct sk_buff_head *list)
62607 {
62608 - return list->next == (struct sk_buff *)list;
62609 + return list->next == (const struct sk_buff *)list;
62610 }
62611
62612 /**
62613 @@ -679,7 +679,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
62614 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62615 const struct sk_buff *skb)
62616 {
62617 - return skb->next == (struct sk_buff *)list;
62618 + return skb->next == (const struct sk_buff *)list;
62619 }
62620
62621 /**
62622 @@ -692,7 +692,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62623 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
62624 const struct sk_buff *skb)
62625 {
62626 - return skb->prev == (struct sk_buff *)list;
62627 + return skb->prev == (const struct sk_buff *)list;
62628 }
62629
62630 /**
62631 @@ -1587,7 +1587,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
62632 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
62633 */
62634 #ifndef NET_SKB_PAD
62635 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
62636 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
62637 #endif
62638
62639 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
62640 diff --git a/include/linux/slab.h b/include/linux/slab.h
62641 index a595dce..c403597 100644
62642 --- a/include/linux/slab.h
62643 +++ b/include/linux/slab.h
62644 @@ -11,12 +11,20 @@
62645
62646 #include <linux/gfp.h>
62647 #include <linux/types.h>
62648 +#include <linux/err.h>
62649
62650 /*
62651 * Flags to pass to kmem_cache_create().
62652 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
62653 */
62654 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
62655 +
62656 +#ifdef CONFIG_PAX_USERCOPY
62657 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
62658 +#else
62659 +#define SLAB_USERCOPY 0x00000000UL
62660 +#endif
62661 +
62662 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
62663 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
62664 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
62665 @@ -87,10 +95,13 @@
62666 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
62667 * Both make kfree a no-op.
62668 */
62669 -#define ZERO_SIZE_PTR ((void *)16)
62670 +#define ZERO_SIZE_PTR \
62671 +({ \
62672 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
62673 + (void *)(-MAX_ERRNO-1L); \
62674 +})
62675
62676 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
62677 - (unsigned long)ZERO_SIZE_PTR)
62678 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
62679
62680 /*
62681 * struct kmem_cache related prototypes
62682 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
62683 void kfree(const void *);
62684 void kzfree(const void *);
62685 size_t ksize(const void *);
62686 +void check_object_size(const void *ptr, unsigned long n, bool to);
62687
62688 /*
62689 * Allocator specific definitions. These are mainly used to establish optimized
62690 @@ -240,6 +252,7 @@ size_t ksize(const void *);
62691 * for general use, and so are not documented here. For a full list of
62692 * potential flags, always refer to linux/gfp.h.
62693 */
62694 +static void *kmalloc_array(size_t n, size_t size, gfp_t flags) __size_overflow(1, 2);
62695 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
62696 {
62697 if (size != 0 && n > ULONG_MAX / size)
62698 @@ -298,7 +311,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
62699 */
62700 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
62701 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
62702 -extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
62703 +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
62704 #define kmalloc_track_caller(size, flags) \
62705 __kmalloc_track_caller(size, flags, _RET_IP_)
62706 #else
62707 @@ -317,7 +330,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
62708 */
62709 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
62710 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
62711 -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
62712 +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
62713 #define kmalloc_node_track_caller(size, flags, node) \
62714 __kmalloc_node_track_caller(size, flags, node, \
62715 _RET_IP_)
62716 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
62717 index fbd1117..d4d8ef8 100644
62718 --- a/include/linux/slab_def.h
62719 +++ b/include/linux/slab_def.h
62720 @@ -66,10 +66,10 @@ struct kmem_cache {
62721 unsigned long node_allocs;
62722 unsigned long node_frees;
62723 unsigned long node_overflow;
62724 - atomic_t allochit;
62725 - atomic_t allocmiss;
62726 - atomic_t freehit;
62727 - atomic_t freemiss;
62728 + atomic_unchecked_t allochit;
62729 + atomic_unchecked_t allocmiss;
62730 + atomic_unchecked_t freehit;
62731 + atomic_unchecked_t freemiss;
62732
62733 /*
62734 * If debugging is enabled, then the allocator can add additional
62735 @@ -107,7 +107,7 @@ struct cache_sizes {
62736 extern struct cache_sizes malloc_sizes[];
62737
62738 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
62739 -void *__kmalloc(size_t size, gfp_t flags);
62740 +void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
62741
62742 #ifdef CONFIG_TRACING
62743 extern void *kmem_cache_alloc_trace(size_t size,
62744 @@ -160,7 +160,7 @@ found:
62745 }
62746
62747 #ifdef CONFIG_NUMA
62748 -extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
62749 +extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
62750 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
62751
62752 #ifdef CONFIG_TRACING
62753 diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
62754 index 0ec00b3..39cb7fc 100644
62755 --- a/include/linux/slob_def.h
62756 +++ b/include/linux/slob_def.h
62757 @@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
62758 return kmem_cache_alloc_node(cachep, flags, -1);
62759 }
62760
62761 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
62762 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
62763
62764 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
62765 {
62766 @@ -29,6 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
62767 return __kmalloc_node(size, flags, -1);
62768 }
62769
62770 +static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
62771 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
62772 {
62773 return kmalloc(size, flags);
62774 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
62775 index c2f8c8b..be9e036 100644
62776 --- a/include/linux/slub_def.h
62777 +++ b/include/linux/slub_def.h
62778 @@ -92,7 +92,7 @@ struct kmem_cache {
62779 struct kmem_cache_order_objects max;
62780 struct kmem_cache_order_objects min;
62781 gfp_t allocflags; /* gfp flags to use on each alloc */
62782 - int refcount; /* Refcount for slab cache destroy */
62783 + atomic_t refcount; /* Refcount for slab cache destroy */
62784 void (*ctor)(void *);
62785 int inuse; /* Offset to metadata */
62786 int align; /* Alignment */
62787 @@ -153,6 +153,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
62788 * Sorry that the following has to be that ugly but some versions of GCC
62789 * have trouble with constant propagation and loops.
62790 */
62791 +static __always_inline int kmalloc_index(size_t size) __size_overflow(1);
62792 static __always_inline int kmalloc_index(size_t size)
62793 {
62794 if (!size)
62795 @@ -218,7 +219,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
62796 }
62797
62798 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
62799 -void *__kmalloc(size_t size, gfp_t flags);
62800 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
62801
62802 static __always_inline void *
62803 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
62804 @@ -259,6 +260,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
62805 }
62806 #endif
62807
62808 +static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
62809 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
62810 {
62811 unsigned int order = get_order(size);
62812 @@ -284,7 +286,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
62813 }
62814
62815 #ifdef CONFIG_NUMA
62816 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
62817 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
62818 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
62819
62820 #ifdef CONFIG_TRACING
62821 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
62822 index de8832d..0147b46 100644
62823 --- a/include/linux/sonet.h
62824 +++ b/include/linux/sonet.h
62825 @@ -61,7 +61,7 @@ struct sonet_stats {
62826 #include <linux/atomic.h>
62827
62828 struct k_sonet_stats {
62829 -#define __HANDLE_ITEM(i) atomic_t i
62830 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
62831 __SONET_ITEMS
62832 #undef __HANDLE_ITEM
62833 };
62834 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
62835 index 523547e..2cb7140 100644
62836 --- a/include/linux/sunrpc/clnt.h
62837 +++ b/include/linux/sunrpc/clnt.h
62838 @@ -174,9 +174,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
62839 {
62840 switch (sap->sa_family) {
62841 case AF_INET:
62842 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
62843 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
62844 case AF_INET6:
62845 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
62846 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
62847 }
62848 return 0;
62849 }
62850 @@ -209,7 +209,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
62851 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
62852 const struct sockaddr *src)
62853 {
62854 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
62855 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
62856 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
62857
62858 dsin->sin_family = ssin->sin_family;
62859 @@ -312,7 +312,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
62860 if (sa->sa_family != AF_INET6)
62861 return 0;
62862
62863 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
62864 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
62865 }
62866
62867 #endif /* __KERNEL__ */
62868 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
62869 index dc0c3cc..8503fb6 100644
62870 --- a/include/linux/sunrpc/sched.h
62871 +++ b/include/linux/sunrpc/sched.h
62872 @@ -106,6 +106,7 @@ struct rpc_call_ops {
62873 void (*rpc_count_stats)(struct rpc_task *, void *);
62874 void (*rpc_release)(void *);
62875 };
62876 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
62877
62878 struct rpc_task_setup {
62879 struct rpc_task *task;
62880 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
62881 index 0b8e3e6..33e0a01 100644
62882 --- a/include/linux/sunrpc/svc_rdma.h
62883 +++ b/include/linux/sunrpc/svc_rdma.h
62884 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
62885 extern unsigned int svcrdma_max_requests;
62886 extern unsigned int svcrdma_max_req_size;
62887
62888 -extern atomic_t rdma_stat_recv;
62889 -extern atomic_t rdma_stat_read;
62890 -extern atomic_t rdma_stat_write;
62891 -extern atomic_t rdma_stat_sq_starve;
62892 -extern atomic_t rdma_stat_rq_starve;
62893 -extern atomic_t rdma_stat_rq_poll;
62894 -extern atomic_t rdma_stat_rq_prod;
62895 -extern atomic_t rdma_stat_sq_poll;
62896 -extern atomic_t rdma_stat_sq_prod;
62897 +extern atomic_unchecked_t rdma_stat_recv;
62898 +extern atomic_unchecked_t rdma_stat_read;
62899 +extern atomic_unchecked_t rdma_stat_write;
62900 +extern atomic_unchecked_t rdma_stat_sq_starve;
62901 +extern atomic_unchecked_t rdma_stat_rq_starve;
62902 +extern atomic_unchecked_t rdma_stat_rq_poll;
62903 +extern atomic_unchecked_t rdma_stat_rq_prod;
62904 +extern atomic_unchecked_t rdma_stat_sq_poll;
62905 +extern atomic_unchecked_t rdma_stat_sq_prod;
62906
62907 #define RPCRDMA_VERSION 1
62908
62909 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
62910 index c34b4c8..a65b67d 100644
62911 --- a/include/linux/sysctl.h
62912 +++ b/include/linux/sysctl.h
62913 @@ -155,7 +155,11 @@ enum
62914 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
62915 };
62916
62917 -
62918 +#ifdef CONFIG_PAX_SOFTMODE
62919 +enum {
62920 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
62921 +};
62922 +#endif
62923
62924 /* CTL_VM names: */
62925 enum
62926 @@ -948,6 +952,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
62927
62928 extern int proc_dostring(struct ctl_table *, int,
62929 void __user *, size_t *, loff_t *);
62930 +extern int proc_dostring_modpriv(struct ctl_table *, int,
62931 + void __user *, size_t *, loff_t *);
62932 extern int proc_dointvec(struct ctl_table *, int,
62933 void __user *, size_t *, loff_t *);
62934 extern int proc_dointvec_minmax(struct ctl_table *, int,
62935 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
62936 index ff7dc08..893e1bd 100644
62937 --- a/include/linux/tty_ldisc.h
62938 +++ b/include/linux/tty_ldisc.h
62939 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
62940
62941 struct module *owner;
62942
62943 - int refcount;
62944 + atomic_t refcount;
62945 };
62946
62947 struct tty_ldisc {
62948 diff --git a/include/linux/types.h b/include/linux/types.h
62949 index 7f480db..175c256 100644
62950 --- a/include/linux/types.h
62951 +++ b/include/linux/types.h
62952 @@ -220,10 +220,26 @@ typedef struct {
62953 int counter;
62954 } atomic_t;
62955
62956 +#ifdef CONFIG_PAX_REFCOUNT
62957 +typedef struct {
62958 + int counter;
62959 +} atomic_unchecked_t;
62960 +#else
62961 +typedef atomic_t atomic_unchecked_t;
62962 +#endif
62963 +
62964 #ifdef CONFIG_64BIT
62965 typedef struct {
62966 long counter;
62967 } atomic64_t;
62968 +
62969 +#ifdef CONFIG_PAX_REFCOUNT
62970 +typedef struct {
62971 + long counter;
62972 +} atomic64_unchecked_t;
62973 +#else
62974 +typedef atomic64_t atomic64_unchecked_t;
62975 +#endif
62976 #endif
62977
62978 struct list_head {
62979 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
62980 index 5ca0951..ab496a5 100644
62981 --- a/include/linux/uaccess.h
62982 +++ b/include/linux/uaccess.h
62983 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
62984 long ret; \
62985 mm_segment_t old_fs = get_fs(); \
62986 \
62987 - set_fs(KERNEL_DS); \
62988 pagefault_disable(); \
62989 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
62990 - pagefault_enable(); \
62991 + set_fs(KERNEL_DS); \
62992 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
62993 set_fs(old_fs); \
62994 + pagefault_enable(); \
62995 ret; \
62996 })
62997
62998 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
62999 index 99c1b4d..bb94261 100644
63000 --- a/include/linux/unaligned/access_ok.h
63001 +++ b/include/linux/unaligned/access_ok.h
63002 @@ -6,32 +6,32 @@
63003
63004 static inline u16 get_unaligned_le16(const void *p)
63005 {
63006 - return le16_to_cpup((__le16 *)p);
63007 + return le16_to_cpup((const __le16 *)p);
63008 }
63009
63010 static inline u32 get_unaligned_le32(const void *p)
63011 {
63012 - return le32_to_cpup((__le32 *)p);
63013 + return le32_to_cpup((const __le32 *)p);
63014 }
63015
63016 static inline u64 get_unaligned_le64(const void *p)
63017 {
63018 - return le64_to_cpup((__le64 *)p);
63019 + return le64_to_cpup((const __le64 *)p);
63020 }
63021
63022 static inline u16 get_unaligned_be16(const void *p)
63023 {
63024 - return be16_to_cpup((__be16 *)p);
63025 + return be16_to_cpup((const __be16 *)p);
63026 }
63027
63028 static inline u32 get_unaligned_be32(const void *p)
63029 {
63030 - return be32_to_cpup((__be32 *)p);
63031 + return be32_to_cpup((const __be32 *)p);
63032 }
63033
63034 static inline u64 get_unaligned_be64(const void *p)
63035 {
63036 - return be64_to_cpup((__be64 *)p);
63037 + return be64_to_cpup((const __be64 *)p);
63038 }
63039
63040 static inline void put_unaligned_le16(u16 val, void *p)
63041 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
63042 index 547e59c..db6ad19 100644
63043 --- a/include/linux/usb/renesas_usbhs.h
63044 +++ b/include/linux/usb/renesas_usbhs.h
63045 @@ -39,7 +39,7 @@ enum {
63046 */
63047 struct renesas_usbhs_driver_callback {
63048 int (*notify_hotplug)(struct platform_device *pdev);
63049 -};
63050 +} __no_const;
63051
63052 /*
63053 * callback functions for platform
63054 @@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
63055 * VBUS control is needed for Host
63056 */
63057 int (*set_vbus)(struct platform_device *pdev, int enable);
63058 -};
63059 +} __no_const;
63060
63061 /*
63062 * parameters for renesas usbhs
63063 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
63064 index 6f8fbcf..8259001 100644
63065 --- a/include/linux/vermagic.h
63066 +++ b/include/linux/vermagic.h
63067 @@ -25,9 +25,35 @@
63068 #define MODULE_ARCH_VERMAGIC ""
63069 #endif
63070
63071 +#ifdef CONFIG_PAX_REFCOUNT
63072 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
63073 +#else
63074 +#define MODULE_PAX_REFCOUNT ""
63075 +#endif
63076 +
63077 +#ifdef CONSTIFY_PLUGIN
63078 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
63079 +#else
63080 +#define MODULE_CONSTIFY_PLUGIN ""
63081 +#endif
63082 +
63083 +#ifdef STACKLEAK_PLUGIN
63084 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
63085 +#else
63086 +#define MODULE_STACKLEAK_PLUGIN ""
63087 +#endif
63088 +
63089 +#ifdef CONFIG_GRKERNSEC
63090 +#define MODULE_GRSEC "GRSEC "
63091 +#else
63092 +#define MODULE_GRSEC ""
63093 +#endif
63094 +
63095 #define VERMAGIC_STRING \
63096 UTS_RELEASE " " \
63097 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
63098 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
63099 - MODULE_ARCH_VERMAGIC
63100 + MODULE_ARCH_VERMAGIC \
63101 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
63102 + MODULE_GRSEC
63103
63104 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
63105 index dcdfc2b..ec79ab5 100644
63106 --- a/include/linux/vmalloc.h
63107 +++ b/include/linux/vmalloc.h
63108 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
63109 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
63110 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
63111 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
63112 +
63113 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
63114 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
63115 +#endif
63116 +
63117 /* bits [20..32] reserved for arch specific ioremap internals */
63118
63119 /*
63120 @@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
63121 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
63122 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
63123 unsigned long start, unsigned long end, gfp_t gfp_mask,
63124 - pgprot_t prot, int node, void *caller);
63125 + pgprot_t prot, int node, void *caller) __size_overflow(1);
63126 extern void vfree(const void *addr);
63127
63128 extern void *vmap(struct page **pages, unsigned int count,
63129 @@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
63130 extern void free_vm_area(struct vm_struct *area);
63131
63132 /* for /dev/kmem */
63133 -extern long vread(char *buf, char *addr, unsigned long count);
63134 -extern long vwrite(char *buf, char *addr, unsigned long count);
63135 +extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
63136 +extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
63137
63138 /*
63139 * Internals. Dont't use..
63140 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
63141 index 65efb92..137adbb 100644
63142 --- a/include/linux/vmstat.h
63143 +++ b/include/linux/vmstat.h
63144 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
63145 /*
63146 * Zone based page accounting with per cpu differentials.
63147 */
63148 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63149 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63150
63151 static inline void zone_page_state_add(long x, struct zone *zone,
63152 enum zone_stat_item item)
63153 {
63154 - atomic_long_add(x, &zone->vm_stat[item]);
63155 - atomic_long_add(x, &vm_stat[item]);
63156 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
63157 + atomic_long_add_unchecked(x, &vm_stat[item]);
63158 }
63159
63160 static inline unsigned long global_page_state(enum zone_stat_item item)
63161 {
63162 - long x = atomic_long_read(&vm_stat[item]);
63163 + long x = atomic_long_read_unchecked(&vm_stat[item]);
63164 #ifdef CONFIG_SMP
63165 if (x < 0)
63166 x = 0;
63167 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
63168 static inline unsigned long zone_page_state(struct zone *zone,
63169 enum zone_stat_item item)
63170 {
63171 - long x = atomic_long_read(&zone->vm_stat[item]);
63172 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63173 #ifdef CONFIG_SMP
63174 if (x < 0)
63175 x = 0;
63176 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
63177 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
63178 enum zone_stat_item item)
63179 {
63180 - long x = atomic_long_read(&zone->vm_stat[item]);
63181 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63182
63183 #ifdef CONFIG_SMP
63184 int cpu;
63185 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
63186
63187 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
63188 {
63189 - atomic_long_inc(&zone->vm_stat[item]);
63190 - atomic_long_inc(&vm_stat[item]);
63191 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
63192 + atomic_long_inc_unchecked(&vm_stat[item]);
63193 }
63194
63195 static inline void __inc_zone_page_state(struct page *page,
63196 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
63197
63198 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
63199 {
63200 - atomic_long_dec(&zone->vm_stat[item]);
63201 - atomic_long_dec(&vm_stat[item]);
63202 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
63203 + atomic_long_dec_unchecked(&vm_stat[item]);
63204 }
63205
63206 static inline void __dec_zone_page_state(struct page *page,
63207 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
63208 index e5d1220..ef6e406 100644
63209 --- a/include/linux/xattr.h
63210 +++ b/include/linux/xattr.h
63211 @@ -57,6 +57,11 @@
63212 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
63213 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
63214
63215 +/* User namespace */
63216 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
63217 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
63218 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
63219 +
63220 #ifdef __KERNEL__
63221
63222 #include <linux/types.h>
63223 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
63224 index 4aeff96..b378cdc 100644
63225 --- a/include/media/saa7146_vv.h
63226 +++ b/include/media/saa7146_vv.h
63227 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
63228 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
63229
63230 /* the extension can override this */
63231 - struct v4l2_ioctl_ops ops;
63232 + v4l2_ioctl_ops_no_const ops;
63233 /* pointer to the saa7146 core ops */
63234 const struct v4l2_ioctl_ops *core_ops;
63235
63236 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
63237 index 96d2221..2292f89 100644
63238 --- a/include/media/v4l2-dev.h
63239 +++ b/include/media/v4l2-dev.h
63240 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
63241
63242
63243 struct v4l2_file_operations {
63244 - struct module *owner;
63245 + struct module * const owner;
63246 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
63247 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
63248 unsigned int (*poll) (struct file *, struct poll_table_struct *);
63249 @@ -71,6 +71,7 @@ struct v4l2_file_operations {
63250 int (*open) (struct file *);
63251 int (*release) (struct file *);
63252 };
63253 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
63254
63255 /*
63256 * Newer version of video_device, handled by videodev2.c
63257 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
63258 index 3cb939c..f23c6bb 100644
63259 --- a/include/media/v4l2-ioctl.h
63260 +++ b/include/media/v4l2-ioctl.h
63261 @@ -281,7 +281,7 @@ struct v4l2_ioctl_ops {
63262 long (*vidioc_default) (struct file *file, void *fh,
63263 bool valid_prio, int cmd, void *arg);
63264 };
63265 -
63266 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
63267
63268 /* v4l debugging and diagnostics */
63269
63270 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
63271 index 6db8ecf..8c23861 100644
63272 --- a/include/net/caif/caif_hsi.h
63273 +++ b/include/net/caif/caif_hsi.h
63274 @@ -98,7 +98,7 @@ struct cfhsi_drv {
63275 void (*rx_done_cb) (struct cfhsi_drv *drv);
63276 void (*wake_up_cb) (struct cfhsi_drv *drv);
63277 void (*wake_down_cb) (struct cfhsi_drv *drv);
63278 -};
63279 +} __no_const;
63280
63281 /* Structure implemented by HSI device. */
63282 struct cfhsi_dev {
63283 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
63284 index 9e5425b..8136ffc 100644
63285 --- a/include/net/caif/cfctrl.h
63286 +++ b/include/net/caif/cfctrl.h
63287 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
63288 void (*radioset_rsp)(void);
63289 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
63290 struct cflayer *client_layer);
63291 -};
63292 +} __no_const;
63293
63294 /* Link Setup Parameters for CAIF-Links. */
63295 struct cfctrl_link_param {
63296 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
63297 struct cfctrl {
63298 struct cfsrvl serv;
63299 struct cfctrl_rsp res;
63300 - atomic_t req_seq_no;
63301 - atomic_t rsp_seq_no;
63302 + atomic_unchecked_t req_seq_no;
63303 + atomic_unchecked_t rsp_seq_no;
63304 struct list_head list;
63305 /* Protects from simultaneous access to first_req list */
63306 spinlock_t info_list_lock;
63307 diff --git a/include/net/flow.h b/include/net/flow.h
63308 index 6c469db..7743b8e 100644
63309 --- a/include/net/flow.h
63310 +++ b/include/net/flow.h
63311 @@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
63312
63313 extern void flow_cache_flush(void);
63314 extern void flow_cache_flush_deferred(void);
63315 -extern atomic_t flow_cache_genid;
63316 +extern atomic_unchecked_t flow_cache_genid;
63317
63318 #endif
63319 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
63320 index b94765e..053f68b 100644
63321 --- a/include/net/inetpeer.h
63322 +++ b/include/net/inetpeer.h
63323 @@ -48,8 +48,8 @@ struct inet_peer {
63324 */
63325 union {
63326 struct {
63327 - atomic_t rid; /* Frag reception counter */
63328 - atomic_t ip_id_count; /* IP ID for the next packet */
63329 + atomic_unchecked_t rid; /* Frag reception counter */
63330 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
63331 __u32 tcp_ts;
63332 __u32 tcp_ts_stamp;
63333 };
63334 @@ -115,11 +115,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
63335 more++;
63336 inet_peer_refcheck(p);
63337 do {
63338 - old = atomic_read(&p->ip_id_count);
63339 + old = atomic_read_unchecked(&p->ip_id_count);
63340 new = old + more;
63341 if (!new)
63342 new = 1;
63343 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
63344 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
63345 return new;
63346 }
63347
63348 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
63349 index 10422ef..662570f 100644
63350 --- a/include/net/ip_fib.h
63351 +++ b/include/net/ip_fib.h
63352 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
63353
63354 #define FIB_RES_SADDR(net, res) \
63355 ((FIB_RES_NH(res).nh_saddr_genid == \
63356 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
63357 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
63358 FIB_RES_NH(res).nh_saddr : \
63359 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
63360 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
63361 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
63362 index 72522f0..6f03a2b 100644
63363 --- a/include/net/ip_vs.h
63364 +++ b/include/net/ip_vs.h
63365 @@ -510,7 +510,7 @@ struct ip_vs_conn {
63366 struct ip_vs_conn *control; /* Master control connection */
63367 atomic_t n_control; /* Number of controlled ones */
63368 struct ip_vs_dest *dest; /* real server */
63369 - atomic_t in_pkts; /* incoming packet counter */
63370 + atomic_unchecked_t in_pkts; /* incoming packet counter */
63371
63372 /* packet transmitter for different forwarding methods. If it
63373 mangles the packet, it must return NF_DROP or better NF_STOLEN,
63374 @@ -648,7 +648,7 @@ struct ip_vs_dest {
63375 __be16 port; /* port number of the server */
63376 union nf_inet_addr addr; /* IP address of the server */
63377 volatile unsigned flags; /* dest status flags */
63378 - atomic_t conn_flags; /* flags to copy to conn */
63379 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
63380 atomic_t weight; /* server weight */
63381
63382 atomic_t refcnt; /* reference counter */
63383 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
63384 index 69b610a..fe3962c 100644
63385 --- a/include/net/irda/ircomm_core.h
63386 +++ b/include/net/irda/ircomm_core.h
63387 @@ -51,7 +51,7 @@ typedef struct {
63388 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
63389 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
63390 struct ircomm_info *);
63391 -} call_t;
63392 +} __no_const call_t;
63393
63394 struct ircomm_cb {
63395 irda_queue_t queue;
63396 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
63397 index 59ba38bc..d515662 100644
63398 --- a/include/net/irda/ircomm_tty.h
63399 +++ b/include/net/irda/ircomm_tty.h
63400 @@ -35,6 +35,7 @@
63401 #include <linux/termios.h>
63402 #include <linux/timer.h>
63403 #include <linux/tty.h> /* struct tty_struct */
63404 +#include <asm/local.h>
63405
63406 #include <net/irda/irias_object.h>
63407 #include <net/irda/ircomm_core.h>
63408 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
63409 unsigned short close_delay;
63410 unsigned short closing_wait; /* time to wait before closing */
63411
63412 - int open_count;
63413 - int blocked_open; /* # of blocked opens */
63414 + local_t open_count;
63415 + local_t blocked_open; /* # of blocked opens */
63416
63417 /* Protect concurent access to :
63418 * o self->open_count
63419 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
63420 index cc7c197..9f2da2a 100644
63421 --- a/include/net/iucv/af_iucv.h
63422 +++ b/include/net/iucv/af_iucv.h
63423 @@ -141,7 +141,7 @@ struct iucv_sock {
63424 struct iucv_sock_list {
63425 struct hlist_head head;
63426 rwlock_t lock;
63427 - atomic_t autobind_name;
63428 + atomic_unchecked_t autobind_name;
63429 };
63430
63431 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
63432 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
63433 index 34c996f..bb3b4d4 100644
63434 --- a/include/net/neighbour.h
63435 +++ b/include/net/neighbour.h
63436 @@ -123,7 +123,7 @@ struct neigh_ops {
63437 void (*error_report)(struct neighbour *, struct sk_buff *);
63438 int (*output)(struct neighbour *, struct sk_buff *);
63439 int (*connected_output)(struct neighbour *, struct sk_buff *);
63440 -};
63441 +} __do_const;
63442
63443 struct pneigh_entry {
63444 struct pneigh_entry *next;
63445 diff --git a/include/net/netlink.h b/include/net/netlink.h
63446 index f394fe5..fd073f9 100644
63447 --- a/include/net/netlink.h
63448 +++ b/include/net/netlink.h
63449 @@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
63450 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
63451 {
63452 if (mark)
63453 - skb_trim(skb, (unsigned char *) mark - skb->data);
63454 + skb_trim(skb, (const unsigned char *) mark - skb->data);
63455 }
63456
63457 /**
63458 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
63459 index bbd023a..97c6d0d 100644
63460 --- a/include/net/netns/ipv4.h
63461 +++ b/include/net/netns/ipv4.h
63462 @@ -57,8 +57,8 @@ struct netns_ipv4 {
63463 unsigned int sysctl_ping_group_range[2];
63464 long sysctl_tcp_mem[3];
63465
63466 - atomic_t rt_genid;
63467 - atomic_t dev_addr_genid;
63468 + atomic_unchecked_t rt_genid;
63469 + atomic_unchecked_t dev_addr_genid;
63470
63471 #ifdef CONFIG_IP_MROUTE
63472 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
63473 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
63474 index a2ef814..31a8e3f 100644
63475 --- a/include/net/sctp/sctp.h
63476 +++ b/include/net/sctp/sctp.h
63477 @@ -318,9 +318,9 @@ do { \
63478
63479 #else /* SCTP_DEBUG */
63480
63481 -#define SCTP_DEBUG_PRINTK(whatever...)
63482 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
63483 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
63484 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
63485 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
63486 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
63487 #define SCTP_ENABLE_DEBUG
63488 #define SCTP_DISABLE_DEBUG
63489 #define SCTP_ASSERT(expr, str, func)
63490 diff --git a/include/net/sock.h b/include/net/sock.h
63491 index 5a0a58a..2e3d4d0 100644
63492 --- a/include/net/sock.h
63493 +++ b/include/net/sock.h
63494 @@ -302,7 +302,7 @@ struct sock {
63495 #ifdef CONFIG_RPS
63496 __u32 sk_rxhash;
63497 #endif
63498 - atomic_t sk_drops;
63499 + atomic_unchecked_t sk_drops;
63500 int sk_rcvbuf;
63501
63502 struct sk_filter __rcu *sk_filter;
63503 @@ -1691,7 +1691,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
63504 }
63505
63506 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
63507 - char __user *from, char *to,
63508 + char __user *from, unsigned char *to,
63509 int copy, int offset)
63510 {
63511 if (skb->ip_summed == CHECKSUM_NONE) {
63512 diff --git a/include/net/tcp.h b/include/net/tcp.h
63513 index f75a04d..702cf06 100644
63514 --- a/include/net/tcp.h
63515 +++ b/include/net/tcp.h
63516 @@ -1425,7 +1425,7 @@ struct tcp_seq_afinfo {
63517 char *name;
63518 sa_family_t family;
63519 const struct file_operations *seq_fops;
63520 - struct seq_operations seq_ops;
63521 + seq_operations_no_const seq_ops;
63522 };
63523
63524 struct tcp_iter_state {
63525 diff --git a/include/net/udp.h b/include/net/udp.h
63526 index 5d606d9..e879f7b 100644
63527 --- a/include/net/udp.h
63528 +++ b/include/net/udp.h
63529 @@ -244,7 +244,7 @@ struct udp_seq_afinfo {
63530 sa_family_t family;
63531 struct udp_table *udp_table;
63532 const struct file_operations *seq_fops;
63533 - struct seq_operations seq_ops;
63534 + seq_operations_no_const seq_ops;
63535 };
63536
63537 struct udp_iter_state {
63538 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
63539 index 96239e7..c85b032 100644
63540 --- a/include/net/xfrm.h
63541 +++ b/include/net/xfrm.h
63542 @@ -505,7 +505,7 @@ struct xfrm_policy {
63543 struct timer_list timer;
63544
63545 struct flow_cache_object flo;
63546 - atomic_t genid;
63547 + atomic_unchecked_t genid;
63548 u32 priority;
63549 u32 index;
63550 struct xfrm_mark mark;
63551 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
63552 index 1a046b1..ee0bef0 100644
63553 --- a/include/rdma/iw_cm.h
63554 +++ b/include/rdma/iw_cm.h
63555 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
63556 int backlog);
63557
63558 int (*destroy_listen)(struct iw_cm_id *cm_id);
63559 -};
63560 +} __no_const;
63561
63562 /**
63563 * iw_create_cm_id - Create an IW CM identifier.
63564 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
63565 index 8f9dfba..610ab6c 100644
63566 --- a/include/scsi/libfc.h
63567 +++ b/include/scsi/libfc.h
63568 @@ -756,6 +756,7 @@ struct libfc_function_template {
63569 */
63570 void (*disc_stop_final) (struct fc_lport *);
63571 };
63572 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
63573
63574 /**
63575 * struct fc_disc - Discovery context
63576 @@ -861,7 +862,7 @@ struct fc_lport {
63577 struct fc_vport *vport;
63578
63579 /* Operational Information */
63580 - struct libfc_function_template tt;
63581 + libfc_function_template_no_const tt;
63582 u8 link_up;
63583 u8 qfull;
63584 enum fc_lport_state state;
63585 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
63586 index 6efb2e1..cdad57f 100644
63587 --- a/include/scsi/scsi_device.h
63588 +++ b/include/scsi/scsi_device.h
63589 @@ -162,9 +162,9 @@ struct scsi_device {
63590 unsigned int max_device_blocked; /* what device_blocked counts down from */
63591 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
63592
63593 - atomic_t iorequest_cnt;
63594 - atomic_t iodone_cnt;
63595 - atomic_t ioerr_cnt;
63596 + atomic_unchecked_t iorequest_cnt;
63597 + atomic_unchecked_t iodone_cnt;
63598 + atomic_unchecked_t ioerr_cnt;
63599
63600 struct device sdev_gendev,
63601 sdev_dev;
63602 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
63603 index 719faf1..d1154d4 100644
63604 --- a/include/scsi/scsi_transport_fc.h
63605 +++ b/include/scsi/scsi_transport_fc.h
63606 @@ -739,7 +739,7 @@ struct fc_function_template {
63607 unsigned long show_host_system_hostname:1;
63608
63609 unsigned long disable_target_scan:1;
63610 -};
63611 +} __do_const;
63612
63613
63614 /**
63615 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
63616 index 030b87c..98a6954 100644
63617 --- a/include/sound/ak4xxx-adda.h
63618 +++ b/include/sound/ak4xxx-adda.h
63619 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
63620 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
63621 unsigned char val);
63622 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
63623 -};
63624 +} __no_const;
63625
63626 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
63627
63628 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
63629 index 8c05e47..2b5df97 100644
63630 --- a/include/sound/hwdep.h
63631 +++ b/include/sound/hwdep.h
63632 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
63633 struct snd_hwdep_dsp_status *status);
63634 int (*dsp_load)(struct snd_hwdep *hw,
63635 struct snd_hwdep_dsp_image *image);
63636 -};
63637 +} __no_const;
63638
63639 struct snd_hwdep {
63640 struct snd_card *card;
63641 diff --git a/include/sound/info.h b/include/sound/info.h
63642 index 9ca1a49..aba1728 100644
63643 --- a/include/sound/info.h
63644 +++ b/include/sound/info.h
63645 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
63646 struct snd_info_buffer *buffer);
63647 void (*write)(struct snd_info_entry *entry,
63648 struct snd_info_buffer *buffer);
63649 -};
63650 +} __no_const;
63651
63652 struct snd_info_entry_ops {
63653 int (*open)(struct snd_info_entry *entry,
63654 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
63655 index 0d11128..814178e 100644
63656 --- a/include/sound/pcm.h
63657 +++ b/include/sound/pcm.h
63658 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
63659 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
63660 int (*ack)(struct snd_pcm_substream *substream);
63661 };
63662 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
63663
63664 /*
63665 *
63666 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
63667 index af1b49e..a5d55a5 100644
63668 --- a/include/sound/sb16_csp.h
63669 +++ b/include/sound/sb16_csp.h
63670 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
63671 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
63672 int (*csp_stop) (struct snd_sb_csp * p);
63673 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
63674 -};
63675 +} __no_const;
63676
63677 /*
63678 * CSP private data
63679 diff --git a/include/sound/soc.h b/include/sound/soc.h
63680 index 2ebf787..0276839 100644
63681 --- a/include/sound/soc.h
63682 +++ b/include/sound/soc.h
63683 @@ -711,7 +711,7 @@ struct snd_soc_platform_driver {
63684 /* platform IO - used for platform DAPM */
63685 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
63686 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
63687 -};
63688 +} __do_const;
63689
63690 struct snd_soc_platform {
63691 const char *name;
63692 @@ -887,7 +887,7 @@ struct snd_soc_pcm_runtime {
63693 struct snd_soc_dai_link *dai_link;
63694 struct mutex pcm_mutex;
63695 enum snd_soc_pcm_subclass pcm_subclass;
63696 - struct snd_pcm_ops ops;
63697 + snd_pcm_ops_no_const ops;
63698
63699 unsigned int complete:1;
63700 unsigned int dev_registered:1;
63701 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
63702 index 4119966..1a4671c 100644
63703 --- a/include/sound/ymfpci.h
63704 +++ b/include/sound/ymfpci.h
63705 @@ -358,7 +358,7 @@ struct snd_ymfpci {
63706 spinlock_t reg_lock;
63707 spinlock_t voice_lock;
63708 wait_queue_head_t interrupt_sleep;
63709 - atomic_t interrupt_sleep_count;
63710 + atomic_unchecked_t interrupt_sleep_count;
63711 struct snd_info_entry *proc_entry;
63712 const struct firmware *dsp_microcode;
63713 const struct firmware *controller_microcode;
63714 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
63715 index aaccc5f..092d568 100644
63716 --- a/include/target/target_core_base.h
63717 +++ b/include/target/target_core_base.h
63718 @@ -447,7 +447,7 @@ struct t10_reservation_ops {
63719 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
63720 int (*t10_pr_register)(struct se_cmd *);
63721 int (*t10_pr_clear)(struct se_cmd *);
63722 -};
63723 +} __no_const;
63724
63725 struct t10_reservation {
63726 /* Reservation effects all target ports */
63727 @@ -576,7 +576,7 @@ struct se_cmd {
63728 atomic_t t_se_count;
63729 atomic_t t_task_cdbs_left;
63730 atomic_t t_task_cdbs_ex_left;
63731 - atomic_t t_task_cdbs_sent;
63732 + atomic_unchecked_t t_task_cdbs_sent;
63733 unsigned int transport_state;
63734 #define CMD_T_ABORTED (1 << 0)
63735 #define CMD_T_ACTIVE (1 << 1)
63736 @@ -802,7 +802,7 @@ struct se_device {
63737 spinlock_t stats_lock;
63738 /* Active commands on this virtual SE device */
63739 atomic_t simple_cmds;
63740 - atomic_t dev_ordered_id;
63741 + atomic_unchecked_t dev_ordered_id;
63742 atomic_t execute_tasks;
63743 atomic_t dev_ordered_sync;
63744 atomic_t dev_qf_count;
63745 diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
63746 new file mode 100644
63747 index 0000000..2efe49d
63748 --- /dev/null
63749 +++ b/include/trace/events/fs.h
63750 @@ -0,0 +1,53 @@
63751 +#undef TRACE_SYSTEM
63752 +#define TRACE_SYSTEM fs
63753 +
63754 +#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
63755 +#define _TRACE_FS_H
63756 +
63757 +#include <linux/fs.h>
63758 +#include <linux/tracepoint.h>
63759 +
63760 +TRACE_EVENT(do_sys_open,
63761 +
63762 + TP_PROTO(char *filename, int flags, int mode),
63763 +
63764 + TP_ARGS(filename, flags, mode),
63765 +
63766 + TP_STRUCT__entry(
63767 + __string( filename, filename )
63768 + __field( int, flags )
63769 + __field( int, mode )
63770 + ),
63771 +
63772 + TP_fast_assign(
63773 + __assign_str(filename, filename);
63774 + __entry->flags = flags;
63775 + __entry->mode = mode;
63776 + ),
63777 +
63778 + TP_printk("\"%s\" %x %o",
63779 + __get_str(filename), __entry->flags, __entry->mode)
63780 +);
63781 +
63782 +TRACE_EVENT(open_exec,
63783 +
63784 + TP_PROTO(const char *filename),
63785 +
63786 + TP_ARGS(filename),
63787 +
63788 + TP_STRUCT__entry(
63789 + __string( filename, filename )
63790 + ),
63791 +
63792 + TP_fast_assign(
63793 + __assign_str(filename, filename);
63794 + ),
63795 +
63796 + TP_printk("\"%s\"",
63797 + __get_str(filename))
63798 +);
63799 +
63800 +#endif /* _TRACE_FS_H */
63801 +
63802 +/* This part must be outside protection */
63803 +#include <trace/define_trace.h>
63804 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
63805 index 1c09820..7f5ec79 100644
63806 --- a/include/trace/events/irq.h
63807 +++ b/include/trace/events/irq.h
63808 @@ -36,7 +36,7 @@ struct softirq_action;
63809 */
63810 TRACE_EVENT(irq_handler_entry,
63811
63812 - TP_PROTO(int irq, struct irqaction *action),
63813 + TP_PROTO(int irq, const struct irqaction *action),
63814
63815 TP_ARGS(irq, action),
63816
63817 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
63818 */
63819 TRACE_EVENT(irq_handler_exit,
63820
63821 - TP_PROTO(int irq, struct irqaction *action, int ret),
63822 + TP_PROTO(int irq, const struct irqaction *action, int ret),
63823
63824 TP_ARGS(irq, action, ret),
63825
63826 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
63827 index f9466fa..f4e2b81 100644
63828 --- a/include/video/udlfb.h
63829 +++ b/include/video/udlfb.h
63830 @@ -53,10 +53,10 @@ struct dlfb_data {
63831 u32 pseudo_palette[256];
63832 int blank_mode; /*one of FB_BLANK_ */
63833 /* blit-only rendering path metrics, exposed through sysfs */
63834 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63835 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
63836 - atomic_t bytes_sent; /* to usb, after compression including overhead */
63837 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
63838 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63839 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
63840 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
63841 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
63842 };
63843
63844 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
63845 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
63846 index 0993a22..32ba2fe 100644
63847 --- a/include/video/uvesafb.h
63848 +++ b/include/video/uvesafb.h
63849 @@ -177,6 +177,7 @@ struct uvesafb_par {
63850 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
63851 u8 pmi_setpal; /* PMI for palette changes */
63852 u16 *pmi_base; /* protected mode interface location */
63853 + u8 *pmi_code; /* protected mode code location */
63854 void *pmi_start;
63855 void *pmi_pal;
63856 u8 *vbe_state_orig; /*
63857 diff --git a/init/Kconfig b/init/Kconfig
63858 index 6cfd71d..73cb68d 100644
63859 --- a/init/Kconfig
63860 +++ b/init/Kconfig
63861 @@ -790,6 +790,7 @@ endif # CGROUPS
63862
63863 config CHECKPOINT_RESTORE
63864 bool "Checkpoint/restore support" if EXPERT
63865 + depends on !GRKERNSEC
63866 default n
63867 help
63868 Enables additional kernel features in a sake of checkpoint/restore.
63869 @@ -1240,7 +1241,7 @@ config SLUB_DEBUG
63870
63871 config COMPAT_BRK
63872 bool "Disable heap randomization"
63873 - default y
63874 + default n
63875 help
63876 Randomizing heap placement makes heap exploits harder, but it
63877 also breaks ancient binaries (including anything libc5 based).
63878 diff --git a/init/do_mounts.c b/init/do_mounts.c
63879 index 42b0707..c06eef4 100644
63880 --- a/init/do_mounts.c
63881 +++ b/init/do_mounts.c
63882 @@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
63883 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
63884 {
63885 struct super_block *s;
63886 - int err = sys_mount(name, "/root", fs, flags, data);
63887 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
63888 if (err)
63889 return err;
63890
63891 - sys_chdir((const char __user __force *)"/root");
63892 + sys_chdir((const char __force_user *)"/root");
63893 s = current->fs->pwd.dentry->d_sb;
63894 ROOT_DEV = s->s_dev;
63895 printk(KERN_INFO
63896 @@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
63897 va_start(args, fmt);
63898 vsprintf(buf, fmt, args);
63899 va_end(args);
63900 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
63901 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
63902 if (fd >= 0) {
63903 sys_ioctl(fd, FDEJECT, 0);
63904 sys_close(fd);
63905 }
63906 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
63907 - fd = sys_open("/dev/console", O_RDWR, 0);
63908 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
63909 if (fd >= 0) {
63910 sys_ioctl(fd, TCGETS, (long)&termios);
63911 termios.c_lflag &= ~ICANON;
63912 sys_ioctl(fd, TCSETSF, (long)&termios);
63913 - sys_read(fd, &c, 1);
63914 + sys_read(fd, (char __user *)&c, 1);
63915 termios.c_lflag |= ICANON;
63916 sys_ioctl(fd, TCSETSF, (long)&termios);
63917 sys_close(fd);
63918 @@ -555,6 +555,6 @@ void __init prepare_namespace(void)
63919 mount_root();
63920 out:
63921 devtmpfs_mount("dev");
63922 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
63923 - sys_chroot((const char __user __force *)".");
63924 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
63925 + sys_chroot((const char __force_user *)".");
63926 }
63927 diff --git a/init/do_mounts.h b/init/do_mounts.h
63928 index f5b978a..69dbfe8 100644
63929 --- a/init/do_mounts.h
63930 +++ b/init/do_mounts.h
63931 @@ -15,15 +15,15 @@ extern int root_mountflags;
63932
63933 static inline int create_dev(char *name, dev_t dev)
63934 {
63935 - sys_unlink(name);
63936 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
63937 + sys_unlink((char __force_user *)name);
63938 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
63939 }
63940
63941 #if BITS_PER_LONG == 32
63942 static inline u32 bstat(char *name)
63943 {
63944 struct stat64 stat;
63945 - if (sys_stat64(name, &stat) != 0)
63946 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
63947 return 0;
63948 if (!S_ISBLK(stat.st_mode))
63949 return 0;
63950 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
63951 static inline u32 bstat(char *name)
63952 {
63953 struct stat stat;
63954 - if (sys_newstat(name, &stat) != 0)
63955 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
63956 return 0;
63957 if (!S_ISBLK(stat.st_mode))
63958 return 0;
63959 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
63960 index 9047330..de0d1fb 100644
63961 --- a/init/do_mounts_initrd.c
63962 +++ b/init/do_mounts_initrd.c
63963 @@ -43,13 +43,13 @@ static void __init handle_initrd(void)
63964 create_dev("/dev/root.old", Root_RAM0);
63965 /* mount initrd on rootfs' /root */
63966 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
63967 - sys_mkdir("/old", 0700);
63968 - root_fd = sys_open("/", 0, 0);
63969 - old_fd = sys_open("/old", 0, 0);
63970 + sys_mkdir((const char __force_user *)"/old", 0700);
63971 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
63972 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
63973 /* move initrd over / and chdir/chroot in initrd root */
63974 - sys_chdir("/root");
63975 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
63976 - sys_chroot(".");
63977 + sys_chdir((const char __force_user *)"/root");
63978 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
63979 + sys_chroot((const char __force_user *)".");
63980
63981 /*
63982 * In case that a resume from disk is carried out by linuxrc or one of
63983 @@ -66,15 +66,15 @@ static void __init handle_initrd(void)
63984
63985 /* move initrd to rootfs' /old */
63986 sys_fchdir(old_fd);
63987 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
63988 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
63989 /* switch root and cwd back to / of rootfs */
63990 sys_fchdir(root_fd);
63991 - sys_chroot(".");
63992 + sys_chroot((const char __force_user *)".");
63993 sys_close(old_fd);
63994 sys_close(root_fd);
63995
63996 if (new_decode_dev(real_root_dev) == Root_RAM0) {
63997 - sys_chdir("/old");
63998 + sys_chdir((const char __force_user *)"/old");
63999 return;
64000 }
64001
64002 @@ -82,17 +82,17 @@ static void __init handle_initrd(void)
64003 mount_root();
64004
64005 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
64006 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
64007 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
64008 if (!error)
64009 printk("okay\n");
64010 else {
64011 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
64012 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
64013 if (error == -ENOENT)
64014 printk("/initrd does not exist. Ignored.\n");
64015 else
64016 printk("failed\n");
64017 printk(KERN_NOTICE "Unmounting old root\n");
64018 - sys_umount("/old", MNT_DETACH);
64019 + sys_umount((char __force_user *)"/old", MNT_DETACH);
64020 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
64021 if (fd < 0) {
64022 error = fd;
64023 @@ -115,11 +115,11 @@ int __init initrd_load(void)
64024 * mounted in the normal path.
64025 */
64026 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
64027 - sys_unlink("/initrd.image");
64028 + sys_unlink((const char __force_user *)"/initrd.image");
64029 handle_initrd();
64030 return 1;
64031 }
64032 }
64033 - sys_unlink("/initrd.image");
64034 + sys_unlink((const char __force_user *)"/initrd.image");
64035 return 0;
64036 }
64037 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
64038 index 32c4799..c27ee74 100644
64039 --- a/init/do_mounts_md.c
64040 +++ b/init/do_mounts_md.c
64041 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
64042 partitioned ? "_d" : "", minor,
64043 md_setup_args[ent].device_names);
64044
64045 - fd = sys_open(name, 0, 0);
64046 + fd = sys_open((char __force_user *)name, 0, 0);
64047 if (fd < 0) {
64048 printk(KERN_ERR "md: open failed - cannot start "
64049 "array %s\n", name);
64050 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
64051 * array without it
64052 */
64053 sys_close(fd);
64054 - fd = sys_open(name, 0, 0);
64055 + fd = sys_open((char __force_user *)name, 0, 0);
64056 sys_ioctl(fd, BLKRRPART, 0);
64057 }
64058 sys_close(fd);
64059 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
64060
64061 wait_for_device_probe();
64062
64063 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
64064 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
64065 if (fd >= 0) {
64066 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
64067 sys_close(fd);
64068 diff --git a/init/initramfs.c b/init/initramfs.c
64069 index 8216c30..25e8e32 100644
64070 --- a/init/initramfs.c
64071 +++ b/init/initramfs.c
64072 @@ -74,7 +74,7 @@ static void __init free_hash(void)
64073 }
64074 }
64075
64076 -static long __init do_utime(char __user *filename, time_t mtime)
64077 +static long __init do_utime(__force char __user *filename, time_t mtime)
64078 {
64079 struct timespec t[2];
64080
64081 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
64082 struct dir_entry *de, *tmp;
64083 list_for_each_entry_safe(de, tmp, &dir_list, list) {
64084 list_del(&de->list);
64085 - do_utime(de->name, de->mtime);
64086 + do_utime((char __force_user *)de->name, de->mtime);
64087 kfree(de->name);
64088 kfree(de);
64089 }
64090 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
64091 if (nlink >= 2) {
64092 char *old = find_link(major, minor, ino, mode, collected);
64093 if (old)
64094 - return (sys_link(old, collected) < 0) ? -1 : 1;
64095 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
64096 }
64097 return 0;
64098 }
64099 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
64100 {
64101 struct stat st;
64102
64103 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
64104 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
64105 if (S_ISDIR(st.st_mode))
64106 - sys_rmdir(path);
64107 + sys_rmdir((char __force_user *)path);
64108 else
64109 - sys_unlink(path);
64110 + sys_unlink((char __force_user *)path);
64111 }
64112 }
64113
64114 @@ -305,7 +305,7 @@ static int __init do_name(void)
64115 int openflags = O_WRONLY|O_CREAT;
64116 if (ml != 1)
64117 openflags |= O_TRUNC;
64118 - wfd = sys_open(collected, openflags, mode);
64119 + wfd = sys_open((char __force_user *)collected, openflags, mode);
64120
64121 if (wfd >= 0) {
64122 sys_fchown(wfd, uid, gid);
64123 @@ -317,17 +317,17 @@ static int __init do_name(void)
64124 }
64125 }
64126 } else if (S_ISDIR(mode)) {
64127 - sys_mkdir(collected, mode);
64128 - sys_chown(collected, uid, gid);
64129 - sys_chmod(collected, mode);
64130 + sys_mkdir((char __force_user *)collected, mode);
64131 + sys_chown((char __force_user *)collected, uid, gid);
64132 + sys_chmod((char __force_user *)collected, mode);
64133 dir_add(collected, mtime);
64134 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
64135 S_ISFIFO(mode) || S_ISSOCK(mode)) {
64136 if (maybe_link() == 0) {
64137 - sys_mknod(collected, mode, rdev);
64138 - sys_chown(collected, uid, gid);
64139 - sys_chmod(collected, mode);
64140 - do_utime(collected, mtime);
64141 + sys_mknod((char __force_user *)collected, mode, rdev);
64142 + sys_chown((char __force_user *)collected, uid, gid);
64143 + sys_chmod((char __force_user *)collected, mode);
64144 + do_utime((char __force_user *)collected, mtime);
64145 }
64146 }
64147 return 0;
64148 @@ -336,15 +336,15 @@ static int __init do_name(void)
64149 static int __init do_copy(void)
64150 {
64151 if (count >= body_len) {
64152 - sys_write(wfd, victim, body_len);
64153 + sys_write(wfd, (char __force_user *)victim, body_len);
64154 sys_close(wfd);
64155 - do_utime(vcollected, mtime);
64156 + do_utime((char __force_user *)vcollected, mtime);
64157 kfree(vcollected);
64158 eat(body_len);
64159 state = SkipIt;
64160 return 0;
64161 } else {
64162 - sys_write(wfd, victim, count);
64163 + sys_write(wfd, (char __force_user *)victim, count);
64164 body_len -= count;
64165 eat(count);
64166 return 1;
64167 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
64168 {
64169 collected[N_ALIGN(name_len) + body_len] = '\0';
64170 clean_path(collected, 0);
64171 - sys_symlink(collected + N_ALIGN(name_len), collected);
64172 - sys_lchown(collected, uid, gid);
64173 - do_utime(collected, mtime);
64174 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
64175 + sys_lchown((char __force_user *)collected, uid, gid);
64176 + do_utime((char __force_user *)collected, mtime);
64177 state = SkipIt;
64178 next_state = Reset;
64179 return 0;
64180 diff --git a/init/main.c b/init/main.c
64181 index cb54cd3..8773e3c 100644
64182 --- a/init/main.c
64183 +++ b/init/main.c
64184 @@ -95,6 +95,8 @@ static inline void mark_rodata_ro(void) { }
64185 extern void tc_init(void);
64186 #endif
64187
64188 +extern void grsecurity_init(void);
64189 +
64190 /*
64191 * Debug helper: via this flag we know that we are in 'early bootup code'
64192 * where only the boot processor is running with IRQ disabled. This means
64193 @@ -148,6 +150,49 @@ static int __init set_reset_devices(char *str)
64194
64195 __setup("reset_devices", set_reset_devices);
64196
64197 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
64198 +extern char pax_enter_kernel_user[];
64199 +extern char pax_exit_kernel_user[];
64200 +extern pgdval_t clone_pgd_mask;
64201 +#endif
64202 +
64203 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
64204 +static int __init setup_pax_nouderef(char *str)
64205 +{
64206 +#ifdef CONFIG_X86_32
64207 + unsigned int cpu;
64208 + struct desc_struct *gdt;
64209 +
64210 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
64211 + gdt = get_cpu_gdt_table(cpu);
64212 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
64213 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
64214 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
64215 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
64216 + }
64217 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
64218 +#else
64219 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
64220 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
64221 + clone_pgd_mask = ~(pgdval_t)0UL;
64222 +#endif
64223 +
64224 + return 0;
64225 +}
64226 +early_param("pax_nouderef", setup_pax_nouderef);
64227 +#endif
64228 +
64229 +#ifdef CONFIG_PAX_SOFTMODE
64230 +int pax_softmode;
64231 +
64232 +static int __init setup_pax_softmode(char *str)
64233 +{
64234 + get_option(&str, &pax_softmode);
64235 + return 1;
64236 +}
64237 +__setup("pax_softmode=", setup_pax_softmode);
64238 +#endif
64239 +
64240 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
64241 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
64242 static const char *panic_later, *panic_param;
64243 @@ -674,6 +719,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
64244 {
64245 int count = preempt_count();
64246 int ret;
64247 + const char *msg1 = "", *msg2 = "";
64248
64249 if (initcall_debug)
64250 ret = do_one_initcall_debug(fn);
64251 @@ -686,15 +732,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
64252 sprintf(msgbuf, "error code %d ", ret);
64253
64254 if (preempt_count() != count) {
64255 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
64256 + msg1 = " preemption imbalance";
64257 preempt_count() = count;
64258 }
64259 if (irqs_disabled()) {
64260 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
64261 + msg2 = " disabled interrupts";
64262 local_irq_enable();
64263 }
64264 - if (msgbuf[0]) {
64265 - printk("initcall %pF returned with %s\n", fn, msgbuf);
64266 + if (msgbuf[0] || *msg1 || *msg2) {
64267 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
64268 }
64269
64270 return ret;
64271 @@ -865,7 +911,7 @@ static int __init kernel_init(void * unused)
64272 do_basic_setup();
64273
64274 /* Open the /dev/console on the rootfs, this should never fail */
64275 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
64276 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
64277 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
64278
64279 (void) sys_dup(0);
64280 @@ -878,11 +924,13 @@ static int __init kernel_init(void * unused)
64281 if (!ramdisk_execute_command)
64282 ramdisk_execute_command = "/init";
64283
64284 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
64285 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
64286 ramdisk_execute_command = NULL;
64287 prepare_namespace();
64288 }
64289
64290 + grsecurity_init();
64291 +
64292 /*
64293 * Ok, we have completed the initial bootup, and
64294 * we're essentially up and running. Get rid of the
64295 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
64296 index 28bd64d..c66b72a 100644
64297 --- a/ipc/mqueue.c
64298 +++ b/ipc/mqueue.c
64299 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
64300 mq_bytes = (mq_msg_tblsz +
64301 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
64302
64303 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
64304 spin_lock(&mq_lock);
64305 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
64306 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
64307 diff --git a/ipc/msg.c b/ipc/msg.c
64308 index 7385de2..a8180e08 100644
64309 --- a/ipc/msg.c
64310 +++ b/ipc/msg.c
64311 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
64312 return security_msg_queue_associate(msq, msgflg);
64313 }
64314
64315 +static struct ipc_ops msg_ops = {
64316 + .getnew = newque,
64317 + .associate = msg_security,
64318 + .more_checks = NULL
64319 +};
64320 +
64321 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
64322 {
64323 struct ipc_namespace *ns;
64324 - struct ipc_ops msg_ops;
64325 struct ipc_params msg_params;
64326
64327 ns = current->nsproxy->ipc_ns;
64328
64329 - msg_ops.getnew = newque;
64330 - msg_ops.associate = msg_security;
64331 - msg_ops.more_checks = NULL;
64332 -
64333 msg_params.key = key;
64334 msg_params.flg = msgflg;
64335
64336 diff --git a/ipc/sem.c b/ipc/sem.c
64337 index 5215a81..cfc0cac 100644
64338 --- a/ipc/sem.c
64339 +++ b/ipc/sem.c
64340 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
64341 return 0;
64342 }
64343
64344 +static struct ipc_ops sem_ops = {
64345 + .getnew = newary,
64346 + .associate = sem_security,
64347 + .more_checks = sem_more_checks
64348 +};
64349 +
64350 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64351 {
64352 struct ipc_namespace *ns;
64353 - struct ipc_ops sem_ops;
64354 struct ipc_params sem_params;
64355
64356 ns = current->nsproxy->ipc_ns;
64357 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64358 if (nsems < 0 || nsems > ns->sc_semmsl)
64359 return -EINVAL;
64360
64361 - sem_ops.getnew = newary;
64362 - sem_ops.associate = sem_security;
64363 - sem_ops.more_checks = sem_more_checks;
64364 -
64365 sem_params.key = key;
64366 sem_params.flg = semflg;
64367 sem_params.u.nsems = nsems;
64368 diff --git a/ipc/shm.c b/ipc/shm.c
64369 index 406c5b2..bc66d67 100644
64370 --- a/ipc/shm.c
64371 +++ b/ipc/shm.c
64372 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
64373 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
64374 #endif
64375
64376 +#ifdef CONFIG_GRKERNSEC
64377 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64378 + const time_t shm_createtime, const uid_t cuid,
64379 + const int shmid);
64380 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64381 + const time_t shm_createtime);
64382 +#endif
64383 +
64384 void shm_init_ns(struct ipc_namespace *ns)
64385 {
64386 ns->shm_ctlmax = SHMMAX;
64387 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
64388 shp->shm_lprid = 0;
64389 shp->shm_atim = shp->shm_dtim = 0;
64390 shp->shm_ctim = get_seconds();
64391 +#ifdef CONFIG_GRKERNSEC
64392 + {
64393 + struct timespec timeval;
64394 + do_posix_clock_monotonic_gettime(&timeval);
64395 +
64396 + shp->shm_createtime = timeval.tv_sec;
64397 + }
64398 +#endif
64399 shp->shm_segsz = size;
64400 shp->shm_nattch = 0;
64401 shp->shm_file = file;
64402 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
64403 return 0;
64404 }
64405
64406 +static struct ipc_ops shm_ops = {
64407 + .getnew = newseg,
64408 + .associate = shm_security,
64409 + .more_checks = shm_more_checks
64410 +};
64411 +
64412 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
64413 {
64414 struct ipc_namespace *ns;
64415 - struct ipc_ops shm_ops;
64416 struct ipc_params shm_params;
64417
64418 ns = current->nsproxy->ipc_ns;
64419
64420 - shm_ops.getnew = newseg;
64421 - shm_ops.associate = shm_security;
64422 - shm_ops.more_checks = shm_more_checks;
64423 -
64424 shm_params.key = key;
64425 shm_params.flg = shmflg;
64426 shm_params.u.size = size;
64427 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64428 f_mode = FMODE_READ | FMODE_WRITE;
64429 }
64430 if (shmflg & SHM_EXEC) {
64431 +
64432 +#ifdef CONFIG_PAX_MPROTECT
64433 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
64434 + goto out;
64435 +#endif
64436 +
64437 prot |= PROT_EXEC;
64438 acc_mode |= S_IXUGO;
64439 }
64440 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64441 if (err)
64442 goto out_unlock;
64443
64444 +#ifdef CONFIG_GRKERNSEC
64445 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
64446 + shp->shm_perm.cuid, shmid) ||
64447 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
64448 + err = -EACCES;
64449 + goto out_unlock;
64450 + }
64451 +#endif
64452 +
64453 path = shp->shm_file->f_path;
64454 path_get(&path);
64455 shp->shm_nattch++;
64456 +#ifdef CONFIG_GRKERNSEC
64457 + shp->shm_lapid = current->pid;
64458 +#endif
64459 size = i_size_read(path.dentry->d_inode);
64460 shm_unlock(shp);
64461
64462 diff --git a/kernel/acct.c b/kernel/acct.c
64463 index 02e6167..54824f7 100644
64464 --- a/kernel/acct.c
64465 +++ b/kernel/acct.c
64466 @@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
64467 */
64468 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64469 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
64470 - file->f_op->write(file, (char *)&ac,
64471 + file->f_op->write(file, (char __force_user *)&ac,
64472 sizeof(acct_t), &file->f_pos);
64473 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
64474 set_fs(fs);
64475 diff --git a/kernel/audit.c b/kernel/audit.c
64476 index 1c7f2c6..9ba5359 100644
64477 --- a/kernel/audit.c
64478 +++ b/kernel/audit.c
64479 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
64480 3) suppressed due to audit_rate_limit
64481 4) suppressed due to audit_backlog_limit
64482 */
64483 -static atomic_t audit_lost = ATOMIC_INIT(0);
64484 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
64485
64486 /* The netlink socket. */
64487 static struct sock *audit_sock;
64488 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
64489 unsigned long now;
64490 int print;
64491
64492 - atomic_inc(&audit_lost);
64493 + atomic_inc_unchecked(&audit_lost);
64494
64495 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
64496
64497 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
64498 printk(KERN_WARNING
64499 "audit: audit_lost=%d audit_rate_limit=%d "
64500 "audit_backlog_limit=%d\n",
64501 - atomic_read(&audit_lost),
64502 + atomic_read_unchecked(&audit_lost),
64503 audit_rate_limit,
64504 audit_backlog_limit);
64505 audit_panic(message);
64506 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
64507 status_set.pid = audit_pid;
64508 status_set.rate_limit = audit_rate_limit;
64509 status_set.backlog_limit = audit_backlog_limit;
64510 - status_set.lost = atomic_read(&audit_lost);
64511 + status_set.lost = atomic_read_unchecked(&audit_lost);
64512 status_set.backlog = skb_queue_len(&audit_skb_queue);
64513 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
64514 &status_set, sizeof(status_set));
64515 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
64516 index af1de0f..06dfe57 100644
64517 --- a/kernel/auditsc.c
64518 +++ b/kernel/auditsc.c
64519 @@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
64520 }
64521
64522 /* global counter which is incremented every time something logs in */
64523 -static atomic_t session_id = ATOMIC_INIT(0);
64524 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
64525
64526 /**
64527 * audit_set_loginuid - set current task's audit_context loginuid
64528 @@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
64529 return -EPERM;
64530 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
64531
64532 - sessionid = atomic_inc_return(&session_id);
64533 + sessionid = atomic_inc_return_unchecked(&session_id);
64534 if (context && context->in_syscall) {
64535 struct audit_buffer *ab;
64536
64537 diff --git a/kernel/capability.c b/kernel/capability.c
64538 index 3f1adb6..c564db0 100644
64539 --- a/kernel/capability.c
64540 +++ b/kernel/capability.c
64541 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
64542 * before modification is attempted and the application
64543 * fails.
64544 */
64545 + if (tocopy > ARRAY_SIZE(kdata))
64546 + return -EFAULT;
64547 +
64548 if (copy_to_user(dataptr, kdata, tocopy
64549 * sizeof(struct __user_cap_data_struct))) {
64550 return -EFAULT;
64551 @@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
64552 int ret;
64553
64554 rcu_read_lock();
64555 - ret = security_capable(__task_cred(t), ns, cap);
64556 + ret = security_capable(__task_cred(t), ns, cap) == 0 &&
64557 + gr_task_is_capable(t, __task_cred(t), cap);
64558 rcu_read_unlock();
64559
64560 - return (ret == 0);
64561 + return ret;
64562 }
64563
64564 /**
64565 @@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
64566 int ret;
64567
64568 rcu_read_lock();
64569 - ret = security_capable_noaudit(__task_cred(t), ns, cap);
64570 + ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
64571 rcu_read_unlock();
64572
64573 - return (ret == 0);
64574 + return ret;
64575 }
64576
64577 /**
64578 @@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
64579 BUG();
64580 }
64581
64582 - if (security_capable(current_cred(), ns, cap) == 0) {
64583 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
64584 current->flags |= PF_SUPERPRIV;
64585 return true;
64586 }
64587 @@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
64588 }
64589 EXPORT_SYMBOL(ns_capable);
64590
64591 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
64592 +{
64593 + if (unlikely(!cap_valid(cap))) {
64594 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
64595 + BUG();
64596 + }
64597 +
64598 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
64599 + current->flags |= PF_SUPERPRIV;
64600 + return true;
64601 + }
64602 + return false;
64603 +}
64604 +EXPORT_SYMBOL(ns_capable_nolog);
64605 +
64606 /**
64607 * capable - Determine if the current task has a superior capability in effect
64608 * @cap: The capability to be tested for
64609 @@ -408,6 +427,12 @@ bool capable(int cap)
64610 }
64611 EXPORT_SYMBOL(capable);
64612
64613 +bool capable_nolog(int cap)
64614 +{
64615 + return ns_capable_nolog(&init_user_ns, cap);
64616 +}
64617 +EXPORT_SYMBOL(capable_nolog);
64618 +
64619 /**
64620 * nsown_capable - Check superior capability to one's own user_ns
64621 * @cap: The capability in question
64622 diff --git a/kernel/compat.c b/kernel/compat.c
64623 index d2c67aa..a629b2e 100644
64624 --- a/kernel/compat.c
64625 +++ b/kernel/compat.c
64626 @@ -13,6 +13,7 @@
64627
64628 #include <linux/linkage.h>
64629 #include <linux/compat.h>
64630 +#include <linux/module.h>
64631 #include <linux/errno.h>
64632 #include <linux/time.h>
64633 #include <linux/signal.h>
64634 @@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
64635 mm_segment_t oldfs;
64636 long ret;
64637
64638 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
64639 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
64640 oldfs = get_fs();
64641 set_fs(KERNEL_DS);
64642 ret = hrtimer_nanosleep_restart(restart);
64643 @@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
64644 oldfs = get_fs();
64645 set_fs(KERNEL_DS);
64646 ret = hrtimer_nanosleep(&tu,
64647 - rmtp ? (struct timespec __user *)&rmt : NULL,
64648 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
64649 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
64650 set_fs(oldfs);
64651
64652 @@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
64653 mm_segment_t old_fs = get_fs();
64654
64655 set_fs(KERNEL_DS);
64656 - ret = sys_sigpending((old_sigset_t __user *) &s);
64657 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
64658 set_fs(old_fs);
64659 if (ret == 0)
64660 ret = put_user(s, set);
64661 @@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
64662 mm_segment_t old_fs = get_fs();
64663
64664 set_fs(KERNEL_DS);
64665 - ret = sys_old_getrlimit(resource, &r);
64666 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
64667 set_fs(old_fs);
64668
64669 if (!ret) {
64670 @@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
64671 mm_segment_t old_fs = get_fs();
64672
64673 set_fs(KERNEL_DS);
64674 - ret = sys_getrusage(who, (struct rusage __user *) &r);
64675 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
64676 set_fs(old_fs);
64677
64678 if (ret)
64679 @@ -550,8 +551,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
64680 set_fs (KERNEL_DS);
64681 ret = sys_wait4(pid,
64682 (stat_addr ?
64683 - (unsigned int __user *) &status : NULL),
64684 - options, (struct rusage __user *) &r);
64685 + (unsigned int __force_user *) &status : NULL),
64686 + options, (struct rusage __force_user *) &r);
64687 set_fs (old_fs);
64688
64689 if (ret > 0) {
64690 @@ -576,8 +577,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
64691 memset(&info, 0, sizeof(info));
64692
64693 set_fs(KERNEL_DS);
64694 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
64695 - uru ? (struct rusage __user *)&ru : NULL);
64696 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
64697 + uru ? (struct rusage __force_user *)&ru : NULL);
64698 set_fs(old_fs);
64699
64700 if ((ret < 0) || (info.si_signo == 0))
64701 @@ -707,8 +708,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
64702 oldfs = get_fs();
64703 set_fs(KERNEL_DS);
64704 err = sys_timer_settime(timer_id, flags,
64705 - (struct itimerspec __user *) &newts,
64706 - (struct itimerspec __user *) &oldts);
64707 + (struct itimerspec __force_user *) &newts,
64708 + (struct itimerspec __force_user *) &oldts);
64709 set_fs(oldfs);
64710 if (!err && old && put_compat_itimerspec(old, &oldts))
64711 return -EFAULT;
64712 @@ -725,7 +726,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
64713 oldfs = get_fs();
64714 set_fs(KERNEL_DS);
64715 err = sys_timer_gettime(timer_id,
64716 - (struct itimerspec __user *) &ts);
64717 + (struct itimerspec __force_user *) &ts);
64718 set_fs(oldfs);
64719 if (!err && put_compat_itimerspec(setting, &ts))
64720 return -EFAULT;
64721 @@ -744,7 +745,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
64722 oldfs = get_fs();
64723 set_fs(KERNEL_DS);
64724 err = sys_clock_settime(which_clock,
64725 - (struct timespec __user *) &ts);
64726 + (struct timespec __force_user *) &ts);
64727 set_fs(oldfs);
64728 return err;
64729 }
64730 @@ -759,7 +760,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
64731 oldfs = get_fs();
64732 set_fs(KERNEL_DS);
64733 err = sys_clock_gettime(which_clock,
64734 - (struct timespec __user *) &ts);
64735 + (struct timespec __force_user *) &ts);
64736 set_fs(oldfs);
64737 if (!err && put_compat_timespec(&ts, tp))
64738 return -EFAULT;
64739 @@ -779,7 +780,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
64740
64741 oldfs = get_fs();
64742 set_fs(KERNEL_DS);
64743 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
64744 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
64745 set_fs(oldfs);
64746
64747 err = compat_put_timex(utp, &txc);
64748 @@ -799,7 +800,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
64749 oldfs = get_fs();
64750 set_fs(KERNEL_DS);
64751 err = sys_clock_getres(which_clock,
64752 - (struct timespec __user *) &ts);
64753 + (struct timespec __force_user *) &ts);
64754 set_fs(oldfs);
64755 if (!err && tp && put_compat_timespec(&ts, tp))
64756 return -EFAULT;
64757 @@ -811,9 +812,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
64758 long err;
64759 mm_segment_t oldfs;
64760 struct timespec tu;
64761 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
64762 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
64763
64764 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
64765 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
64766 oldfs = get_fs();
64767 set_fs(KERNEL_DS);
64768 err = clock_nanosleep_restart(restart);
64769 @@ -845,8 +846,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
64770 oldfs = get_fs();
64771 set_fs(KERNEL_DS);
64772 err = sys_clock_nanosleep(which_clock, flags,
64773 - (struct timespec __user *) &in,
64774 - (struct timespec __user *) &out);
64775 + (struct timespec __force_user *) &in,
64776 + (struct timespec __force_user *) &out);
64777 set_fs(oldfs);
64778
64779 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
64780 diff --git a/kernel/configs.c b/kernel/configs.c
64781 index 42e8fa0..9e7406b 100644
64782 --- a/kernel/configs.c
64783 +++ b/kernel/configs.c
64784 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
64785 struct proc_dir_entry *entry;
64786
64787 /* create the current config file */
64788 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
64789 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
64790 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
64791 + &ikconfig_file_ops);
64792 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64793 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
64794 + &ikconfig_file_ops);
64795 +#endif
64796 +#else
64797 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
64798 &ikconfig_file_ops);
64799 +#endif
64800 +
64801 if (!entry)
64802 return -ENOMEM;
64803
64804 diff --git a/kernel/cred.c b/kernel/cred.c
64805 index e70683d..27761b6 100644
64806 --- a/kernel/cred.c
64807 +++ b/kernel/cred.c
64808 @@ -205,6 +205,15 @@ void exit_creds(struct task_struct *tsk)
64809 validate_creds(cred);
64810 put_cred(cred);
64811 }
64812 +
64813 +#ifdef CONFIG_GRKERNSEC_SETXID
64814 + cred = (struct cred *) tsk->delayed_cred;
64815 + if (cred) {
64816 + tsk->delayed_cred = NULL;
64817 + validate_creds(cred);
64818 + put_cred(cred);
64819 + }
64820 +#endif
64821 }
64822
64823 /**
64824 @@ -473,7 +482,7 @@ error_put:
64825 * Always returns 0 thus allowing this function to be tail-called at the end
64826 * of, say, sys_setgid().
64827 */
64828 -int commit_creds(struct cred *new)
64829 +static int __commit_creds(struct cred *new)
64830 {
64831 struct task_struct *task = current;
64832 const struct cred *old = task->real_cred;
64833 @@ -492,6 +501,8 @@ int commit_creds(struct cred *new)
64834
64835 get_cred(new); /* we will require a ref for the subj creds too */
64836
64837 + gr_set_role_label(task, new->uid, new->gid);
64838 +
64839 /* dumpability changes */
64840 if (old->euid != new->euid ||
64841 old->egid != new->egid ||
64842 @@ -541,6 +552,101 @@ int commit_creds(struct cred *new)
64843 put_cred(old);
64844 return 0;
64845 }
64846 +#ifdef CONFIG_GRKERNSEC_SETXID
64847 +extern int set_user(struct cred *new);
64848 +
64849 +void gr_delayed_cred_worker(void)
64850 +{
64851 + const struct cred *new = current->delayed_cred;
64852 + struct cred *ncred;
64853 +
64854 + current->delayed_cred = NULL;
64855 +
64856 + if (current_uid() && new != NULL) {
64857 + // from doing get_cred on it when queueing this
64858 + put_cred(new);
64859 + return;
64860 + } else if (new == NULL)
64861 + return;
64862 +
64863 + ncred = prepare_creds();
64864 + if (!ncred)
64865 + goto die;
64866 + // uids
64867 + ncred->uid = new->uid;
64868 + ncred->euid = new->euid;
64869 + ncred->suid = new->suid;
64870 + ncred->fsuid = new->fsuid;
64871 + // gids
64872 + ncred->gid = new->gid;
64873 + ncred->egid = new->egid;
64874 + ncred->sgid = new->sgid;
64875 + ncred->fsgid = new->fsgid;
64876 + // groups
64877 + if (set_groups(ncred, new->group_info) < 0) {
64878 + abort_creds(ncred);
64879 + goto die;
64880 + }
64881 + // caps
64882 + ncred->securebits = new->securebits;
64883 + ncred->cap_inheritable = new->cap_inheritable;
64884 + ncred->cap_permitted = new->cap_permitted;
64885 + ncred->cap_effective = new->cap_effective;
64886 + ncred->cap_bset = new->cap_bset;
64887 +
64888 + if (set_user(ncred)) {
64889 + abort_creds(ncred);
64890 + goto die;
64891 + }
64892 +
64893 + // from doing get_cred on it when queueing this
64894 + put_cred(new);
64895 +
64896 + __commit_creds(ncred);
64897 + return;
64898 +die:
64899 + // from doing get_cred on it when queueing this
64900 + put_cred(new);
64901 + do_group_exit(SIGKILL);
64902 +}
64903 +#endif
64904 +
64905 +int commit_creds(struct cred *new)
64906 +{
64907 +#ifdef CONFIG_GRKERNSEC_SETXID
64908 + int ret;
64909 + int schedule_it = 0;
64910 + struct task_struct *t;
64911 +
64912 + /* we won't get called with tasklist_lock held for writing
64913 + and interrupts disabled as the cred struct in that case is
64914 + init_cred
64915 + */
64916 + if (grsec_enable_setxid && !current_is_single_threaded() &&
64917 + !current_uid() && new->uid) {
64918 + schedule_it = 1;
64919 + }
64920 + ret = __commit_creds(new);
64921 + if (schedule_it) {
64922 + rcu_read_lock();
64923 + read_lock(&tasklist_lock);
64924 + for (t = next_thread(current); t != current;
64925 + t = next_thread(t)) {
64926 + if (t->delayed_cred == NULL) {
64927 + t->delayed_cred = get_cred(new);
64928 + set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
64929 + set_tsk_need_resched(t);
64930 + }
64931 + }
64932 + read_unlock(&tasklist_lock);
64933 + rcu_read_unlock();
64934 + }
64935 + return ret;
64936 +#else
64937 + return __commit_creds(new);
64938 +#endif
64939 +}
64940 +
64941 EXPORT_SYMBOL(commit_creds);
64942
64943 /**
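The CONFIG_GRKERNSEC_SETXID changes to kernel/cred.c above defer a root-to-non-root credential drop to every sibling thread: commit_creds() queues a reference on each thread's delayed_cred, sets TIF_GRSEC_SETXID, and lets each thread apply the change at a safe point through gr_delayed_cred_worker(). The sketch below is a hypothetical userspace analogue of that propagate-via-per-thread-flag pattern using POSIX threads; it is not part of the patch, and names such as pending_uid, applied and worker are invented for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NWORKERS 4

static atomic_int pending_uid = -1;      /* -1 means "no change queued" */
static atomic_int applied[NWORKERS];     /* per-thread "change applied" flag */

static void *worker(void *arg)
{
        int id = (int)(long)arg;

        for (;;) {
                int uid = atomic_load(&pending_uid);
                if (uid >= 0 && !atomic_load(&applied[id])) {
                        /* safe point: apply the queued change for this thread */
                        printf("thread %d: applying uid %d\n", id, uid);
                        atomic_store(&applied[id], 1);
                        return NULL;
                }
                usleep(1000);
        }
}

int main(void)
{
        pthread_t tids[NWORKERS];

        for (long i = 0; i < NWORKERS; i++)
                pthread_create(&tids[i], NULL, worker, (void *)i);

        /* the "commit_creds" side: queue the change for every thread */
        atomic_store(&pending_uid, 1000);

        for (int i = 0; i < NWORKERS; i++)
                pthread_join(tids[i], NULL);
        return 0;
}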
64944 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
64945 index 0557f24..1a00d9a 100644
64946 --- a/kernel/debug/debug_core.c
64947 +++ b/kernel/debug/debug_core.c
64948 @@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
64949 */
64950 static atomic_t masters_in_kgdb;
64951 static atomic_t slaves_in_kgdb;
64952 -static atomic_t kgdb_break_tasklet_var;
64953 +static atomic_unchecked_t kgdb_break_tasklet_var;
64954 atomic_t kgdb_setting_breakpoint;
64955
64956 struct task_struct *kgdb_usethread;
64957 @@ -132,7 +132,7 @@ int kgdb_single_step;
64958 static pid_t kgdb_sstep_pid;
64959
64960 /* to keep track of the CPU which is doing the single stepping*/
64961 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
64962 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
64963
64964 /*
64965 * If you are debugging a problem where roundup (the collection of
64966 @@ -540,7 +540,7 @@ return_normal:
64967 * kernel will only try for the value of sstep_tries before
64968 * giving up and continuing on.
64969 */
64970 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
64971 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
64972 (kgdb_info[cpu].task &&
64973 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
64974 atomic_set(&kgdb_active, -1);
64975 @@ -634,8 +634,8 @@ cpu_master_loop:
64976 }
64977
64978 kgdb_restore:
64979 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
64980 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
64981 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
64982 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
64983 if (kgdb_info[sstep_cpu].task)
64984 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
64985 else
64986 @@ -861,18 +861,18 @@ static void kgdb_unregister_callbacks(void)
64987 static void kgdb_tasklet_bpt(unsigned long ing)
64988 {
64989 kgdb_breakpoint();
64990 - atomic_set(&kgdb_break_tasklet_var, 0);
64991 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
64992 }
64993
64994 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
64995
64996 void kgdb_schedule_breakpoint(void)
64997 {
64998 - if (atomic_read(&kgdb_break_tasklet_var) ||
64999 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
65000 atomic_read(&kgdb_active) != -1 ||
65001 atomic_read(&kgdb_setting_breakpoint))
65002 return;
65003 - atomic_inc(&kgdb_break_tasklet_var);
65004 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
65005 tasklet_schedule(&kgdb_tasklet_breakpoint);
65006 }
65007 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
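The atomic_t to atomic_unchecked_t conversions in debug_core.c come from the PaX REFCOUNT feature: ordinary atomic_t counters gain overflow detection, while counters that are only state markers or statistics (kgdb_break_tasklet_var, kgdb_cpu_doing_single_step) are switched to the _unchecked variants to opt out. A rough userspace sketch of the checked-versus-unchecked distinction, using the GCC/Clang overflow builtins; this illustrates the idea only and is not the PaX implementation.

#include <stdio.h>
#include <limits.h>

/* "checked" add: refuses to wrap, mimicking a refcount with overflow detection */
static int checked_add(int *counter, int delta)
{
        int result;
        if (__builtin_add_overflow(*counter, delta, &result))
                return -1;              /* would overflow: reject */
        *counter = result;
        return 0;
}

/* "unchecked" add: plain wrap-around arithmetic, fine for statistics */
static void unchecked_add(unsigned int *counter, unsigned int delta)
{
        *counter += delta;
}

int main(void)
{
        int refs = INT_MAX;
        unsigned int stats = 0xffffffffu;

        if (checked_add(&refs, 1) < 0)
                printf("checked counter: overflow detected, value kept at %d\n", refs);

        unchecked_add(&stats, 1);
        printf("unchecked counter wrapped to %u\n", stats);
        return 0;
}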
65008 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
65009 index 67b847d..93834dd 100644
65010 --- a/kernel/debug/kdb/kdb_main.c
65011 +++ b/kernel/debug/kdb/kdb_main.c
65012 @@ -1983,7 +1983,7 @@ static int kdb_lsmod(int argc, const char **argv)
65013 list_for_each_entry(mod, kdb_modules, list) {
65014
65015 kdb_printf("%-20s%8u 0x%p ", mod->name,
65016 - mod->core_size, (void *)mod);
65017 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
65018 #ifdef CONFIG_MODULE_UNLOAD
65019 kdb_printf("%4ld ", module_refcount(mod));
65020 #endif
65021 @@ -1993,7 +1993,7 @@ static int kdb_lsmod(int argc, const char **argv)
65022 kdb_printf(" (Loading)");
65023 else
65024 kdb_printf(" (Live)");
65025 - kdb_printf(" 0x%p", mod->module_core);
65026 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
65027
65028 #ifdef CONFIG_MODULE_UNLOAD
65029 {
65030 diff --git a/kernel/events/core.c b/kernel/events/core.c
65031 index fd126f8..70b755b 100644
65032 --- a/kernel/events/core.c
65033 +++ b/kernel/events/core.c
65034 @@ -181,7 +181,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
65035 return 0;
65036 }
65037
65038 -static atomic64_t perf_event_id;
65039 +static atomic64_unchecked_t perf_event_id;
65040
65041 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
65042 enum event_type_t event_type);
65043 @@ -2659,7 +2659,7 @@ static void __perf_event_read(void *info)
65044
65045 static inline u64 perf_event_count(struct perf_event *event)
65046 {
65047 - return local64_read(&event->count) + atomic64_read(&event->child_count);
65048 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
65049 }
65050
65051 static u64 perf_event_read(struct perf_event *event)
65052 @@ -2983,9 +2983,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
65053 mutex_lock(&event->child_mutex);
65054 total += perf_event_read(event);
65055 *enabled += event->total_time_enabled +
65056 - atomic64_read(&event->child_total_time_enabled);
65057 + atomic64_read_unchecked(&event->child_total_time_enabled);
65058 *running += event->total_time_running +
65059 - atomic64_read(&event->child_total_time_running);
65060 + atomic64_read_unchecked(&event->child_total_time_running);
65061
65062 list_for_each_entry(child, &event->child_list, child_list) {
65063 total += perf_event_read(child);
65064 @@ -3393,10 +3393,10 @@ void perf_event_update_userpage(struct perf_event *event)
65065 userpg->offset -= local64_read(&event->hw.prev_count);
65066
65067 userpg->time_enabled = enabled +
65068 - atomic64_read(&event->child_total_time_enabled);
65069 + atomic64_read_unchecked(&event->child_total_time_enabled);
65070
65071 userpg->time_running = running +
65072 - atomic64_read(&event->child_total_time_running);
65073 + atomic64_read_unchecked(&event->child_total_time_running);
65074
65075 arch_perf_update_userpage(userpg, now);
65076
65077 @@ -3829,11 +3829,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
65078 values[n++] = perf_event_count(event);
65079 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
65080 values[n++] = enabled +
65081 - atomic64_read(&event->child_total_time_enabled);
65082 + atomic64_read_unchecked(&event->child_total_time_enabled);
65083 }
65084 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
65085 values[n++] = running +
65086 - atomic64_read(&event->child_total_time_running);
65087 + atomic64_read_unchecked(&event->child_total_time_running);
65088 }
65089 if (read_format & PERF_FORMAT_ID)
65090 values[n++] = primary_event_id(event);
65091 @@ -4511,12 +4511,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
65092 * need to add enough zero bytes after the string to handle
65093 * the 64bit alignment we do later.
65094 */
65095 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
65096 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
65097 if (!buf) {
65098 name = strncpy(tmp, "//enomem", sizeof(tmp));
65099 goto got_name;
65100 }
65101 - name = d_path(&file->f_path, buf, PATH_MAX);
65102 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
65103 if (IS_ERR(name)) {
65104 name = strncpy(tmp, "//toolong", sizeof(tmp));
65105 goto got_name;
65106 @@ -5929,7 +5929,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
65107 event->parent = parent_event;
65108
65109 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65110 - event->id = atomic64_inc_return(&perf_event_id);
65111 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
65112
65113 event->state = PERF_EVENT_STATE_INACTIVE;
65114
65115 @@ -6491,10 +6491,10 @@ static void sync_child_event(struct perf_event *child_event,
65116 /*
65117 * Add back the child's count to the parent's count:
65118 */
65119 - atomic64_add(child_val, &parent_event->child_count);
65120 - atomic64_add(child_event->total_time_enabled,
65121 + atomic64_add_unchecked(child_val, &parent_event->child_count);
65122 + atomic64_add_unchecked(child_event->total_time_enabled,
65123 &parent_event->child_total_time_enabled);
65124 - atomic64_add(child_event->total_time_running,
65125 + atomic64_add_unchecked(child_event->total_time_running,
65126 &parent_event->child_total_time_running);
65127
65128 /*
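Besides the _unchecked conversions, the perf_event_mmap_event() hunk above keeps the buffer allocation at PATH_MAX but tells d_path() it only has PATH_MAX - sizeof(u64) bytes, so the trailing eight bytes stay zeroed and inside the allocation when the record is later padded to 64-bit alignment. A small hypothetical userspace sketch of that reserve-headroom-for-padding pattern (BUF_MAX and the path string are invented, not taken from the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define BUF_MAX 64              /* stand-in for PATH_MAX */

int main(void)
{
        /* zeroed buffer; the last sizeof(uint64_t) bytes are reserved */
        char *buf = calloc(1, BUF_MAX);
        if (!buf)
                return 1;

        /* the producer is only allowed BUF_MAX - 8 bytes, like d_path() above */
        snprintf(buf, BUF_MAX - sizeof(uint64_t), "/some/example/path");

        /* the record length can be rounded up to 8 bytes safely: the padding
         * bytes are inside the allocation and already zero */
        size_t len = (strlen(buf) + 1 + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1);
        printf("payload \"%s\", padded length %zu\n", buf, len);

        free(buf);
        return 0;
}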
65129 diff --git a/kernel/exit.c b/kernel/exit.c
65130 index d8bd3b42..26bd8dc 100644
65131 --- a/kernel/exit.c
65132 +++ b/kernel/exit.c
65133 @@ -59,6 +59,10 @@
65134 #include <asm/pgtable.h>
65135 #include <asm/mmu_context.h>
65136
65137 +#ifdef CONFIG_GRKERNSEC
65138 +extern rwlock_t grsec_exec_file_lock;
65139 +#endif
65140 +
65141 static void exit_mm(struct task_struct * tsk);
65142
65143 static void __unhash_process(struct task_struct *p, bool group_dead)
65144 @@ -170,6 +174,10 @@ void release_task(struct task_struct * p)
65145 struct task_struct *leader;
65146 int zap_leader;
65147 repeat:
65148 +#ifdef CONFIG_NET
65149 + gr_del_task_from_ip_table(p);
65150 +#endif
65151 +
65152 /* don't need to get the RCU readlock here - the process is dead and
65153 * can't be modifying its own credentials. But shut RCU-lockdep up */
65154 rcu_read_lock();
65155 @@ -382,7 +390,7 @@ int allow_signal(int sig)
65156 * know it'll be handled, so that they don't get converted to
65157 * SIGKILL or just silently dropped.
65158 */
65159 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
65160 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
65161 recalc_sigpending();
65162 spin_unlock_irq(&current->sighand->siglock);
65163 return 0;
65164 @@ -418,6 +426,17 @@ void daemonize(const char *name, ...)
65165 vsnprintf(current->comm, sizeof(current->comm), name, args);
65166 va_end(args);
65167
65168 +#ifdef CONFIG_GRKERNSEC
65169 + write_lock(&grsec_exec_file_lock);
65170 + if (current->exec_file) {
65171 + fput(current->exec_file);
65172 + current->exec_file = NULL;
65173 + }
65174 + write_unlock(&grsec_exec_file_lock);
65175 +#endif
65176 +
65177 + gr_set_kernel_label(current);
65178 +
65179 /*
65180 * If we were started as result of loading a module, close all of the
65181 * user space pages. We don't need them, and if we didn't close them
65182 @@ -900,6 +919,8 @@ void do_exit(long code)
65183 struct task_struct *tsk = current;
65184 int group_dead;
65185
65186 + set_fs(USER_DS);
65187 +
65188 profile_task_exit(tsk);
65189
65190 WARN_ON(blk_needs_flush_plug(tsk));
65191 @@ -916,7 +937,6 @@ void do_exit(long code)
65192 * mm_release()->clear_child_tid() from writing to a user-controlled
65193 * kernel address.
65194 */
65195 - set_fs(USER_DS);
65196
65197 ptrace_event(PTRACE_EVENT_EXIT, code);
65198
65199 @@ -977,6 +997,9 @@ void do_exit(long code)
65200 tsk->exit_code = code;
65201 taskstats_exit(tsk, group_dead);
65202
65203 + gr_acl_handle_psacct(tsk, code);
65204 + gr_acl_handle_exit();
65205 +
65206 exit_mm(tsk);
65207
65208 if (group_dead)
65209 @@ -1093,7 +1116,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
65210 * Take down every thread in the group. This is called by fatal signals
65211 * as well as by sys_exit_group (below).
65212 */
65213 -void
65214 +__noreturn void
65215 do_group_exit(int exit_code)
65216 {
65217 struct signal_struct *sig = current->signal;
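Marking do_group_exit() as __noreturn, as the last exit.c hunk does, tells the compiler and static analyzers that control never comes back, which suppresses spurious fall-through warnings in callers and catches accidental returns inside the function. A minimal userspace illustration with the standard C11 spelling (the kernel uses __noreturn from its compiler headers; this snippet is not kernel code):

#include <stdio.h>
#include <stdlib.h>

/* _Noreturn is the C11 keyword; the kernel equivalent is __noreturn */
static _Noreturn void die(const char *msg)
{
        fprintf(stderr, "fatal: %s\n", msg);
        exit(1);
        /* falling off the end here would draw a compiler warning */
}

int main(int argc, char **argv)
{
        if (argc < 2)
                die("missing argument");
        /* the compiler knows die() does not return, so argv[1] is
         * provably reachable only when argc >= 2 */
        printf("argument: %s\n", argv[1]);
        return 0;
}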
65218 diff --git a/kernel/fork.c b/kernel/fork.c
65219 index 8163333..efb4692 100644
65220 --- a/kernel/fork.c
65221 +++ b/kernel/fork.c
65222 @@ -286,7 +286,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
65223 *stackend = STACK_END_MAGIC; /* for overflow detection */
65224
65225 #ifdef CONFIG_CC_STACKPROTECTOR
65226 - tsk->stack_canary = get_random_int();
65227 + tsk->stack_canary = pax_get_random_long();
65228 #endif
65229
65230 /*
65231 @@ -310,13 +310,78 @@ out:
65232 }
65233
65234 #ifdef CONFIG_MMU
65235 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
65236 +{
65237 + struct vm_area_struct *tmp;
65238 + unsigned long charge;
65239 + struct mempolicy *pol;
65240 + struct file *file;
65241 +
65242 + charge = 0;
65243 + if (mpnt->vm_flags & VM_ACCOUNT) {
65244 + unsigned long len;
65245 + len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65246 + if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
65247 + goto fail_nomem;
65248 + charge = len;
65249 + }
65250 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65251 + if (!tmp)
65252 + goto fail_nomem;
65253 + *tmp = *mpnt;
65254 + tmp->vm_mm = mm;
65255 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
65256 + pol = mpol_dup(vma_policy(mpnt));
65257 + if (IS_ERR(pol))
65258 + goto fail_nomem_policy;
65259 + vma_set_policy(tmp, pol);
65260 + if (anon_vma_fork(tmp, mpnt))
65261 + goto fail_nomem_anon_vma_fork;
65262 + tmp->vm_flags &= ~VM_LOCKED;
65263 + tmp->vm_next = tmp->vm_prev = NULL;
65264 + tmp->vm_mirror = NULL;
65265 + file = tmp->vm_file;
65266 + if (file) {
65267 + struct inode *inode = file->f_path.dentry->d_inode;
65268 + struct address_space *mapping = file->f_mapping;
65269 +
65270 + get_file(file);
65271 + if (tmp->vm_flags & VM_DENYWRITE)
65272 + atomic_dec(&inode->i_writecount);
65273 + mutex_lock(&mapping->i_mmap_mutex);
65274 + if (tmp->vm_flags & VM_SHARED)
65275 + mapping->i_mmap_writable++;
65276 + flush_dcache_mmap_lock(mapping);
65277 + /* insert tmp into the share list, just after mpnt */
65278 + vma_prio_tree_add(tmp, mpnt);
65279 + flush_dcache_mmap_unlock(mapping);
65280 + mutex_unlock(&mapping->i_mmap_mutex);
65281 + }
65282 +
65283 + /*
65284 + * Clear hugetlb-related page reserves for children. This only
65285 + * affects MAP_PRIVATE mappings. Faults generated by the child
65286 + * are not guaranteed to succeed, even if read-only
65287 + */
65288 + if (is_vm_hugetlb_page(tmp))
65289 + reset_vma_resv_huge_pages(tmp);
65290 +
65291 + return tmp;
65292 +
65293 +fail_nomem_anon_vma_fork:
65294 + mpol_put(pol);
65295 +fail_nomem_policy:
65296 + kmem_cache_free(vm_area_cachep, tmp);
65297 +fail_nomem:
65298 + vm_unacct_memory(charge);
65299 + return NULL;
65300 +}
65301 +
65302 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65303 {
65304 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
65305 struct rb_node **rb_link, *rb_parent;
65306 int retval;
65307 - unsigned long charge;
65308 - struct mempolicy *pol;
65309
65310 down_write(&oldmm->mmap_sem);
65311 flush_cache_dup_mm(oldmm);
65312 @@ -328,8 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65313 mm->locked_vm = 0;
65314 mm->mmap = NULL;
65315 mm->mmap_cache = NULL;
65316 - mm->free_area_cache = oldmm->mmap_base;
65317 - mm->cached_hole_size = ~0UL;
65318 + mm->free_area_cache = oldmm->free_area_cache;
65319 + mm->cached_hole_size = oldmm->cached_hole_size;
65320 mm->map_count = 0;
65321 cpumask_clear(mm_cpumask(mm));
65322 mm->mm_rb = RB_ROOT;
65323 @@ -345,8 +410,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65324
65325 prev = NULL;
65326 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
65327 - struct file *file;
65328 -
65329 if (mpnt->vm_flags & VM_DONTCOPY) {
65330 long pages = vma_pages(mpnt);
65331 mm->total_vm -= pages;
65332 @@ -354,54 +417,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65333 -pages);
65334 continue;
65335 }
65336 - charge = 0;
65337 - if (mpnt->vm_flags & VM_ACCOUNT) {
65338 - unsigned long len;
65339 - len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65340 - if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
65341 - goto fail_nomem;
65342 - charge = len;
65343 + tmp = dup_vma(mm, oldmm, mpnt);
65344 + if (!tmp) {
65345 + retval = -ENOMEM;
65346 + goto out;
65347 }
65348 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65349 - if (!tmp)
65350 - goto fail_nomem;
65351 - *tmp = *mpnt;
65352 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
65353 - pol = mpol_dup(vma_policy(mpnt));
65354 - retval = PTR_ERR(pol);
65355 - if (IS_ERR(pol))
65356 - goto fail_nomem_policy;
65357 - vma_set_policy(tmp, pol);
65358 - tmp->vm_mm = mm;
65359 - if (anon_vma_fork(tmp, mpnt))
65360 - goto fail_nomem_anon_vma_fork;
65361 - tmp->vm_flags &= ~VM_LOCKED;
65362 - tmp->vm_next = tmp->vm_prev = NULL;
65363 - file = tmp->vm_file;
65364 - if (file) {
65365 - struct inode *inode = file->f_path.dentry->d_inode;
65366 - struct address_space *mapping = file->f_mapping;
65367 -
65368 - get_file(file);
65369 - if (tmp->vm_flags & VM_DENYWRITE)
65370 - atomic_dec(&inode->i_writecount);
65371 - mutex_lock(&mapping->i_mmap_mutex);
65372 - if (tmp->vm_flags & VM_SHARED)
65373 - mapping->i_mmap_writable++;
65374 - flush_dcache_mmap_lock(mapping);
65375 - /* insert tmp into the share list, just after mpnt */
65376 - vma_prio_tree_add(tmp, mpnt);
65377 - flush_dcache_mmap_unlock(mapping);
65378 - mutex_unlock(&mapping->i_mmap_mutex);
65379 - }
65380 -
65381 - /*
65382 - * Clear hugetlb-related page reserves for children. This only
65383 - * affects MAP_PRIVATE mappings. Faults generated by the child
65384 - * are not guaranteed to succeed, even if read-only
65385 - */
65386 - if (is_vm_hugetlb_page(tmp))
65387 - reset_vma_resv_huge_pages(tmp);
65388
65389 /*
65390 * Link in the new vma and copy the page table entries.
65391 @@ -424,6 +444,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65392 if (retval)
65393 goto out;
65394 }
65395 +
65396 +#ifdef CONFIG_PAX_SEGMEXEC
65397 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
65398 + struct vm_area_struct *mpnt_m;
65399 +
65400 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
65401 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
65402 +
65403 + if (!mpnt->vm_mirror)
65404 + continue;
65405 +
65406 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
65407 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
65408 + mpnt->vm_mirror = mpnt_m;
65409 + } else {
65410 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
65411 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
65412 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
65413 + mpnt->vm_mirror->vm_mirror = mpnt;
65414 + }
65415 + }
65416 + BUG_ON(mpnt_m);
65417 + }
65418 +#endif
65419 +
65420 /* a new mm has just been created */
65421 arch_dup_mmap(oldmm, mm);
65422 retval = 0;
65423 @@ -432,14 +477,6 @@ out:
65424 flush_tlb_mm(oldmm);
65425 up_write(&oldmm->mmap_sem);
65426 return retval;
65427 -fail_nomem_anon_vma_fork:
65428 - mpol_put(pol);
65429 -fail_nomem_policy:
65430 - kmem_cache_free(vm_area_cachep, tmp);
65431 -fail_nomem:
65432 - retval = -ENOMEM;
65433 - vm_unacct_memory(charge);
65434 - goto out;
65435 }
65436
65437 static inline int mm_alloc_pgd(struct mm_struct *mm)
65438 @@ -676,8 +713,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
65439 return ERR_PTR(err);
65440
65441 mm = get_task_mm(task);
65442 - if (mm && mm != current->mm &&
65443 - !ptrace_may_access(task, mode)) {
65444 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
65445 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
65446 mmput(mm);
65447 mm = ERR_PTR(-EACCES);
65448 }
65449 @@ -899,13 +936,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
65450 spin_unlock(&fs->lock);
65451 return -EAGAIN;
65452 }
65453 - fs->users++;
65454 + atomic_inc(&fs->users);
65455 spin_unlock(&fs->lock);
65456 return 0;
65457 }
65458 tsk->fs = copy_fs_struct(fs);
65459 if (!tsk->fs)
65460 return -ENOMEM;
65461 + gr_set_chroot_entries(tsk, &tsk->fs->root);
65462 return 0;
65463 }
65464
65465 @@ -1172,6 +1210,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65466 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
65467 #endif
65468 retval = -EAGAIN;
65469 +
65470 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
65471 +
65472 if (atomic_read(&p->real_cred->user->processes) >=
65473 task_rlimit(p, RLIMIT_NPROC)) {
65474 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
65475 @@ -1328,6 +1369,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65476 if (clone_flags & CLONE_THREAD)
65477 p->tgid = current->tgid;
65478
65479 + gr_copy_label(p);
65480 +
65481 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
65482 /*
65483 * Clear TID on mm_release()?
65484 @@ -1502,6 +1545,8 @@ bad_fork_cleanup_count:
65485 bad_fork_free:
65486 free_task(p);
65487 fork_out:
65488 + gr_log_forkfail(retval);
65489 +
65490 return ERR_PTR(retval);
65491 }
65492
65493 @@ -1602,6 +1647,8 @@ long do_fork(unsigned long clone_flags,
65494 if (clone_flags & CLONE_PARENT_SETTID)
65495 put_user(nr, parent_tidptr);
65496
65497 + gr_handle_brute_check();
65498 +
65499 if (clone_flags & CLONE_VFORK) {
65500 p->vfork_done = &vfork;
65501 init_completion(&vfork);
65502 @@ -1700,7 +1747,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
65503 return 0;
65504
65505 /* don't need lock here; in the worst case we'll do useless copy */
65506 - if (fs->users == 1)
65507 + if (atomic_read(&fs->users) == 1)
65508 return 0;
65509
65510 *new_fsp = copy_fs_struct(fs);
65511 @@ -1789,7 +1836,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
65512 fs = current->fs;
65513 spin_lock(&fs->lock);
65514 current->fs = new_fs;
65515 - if (--fs->users)
65516 + gr_set_chroot_entries(current, &current->fs->root);
65517 + if (atomic_dec_return(&fs->users))
65518 new_fs = NULL;
65519 else
65520 new_fs = fs;
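The largest part of the fork.c changes hoists the per-VMA duplication out of dup_mmap(), including its goto-based error unwinding, into a dup_vma() helper that returns NULL on failure, so the copy loop only has to test the return value. A generic userspace sketch of that extract-allocate-and-unwind pattern is shown below; all names (dup_item, struct item) are invented for illustration and do not appear in the patch.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct item {
        char *name;
        int  *data;
};

/* duplicate one item; on any failure unwind partial work and return NULL */
static struct item *dup_item(const struct item *src)
{
        struct item *tmp = malloc(sizeof(*tmp));
        if (!tmp)
                goto fail;
        tmp->name = strdup(src->name);
        if (!tmp->name)
                goto fail_free_item;
        tmp->data = malloc(16 * sizeof(int));
        if (!tmp->data)
                goto fail_free_name;
        memcpy(tmp->data, src->data, 16 * sizeof(int));
        return tmp;

fail_free_name:
        free(tmp->name);
fail_free_item:
        free(tmp);
fail:
        return NULL;
}

int main(void)
{
        int data[16] = { 42 };
        struct item src = { "example", data };
        struct item *copy = dup_item(&src);

        if (!copy)              /* the caller only checks for NULL, as dup_mmap() now does */
                return 1;
        printf("%s %d\n", copy->name, copy->data[0]);
        free(copy->data);
        free(copy->name);
        free(copy);
        return 0;
}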
65521 diff --git a/kernel/futex.c b/kernel/futex.c
65522 index e2b0fb9..db818ac 100644
65523 --- a/kernel/futex.c
65524 +++ b/kernel/futex.c
65525 @@ -54,6 +54,7 @@
65526 #include <linux/mount.h>
65527 #include <linux/pagemap.h>
65528 #include <linux/syscalls.h>
65529 +#include <linux/ptrace.h>
65530 #include <linux/signal.h>
65531 #include <linux/export.h>
65532 #include <linux/magic.h>
65533 @@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
65534 struct page *page, *page_head;
65535 int err, ro = 0;
65536
65537 +#ifdef CONFIG_PAX_SEGMEXEC
65538 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
65539 + return -EFAULT;
65540 +#endif
65541 +
65542 /*
65543 * The futex address must be "naturally" aligned.
65544 */
65545 @@ -2711,6 +2717,7 @@ static int __init futex_init(void)
65546 {
65547 u32 curval;
65548 int i;
65549 + mm_segment_t oldfs;
65550
65551 /*
65552 * This will fail and we want it. Some arch implementations do
65553 @@ -2722,8 +2729,11 @@ static int __init futex_init(void)
65554 * implementation, the non-functional ones will return
65555 * -ENOSYS.
65556 */
65557 + oldfs = get_fs();
65558 + set_fs(USER_DS);
65559 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
65560 futex_cmpxchg_enabled = 1;
65561 + set_fs(oldfs);
65562
65563 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
65564 plist_head_init(&futex_queues[i].chain);
65565 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
65566 index 9b22d03..6295b62 100644
65567 --- a/kernel/gcov/base.c
65568 +++ b/kernel/gcov/base.c
65569 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
65570 }
65571
65572 #ifdef CONFIG_MODULES
65573 -static inline int within(void *addr, void *start, unsigned long size)
65574 -{
65575 - return ((addr >= start) && (addr < start + size));
65576 -}
65577 -
65578 /* Update list and generate events when modules are unloaded. */
65579 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65580 void *data)
65581 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65582 prev = NULL;
65583 /* Remove entries located in module from linked list. */
65584 for (info = gcov_info_head; info; info = info->next) {
65585 - if (within(info, mod->module_core, mod->core_size)) {
65586 + if (within_module_core_rw((unsigned long)info, mod)) {
65587 if (prev)
65588 prev->next = info->next;
65589 else
65590 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
65591 index ae34bf5..4e2f3d0 100644
65592 --- a/kernel/hrtimer.c
65593 +++ b/kernel/hrtimer.c
65594 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
65595 local_irq_restore(flags);
65596 }
65597
65598 -static void run_hrtimer_softirq(struct softirq_action *h)
65599 +static void run_hrtimer_softirq(void)
65600 {
65601 hrtimer_peek_ahead_timers();
65602 }
65603 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
65604 index 4304919..bbc53fa 100644
65605 --- a/kernel/jump_label.c
65606 +++ b/kernel/jump_label.c
65607 @@ -50,7 +50,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
65608
65609 size = (((unsigned long)stop - (unsigned long)start)
65610 / sizeof(struct jump_entry));
65611 + pax_open_kernel();
65612 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
65613 + pax_close_kernel();
65614 }
65615
65616 static void jump_label_update(struct static_key *key, int enable);
65617 @@ -356,10 +358,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
65618 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
65619 struct jump_entry *iter;
65620
65621 + pax_open_kernel();
65622 for (iter = iter_start; iter < iter_stop; iter++) {
65623 if (within_module_init(iter->code, mod))
65624 iter->code = 0;
65625 }
65626 + pax_close_kernel();
65627 }
65628
65629 static int
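pax_open_kernel()/pax_close_kernel(), wrapped around the jump-table sort() and init invalidation above, briefly lift the KERNEXEC write protection so otherwise read-only kernel data can be patched, then restore it. A rough userspace analogue using mprotect() on a read-only page; this only illustrates the open-patch-close idea (the real mechanism toggles kernel page protections, not mprotect), and the data in it is made up.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);

        /* data that is normally kept read-only */
        char *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED)
                return 1;
        strcpy(page, "old value");
        mprotect(page, pagesz, PROT_READ);           /* lock it down */

        /* "pax_open_kernel": briefly allow writes ... */
        mprotect(page, pagesz, PROT_READ | PROT_WRITE);
        strcpy(page, "patched value");               /* ... patch in place ... */
        /* "pax_close_kernel": restore read-only */
        mprotect(page, pagesz, PROT_READ);

        printf("%s\n", page);
        return 0;
}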
65630 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
65631 index 079f1d3..a407562 100644
65632 --- a/kernel/kallsyms.c
65633 +++ b/kernel/kallsyms.c
65634 @@ -11,6 +11,9 @@
65635 * Changed the compression method from stem compression to "table lookup"
65636 * compression (see scripts/kallsyms.c for a more complete description)
65637 */
65638 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65639 +#define __INCLUDED_BY_HIDESYM 1
65640 +#endif
65641 #include <linux/kallsyms.h>
65642 #include <linux/module.h>
65643 #include <linux/init.h>
65644 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
65645
65646 static inline int is_kernel_inittext(unsigned long addr)
65647 {
65648 + if (system_state != SYSTEM_BOOTING)
65649 + return 0;
65650 +
65651 if (addr >= (unsigned long)_sinittext
65652 && addr <= (unsigned long)_einittext)
65653 return 1;
65654 return 0;
65655 }
65656
65657 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65658 +#ifdef CONFIG_MODULES
65659 +static inline int is_module_text(unsigned long addr)
65660 +{
65661 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
65662 + return 1;
65663 +
65664 + addr = ktla_ktva(addr);
65665 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
65666 +}
65667 +#else
65668 +static inline int is_module_text(unsigned long addr)
65669 +{
65670 + return 0;
65671 +}
65672 +#endif
65673 +#endif
65674 +
65675 static inline int is_kernel_text(unsigned long addr)
65676 {
65677 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
65678 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
65679
65680 static inline int is_kernel(unsigned long addr)
65681 {
65682 +
65683 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65684 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
65685 + return 1;
65686 +
65687 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
65688 +#else
65689 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
65690 +#endif
65691 +
65692 return 1;
65693 return in_gate_area_no_mm(addr);
65694 }
65695
65696 static int is_ksym_addr(unsigned long addr)
65697 {
65698 +
65699 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65700 + if (is_module_text(addr))
65701 + return 0;
65702 +#endif
65703 +
65704 if (all_var)
65705 return is_kernel(addr);
65706
65707 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
65708
65709 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
65710 {
65711 - iter->name[0] = '\0';
65712 iter->nameoff = get_symbol_offset(new_pos);
65713 iter->pos = new_pos;
65714 }
65715 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
65716 {
65717 struct kallsym_iter *iter = m->private;
65718
65719 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65720 + if (current_uid())
65721 + return 0;
65722 +#endif
65723 +
65724 /* Some debugging symbols have no name. Ignore them. */
65725 if (!iter->name[0])
65726 return 0;
65727 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
65728 struct kallsym_iter *iter;
65729 int ret;
65730
65731 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
65732 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
65733 if (!iter)
65734 return -ENOMEM;
65735 reset_iter(iter, 0);
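Two independent tweaks sit in the kallsyms.c hunks: s_show() returns nothing to unprivileged readers under CONFIG_GRKERNSEC_HIDESYM, and kallsyms_open() switches the iterator allocation from kmalloc() to kzalloc(), so no stale heap contents can reach userspace through a partially initialised iterator (which also appears to be why the explicit iter->name[0] = '\0' in reset_iter() is dropped). The userspace equivalent of that second change is simply preferring calloc() over malloc() for structures that are later exposed field by field, e.g. (illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct iter {
        char name[32];
        unsigned long value;
};

int main(void)
{
        /* malloc() would leave name/value holding whatever was on the heap;
         * calloc() guarantees the structure starts fully zeroed, so code that
         * tests name[0] == '\0' or prints value never sees stale data */
        struct iter *it = calloc(1, sizeof(*it));
        if (!it)
                return 1;

        printf("name=\"%s\" value=%lu\n", it->name, it->value);
        free(it);
        return 0;
}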
65736 diff --git a/kernel/kexec.c b/kernel/kexec.c
65737 index 4e2e472..cd0c7ae 100644
65738 --- a/kernel/kexec.c
65739 +++ b/kernel/kexec.c
65740 @@ -1046,7 +1046,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
65741 unsigned long flags)
65742 {
65743 struct compat_kexec_segment in;
65744 - struct kexec_segment out, __user *ksegments;
65745 + struct kexec_segment out;
65746 + struct kexec_segment __user *ksegments;
65747 unsigned long i, result;
65748
65749 /* Don't allow clients that don't understand the native
65750 diff --git a/kernel/kmod.c b/kernel/kmod.c
65751 index 05698a7..a4c1e3a 100644
65752 --- a/kernel/kmod.c
65753 +++ b/kernel/kmod.c
65754 @@ -66,7 +66,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
65755 kfree(info->argv);
65756 }
65757
65758 -static int call_modprobe(char *module_name, int wait)
65759 +static int call_modprobe(char *module_name, char *module_param, int wait)
65760 {
65761 static char *envp[] = {
65762 "HOME=/",
65763 @@ -75,7 +75,7 @@ static int call_modprobe(char *module_name, int wait)
65764 NULL
65765 };
65766
65767 - char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
65768 + char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
65769 if (!argv)
65770 goto out;
65771
65772 @@ -87,7 +87,8 @@ static int call_modprobe(char *module_name, int wait)
65773 argv[1] = "-q";
65774 argv[2] = "--";
65775 argv[3] = module_name; /* check free_modprobe_argv() */
65776 - argv[4] = NULL;
65777 + argv[4] = module_param;
65778 + argv[5] = NULL;
65779
65780 return call_usermodehelper_fns(modprobe_path, argv, envp,
65781 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
65782 @@ -112,9 +113,8 @@ out:
65783 * If module auto-loading support is disabled then this function
65784 * becomes a no-operation.
65785 */
65786 -int __request_module(bool wait, const char *fmt, ...)
65787 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
65788 {
65789 - va_list args;
65790 char module_name[MODULE_NAME_LEN];
65791 unsigned int max_modprobes;
65792 int ret;
65793 @@ -122,9 +122,7 @@ int __request_module(bool wait, const char *fmt, ...)
65794 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
65795 static int kmod_loop_msg;
65796
65797 - va_start(args, fmt);
65798 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
65799 - va_end(args);
65800 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
65801 if (ret >= MODULE_NAME_LEN)
65802 return -ENAMETOOLONG;
65803
65804 @@ -132,6 +130,20 @@ int __request_module(bool wait, const char *fmt, ...)
65805 if (ret)
65806 return ret;
65807
65808 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65809 + if (!current_uid()) {
65810 + /* hack to workaround consolekit/udisks stupidity */
65811 + read_lock(&tasklist_lock);
65812 + if (!strcmp(current->comm, "mount") &&
65813 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
65814 + read_unlock(&tasklist_lock);
65815 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
65816 + return -EPERM;
65817 + }
65818 + read_unlock(&tasklist_lock);
65819 + }
65820 +#endif
65821 +
65822 /* If modprobe needs a service that is in a module, we get a recursive
65823 * loop. Limit the number of running kmod threads to max_threads/2 or
65824 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
65825 @@ -160,11 +172,52 @@ int __request_module(bool wait, const char *fmt, ...)
65826
65827 trace_module_request(module_name, wait, _RET_IP_);
65828
65829 - ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
65830 + ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
65831
65832 atomic_dec(&kmod_concurrent);
65833 return ret;
65834 }
65835 +
65836 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
65837 +{
65838 + va_list args;
65839 + int ret;
65840 +
65841 + va_start(args, fmt);
65842 + ret = ____request_module(wait, module_param, fmt, args);
65843 + va_end(args);
65844 +
65845 + return ret;
65846 +}
65847 +
65848 +int __request_module(bool wait, const char *fmt, ...)
65849 +{
65850 + va_list args;
65851 + int ret;
65852 +
65853 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65854 + if (current_uid()) {
65855 + char module_param[MODULE_NAME_LEN];
65856 +
65857 + memset(module_param, 0, sizeof(module_param));
65858 +
65859 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
65860 +
65861 + va_start(args, fmt);
65862 + ret = ____request_module(wait, module_param, fmt, args);
65863 + va_end(args);
65864 +
65865 + return ret;
65866 + }
65867 +#endif
65868 +
65869 + va_start(args, fmt);
65870 + ret = ____request_module(wait, NULL, fmt, args);
65871 + va_end(args);
65872 +
65873 + return ret;
65874 +}
65875 +
65876 EXPORT_SYMBOL(__request_module);
65877 #endif /* CONFIG_MODULES */
65878
65879 @@ -267,7 +320,7 @@ static int wait_for_helper(void *data)
65880 *
65881 * Thus the __user pointer cast is valid here.
65882 */
65883 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
65884 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
65885
65886 /*
65887 * If ret is 0, either ____call_usermodehelper failed and the
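In the kmod.c hunks, call_modprobe() gains a sixth argv slot so MODHARDEN can pass an extra marker argument (such as the grsec_modharden_normal%u_ string built in __request_module) to modprobe; hence the allocation grows from sizeof(char *[5]) to sizeof(char *[6]) while the array stays NULL-terminated. Below is a tiny hypothetical userspace sketch of building an exec-style argv with one optional extra argument; the helper name build_argv and the example strings are invented, not taken from the patch.

#include <stdio.h>
#include <stdlib.h>

/* build a NULL-terminated argv, optionally appending one extra argument */
static char **build_argv(const char *name, const char *extra)
{
        /* 4 fixed slots + optional extra + terminating NULL = up to 6 entries */
        char **argv = malloc(sizeof(char *[6]));
        if (!argv)
                return NULL;

        int n = 0;
        argv[n++] = "/sbin/modprobe";
        argv[n++] = "-q";
        argv[n++] = "--";
        argv[n++] = (char *)name;
        if (extra)
                argv[n++] = (char *)extra;
        argv[n] = NULL;                 /* execv() and friends require this */
        return argv;
}

int main(void)
{
        char **argv = build_argv("example_mod", "grsec_modharden_normal1000_");

        for (int i = 0; argv && argv[i]; i++)
                printf("argv[%d] = %s\n", i, argv[i]);
        free(argv);
        return 0;
}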
65888 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
65889 index c62b854..cb67968 100644
65890 --- a/kernel/kprobes.c
65891 +++ b/kernel/kprobes.c
65892 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
65893 * kernel image and loaded module images reside. This is required
65894 * so x86_64 can correctly handle the %rip-relative fixups.
65895 */
65896 - kip->insns = module_alloc(PAGE_SIZE);
65897 + kip->insns = module_alloc_exec(PAGE_SIZE);
65898 if (!kip->insns) {
65899 kfree(kip);
65900 return NULL;
65901 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
65902 */
65903 if (!list_is_singular(&kip->list)) {
65904 list_del(&kip->list);
65905 - module_free(NULL, kip->insns);
65906 + module_free_exec(NULL, kip->insns);
65907 kfree(kip);
65908 }
65909 return 1;
65910 @@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
65911 {
65912 int i, err = 0;
65913 unsigned long offset = 0, size = 0;
65914 - char *modname, namebuf[128];
65915 + char *modname, namebuf[KSYM_NAME_LEN];
65916 const char *symbol_name;
65917 void *addr;
65918 struct kprobe_blackpoint *kb;
65919 @@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
65920 const char *sym = NULL;
65921 unsigned int i = *(loff_t *) v;
65922 unsigned long offset = 0;
65923 - char *modname, namebuf[128];
65924 + char *modname, namebuf[KSYM_NAME_LEN];
65925
65926 head = &kprobe_table[i];
65927 preempt_disable();
65928 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
65929 index 4e316e1..5501eef 100644
65930 --- a/kernel/ksysfs.c
65931 +++ b/kernel/ksysfs.c
65932 @@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
65933 {
65934 if (count+1 > UEVENT_HELPER_PATH_LEN)
65935 return -ENOENT;
65936 + if (!capable(CAP_SYS_ADMIN))
65937 + return -EPERM;
65938 memcpy(uevent_helper, buf, count);
65939 uevent_helper[count] = '\0';
65940 if (count && uevent_helper[count-1] == '\n')
65941 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
65942 index ea9ee45..67ebc8f 100644
65943 --- a/kernel/lockdep.c
65944 +++ b/kernel/lockdep.c
65945 @@ -590,6 +590,10 @@ static int static_obj(void *obj)
65946 end = (unsigned long) &_end,
65947 addr = (unsigned long) obj;
65948
65949 +#ifdef CONFIG_PAX_KERNEXEC
65950 + start = ktla_ktva(start);
65951 +#endif
65952 +
65953 /*
65954 * static variable?
65955 */
65956 @@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
65957 if (!static_obj(lock->key)) {
65958 debug_locks_off();
65959 printk("INFO: trying to register non-static key.\n");
65960 + printk("lock:%pS key:%pS.\n", lock, lock->key);
65961 printk("the code is fine but needs lockdep annotation.\n");
65962 printk("turning off the locking correctness validator.\n");
65963 dump_stack();
65964 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
65965 if (!class)
65966 return 0;
65967 }
65968 - atomic_inc((atomic_t *)&class->ops);
65969 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
65970 if (very_verbose(class)) {
65971 printk("\nacquire class [%p] %s", class->key, class->name);
65972 if (class->name_version > 1)
65973 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
65974 index 91c32a0..b2c71c5 100644
65975 --- a/kernel/lockdep_proc.c
65976 +++ b/kernel/lockdep_proc.c
65977 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
65978
65979 static void print_name(struct seq_file *m, struct lock_class *class)
65980 {
65981 - char str[128];
65982 + char str[KSYM_NAME_LEN];
65983 const char *name = class->name;
65984
65985 if (!name) {
65986 diff --git a/kernel/module.c b/kernel/module.c
65987 index 78ac6ec..e87db0e 100644
65988 --- a/kernel/module.c
65989 +++ b/kernel/module.c
65990 @@ -58,6 +58,7 @@
65991 #include <linux/jump_label.h>
65992 #include <linux/pfn.h>
65993 #include <linux/bsearch.h>
65994 +#include <linux/grsecurity.h>
65995
65996 #define CREATE_TRACE_POINTS
65997 #include <trace/events/module.h>
65998 @@ -114,7 +115,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
65999
66000 /* Bounds of module allocation, for speeding __module_address.
66001 * Protected by module_mutex. */
66002 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
66003 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
66004 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
66005
66006 int register_module_notifier(struct notifier_block * nb)
66007 {
66008 @@ -278,7 +280,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66009 return true;
66010
66011 list_for_each_entry_rcu(mod, &modules, list) {
66012 - struct symsearch arr[] = {
66013 + struct symsearch modarr[] = {
66014 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
66015 NOT_GPL_ONLY, false },
66016 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
66017 @@ -300,7 +302,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66018 #endif
66019 };
66020
66021 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
66022 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
66023 return true;
66024 }
66025 return false;
66026 @@ -432,7 +434,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
66027 static int percpu_modalloc(struct module *mod,
66028 unsigned long size, unsigned long align)
66029 {
66030 - if (align > PAGE_SIZE) {
66031 + if (align-1 >= PAGE_SIZE) {
66032 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
66033 mod->name, align, PAGE_SIZE);
66034 align = PAGE_SIZE;
66035 @@ -1032,7 +1034,7 @@ struct module_attribute module_uevent =
66036 static ssize_t show_coresize(struct module_attribute *mattr,
66037 struct module_kobject *mk, char *buffer)
66038 {
66039 - return sprintf(buffer, "%u\n", mk->mod->core_size);
66040 + return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
66041 }
66042
66043 static struct module_attribute modinfo_coresize =
66044 @@ -1041,7 +1043,7 @@ static struct module_attribute modinfo_coresize =
66045 static ssize_t show_initsize(struct module_attribute *mattr,
66046 struct module_kobject *mk, char *buffer)
66047 {
66048 - return sprintf(buffer, "%u\n", mk->mod->init_size);
66049 + return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
66050 }
66051
66052 static struct module_attribute modinfo_initsize =
66053 @@ -1255,7 +1257,7 @@ resolve_symbol_wait(struct module *mod,
66054 */
66055 #ifdef CONFIG_SYSFS
66056
66057 -#ifdef CONFIG_KALLSYMS
66058 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66059 static inline bool sect_empty(const Elf_Shdr *sect)
66060 {
66061 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
66062 @@ -1721,21 +1723,21 @@ static void set_section_ro_nx(void *base,
66063
66064 static void unset_module_core_ro_nx(struct module *mod)
66065 {
66066 - set_page_attributes(mod->module_core + mod->core_text_size,
66067 - mod->module_core + mod->core_size,
66068 + set_page_attributes(mod->module_core_rw,
66069 + mod->module_core_rw + mod->core_size_rw,
66070 set_memory_x);
66071 - set_page_attributes(mod->module_core,
66072 - mod->module_core + mod->core_ro_size,
66073 + set_page_attributes(mod->module_core_rx,
66074 + mod->module_core_rx + mod->core_size_rx,
66075 set_memory_rw);
66076 }
66077
66078 static void unset_module_init_ro_nx(struct module *mod)
66079 {
66080 - set_page_attributes(mod->module_init + mod->init_text_size,
66081 - mod->module_init + mod->init_size,
66082 + set_page_attributes(mod->module_init_rw,
66083 + mod->module_init_rw + mod->init_size_rw,
66084 set_memory_x);
66085 - set_page_attributes(mod->module_init,
66086 - mod->module_init + mod->init_ro_size,
66087 + set_page_attributes(mod->module_init_rx,
66088 + mod->module_init_rx + mod->init_size_rx,
66089 set_memory_rw);
66090 }
66091
66092 @@ -1746,14 +1748,14 @@ void set_all_modules_text_rw(void)
66093
66094 mutex_lock(&module_mutex);
66095 list_for_each_entry_rcu(mod, &modules, list) {
66096 - if ((mod->module_core) && (mod->core_text_size)) {
66097 - set_page_attributes(mod->module_core,
66098 - mod->module_core + mod->core_text_size,
66099 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66100 + set_page_attributes(mod->module_core_rx,
66101 + mod->module_core_rx + mod->core_size_rx,
66102 set_memory_rw);
66103 }
66104 - if ((mod->module_init) && (mod->init_text_size)) {
66105 - set_page_attributes(mod->module_init,
66106 - mod->module_init + mod->init_text_size,
66107 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66108 + set_page_attributes(mod->module_init_rx,
66109 + mod->module_init_rx + mod->init_size_rx,
66110 set_memory_rw);
66111 }
66112 }
66113 @@ -1767,14 +1769,14 @@ void set_all_modules_text_ro(void)
66114
66115 mutex_lock(&module_mutex);
66116 list_for_each_entry_rcu(mod, &modules, list) {
66117 - if ((mod->module_core) && (mod->core_text_size)) {
66118 - set_page_attributes(mod->module_core,
66119 - mod->module_core + mod->core_text_size,
66120 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66121 + set_page_attributes(mod->module_core_rx,
66122 + mod->module_core_rx + mod->core_size_rx,
66123 set_memory_ro);
66124 }
66125 - if ((mod->module_init) && (mod->init_text_size)) {
66126 - set_page_attributes(mod->module_init,
66127 - mod->module_init + mod->init_text_size,
66128 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66129 + set_page_attributes(mod->module_init_rx,
66130 + mod->module_init_rx + mod->init_size_rx,
66131 set_memory_ro);
66132 }
66133 }
66134 @@ -1820,16 +1822,19 @@ static void free_module(struct module *mod)
66135
66136 /* This may be NULL, but that's OK */
66137 unset_module_init_ro_nx(mod);
66138 - module_free(mod, mod->module_init);
66139 + module_free(mod, mod->module_init_rw);
66140 + module_free_exec(mod, mod->module_init_rx);
66141 kfree(mod->args);
66142 percpu_modfree(mod);
66143
66144 /* Free lock-classes: */
66145 - lockdep_free_key_range(mod->module_core, mod->core_size);
66146 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
66147 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
66148
66149 /* Finally, free the core (containing the module structure) */
66150 unset_module_core_ro_nx(mod);
66151 - module_free(mod, mod->module_core);
66152 + module_free_exec(mod, mod->module_core_rx);
66153 + module_free(mod, mod->module_core_rw);
66154
66155 #ifdef CONFIG_MPU
66156 update_protections(current->mm);
66157 @@ -1899,9 +1904,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66158 int ret = 0;
66159 const struct kernel_symbol *ksym;
66160
66161 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66162 + int is_fs_load = 0;
66163 + int register_filesystem_found = 0;
66164 + char *p;
66165 +
66166 + p = strstr(mod->args, "grsec_modharden_fs");
66167 + if (p) {
66168 + char *endptr = p + strlen("grsec_modharden_fs");
66169 + /* copy \0 as well */
66170 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
66171 + is_fs_load = 1;
66172 + }
66173 +#endif
66174 +
66175 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
66176 const char *name = info->strtab + sym[i].st_name;
66177
66178 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66179 + /* it's a real shame this will never get ripped and copied
66180 + upstream! ;(
66181 + */
66182 + if (is_fs_load && !strcmp(name, "register_filesystem"))
66183 + register_filesystem_found = 1;
66184 +#endif
66185 +
66186 switch (sym[i].st_shndx) {
66187 case SHN_COMMON:
66188 /* We compiled with -fno-common. These are not
66189 @@ -1922,7 +1949,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66190 ksym = resolve_symbol_wait(mod, info, name);
66191 /* Ok if resolved. */
66192 if (ksym && !IS_ERR(ksym)) {
66193 + pax_open_kernel();
66194 sym[i].st_value = ksym->value;
66195 + pax_close_kernel();
66196 break;
66197 }
66198
66199 @@ -1941,11 +1970,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66200 secbase = (unsigned long)mod_percpu(mod);
66201 else
66202 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
66203 + pax_open_kernel();
66204 sym[i].st_value += secbase;
66205 + pax_close_kernel();
66206 break;
66207 }
66208 }
66209
66210 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66211 + if (is_fs_load && !register_filesystem_found) {
66212 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
66213 + ret = -EPERM;
66214 + }
66215 +#endif
66216 +
66217 return ret;
66218 }
66219
66220 @@ -2049,22 +2087,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
66221 || s->sh_entsize != ~0UL
66222 || strstarts(sname, ".init"))
66223 continue;
66224 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
66225 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66226 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
66227 + else
66228 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
66229 pr_debug("\t%s\n", sname);
66230 }
66231 - switch (m) {
66232 - case 0: /* executable */
66233 - mod->core_size = debug_align(mod->core_size);
66234 - mod->core_text_size = mod->core_size;
66235 - break;
66236 - case 1: /* RO: text and ro-data */
66237 - mod->core_size = debug_align(mod->core_size);
66238 - mod->core_ro_size = mod->core_size;
66239 - break;
66240 - case 3: /* whole core */
66241 - mod->core_size = debug_align(mod->core_size);
66242 - break;
66243 - }
66244 }
66245
66246 pr_debug("Init section allocation order:\n");
66247 @@ -2078,23 +2106,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
66248 || s->sh_entsize != ~0UL
66249 || !strstarts(sname, ".init"))
66250 continue;
66251 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
66252 - | INIT_OFFSET_MASK);
66253 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66254 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
66255 + else
66256 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
66257 + s->sh_entsize |= INIT_OFFSET_MASK;
66258 pr_debug("\t%s\n", sname);
66259 }
66260 - switch (m) {
66261 - case 0: /* executable */
66262 - mod->init_size = debug_align(mod->init_size);
66263 - mod->init_text_size = mod->init_size;
66264 - break;
66265 - case 1: /* RO: text and ro-data */
66266 - mod->init_size = debug_align(mod->init_size);
66267 - mod->init_ro_size = mod->init_size;
66268 - break;
66269 - case 3: /* whole init */
66270 - mod->init_size = debug_align(mod->init_size);
66271 - break;
66272 - }
66273 }
66274 }
66275
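The layout_sections() rework above stops tracking a single core_size/init_size and instead routes every section into either the rw or the rx image based on its ELF flags: anything writable or not allocated is accounted to the _rw size, everything else (text and read-only data) to _rx. The classification is a plain flag test on the section header; a small userspace sketch of the same test using the standard <elf.h> constants (the section names in main are only examples):

#include <elf.h>
#include <stdio.h>

/* mirror of the patch's test: writable or non-allocated sections are "rw",
 * allocated read-only/executable sections are "rx" */
static const char *classify(Elf64_Xword sh_flags)
{
        if ((sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC))
                return "rw image";
        return "rx image";
}

int main(void)
{
        printf(".text      -> %s\n", classify(SHF_ALLOC | SHF_EXECINSTR));
        printf(".rodata    -> %s\n", classify(SHF_ALLOC));
        printf(".data      -> %s\n", classify(SHF_ALLOC | SHF_WRITE));
        printf(".comment   -> %s\n", classify(0));
        return 0;
}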
66276 @@ -2266,7 +2284,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66277
66278 /* Put symbol section at end of init part of module. */
66279 symsect->sh_flags |= SHF_ALLOC;
66280 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
66281 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
66282 info->index.sym) | INIT_OFFSET_MASK;
66283 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
66284
66285 @@ -2281,13 +2299,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66286 }
66287
66288 /* Append room for core symbols at end of core part. */
66289 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
66290 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
66291 - mod->core_size += strtab_size;
66292 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
66293 + info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
66294 + mod->core_size_rx += strtab_size;
66295
66296 /* Put string table section at end of init part of module. */
66297 strsect->sh_flags |= SHF_ALLOC;
66298 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
66299 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
66300 info->index.str) | INIT_OFFSET_MASK;
66301 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
66302 }
66303 @@ -2305,12 +2323,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66304 /* Make sure we get permanent strtab: don't use info->strtab. */
66305 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
66306
66307 + pax_open_kernel();
66308 +
66309 /* Set types up while we still have access to sections. */
66310 for (i = 0; i < mod->num_symtab; i++)
66311 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
66312
66313 - mod->core_symtab = dst = mod->module_core + info->symoffs;
66314 - mod->core_strtab = s = mod->module_core + info->stroffs;
66315 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
66316 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
66317 src = mod->symtab;
66318 *dst = *src;
66319 *s++ = 0;
66320 @@ -2323,6 +2343,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66321 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
66322 }
66323 mod->core_num_syms = ndst;
66324 +
66325 + pax_close_kernel();
66326 }
66327 #else
66328 static inline void layout_symtab(struct module *mod, struct load_info *info)
66329 @@ -2356,17 +2378,33 @@ void * __weak module_alloc(unsigned long size)
66330 return size == 0 ? NULL : vmalloc_exec(size);
66331 }
66332
66333 -static void *module_alloc_update_bounds(unsigned long size)
66334 +static void *module_alloc_update_bounds_rw(unsigned long size)
66335 {
66336 void *ret = module_alloc(size);
66337
66338 if (ret) {
66339 mutex_lock(&module_mutex);
66340 /* Update module bounds. */
66341 - if ((unsigned long)ret < module_addr_min)
66342 - module_addr_min = (unsigned long)ret;
66343 - if ((unsigned long)ret + size > module_addr_max)
66344 - module_addr_max = (unsigned long)ret + size;
66345 + if ((unsigned long)ret < module_addr_min_rw)
66346 + module_addr_min_rw = (unsigned long)ret;
66347 + if ((unsigned long)ret + size > module_addr_max_rw)
66348 + module_addr_max_rw = (unsigned long)ret + size;
66349 + mutex_unlock(&module_mutex);
66350 + }
66351 + return ret;
66352 +}
66353 +
66354 +static void *module_alloc_update_bounds_rx(unsigned long size)
66355 +{
66356 + void *ret = module_alloc_exec(size);
66357 +
66358 + if (ret) {
66359 + mutex_lock(&module_mutex);
66360 + /* Update module bounds. */
66361 + if ((unsigned long)ret < module_addr_min_rx)
66362 + module_addr_min_rx = (unsigned long)ret;
66363 + if ((unsigned long)ret + size > module_addr_max_rx)
66364 + module_addr_max_rx = (unsigned long)ret + size;
66365 mutex_unlock(&module_mutex);
66366 }
66367 return ret;
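module_alloc_update_bounds() is split into _rw and _rx variants above because the patched loader places every module in two regions, a writable non-executable block for data and an executable read-only block for text, each with its own min/max bounds for address lookups. A loose userspace analogue of keeping writable and executable memory separate (the W^X idea, not the actual module loader; the 0xc3 byte and the strings are purely illustrative):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4096;

        /* data region: writable, never executable (the "_rw" allocation) */
        char *rw = mmap(NULL, len, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        /* code region: writable only long enough to be filled,
         * then flipped to read+exec (the "_rx" allocation) */
        unsigned char *rx = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (rw == MAP_FAILED || rx == MAP_FAILED)
                return 1;

        strcpy(rw, "module data lives here");
        rx[0] = 0xc3;                           /* x86 "ret"; a loader would copy .text here */
        mprotect(rx, len, PROT_READ | PROT_EXEC);

        printf("rw at %p, rx at %p: no single mapping is both W and X\n",
               (void *)rw, (void *)rx);
        return 0;
}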
66368 @@ -2543,8 +2581,14 @@ static struct module *setup_load_info(struct load_info *info)
66369 static int check_modinfo(struct module *mod, struct load_info *info)
66370 {
66371 const char *modmagic = get_modinfo(info, "vermagic");
66372 + const char *license = get_modinfo(info, "license");
66373 int err;
66374
66375 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
66376 + if (!license || !license_is_gpl_compatible(license))
66377 + return -ENOEXEC;
66378 +#endif
66379 +
66380 /* This is allowed: modprobe --force will invalidate it. */
66381 if (!modmagic) {
66382 err = try_to_force_load(mod, "bad vermagic");
66383 @@ -2567,7 +2611,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
66384 }
66385
66386 /* Set up license info based on the info section */
66387 - set_license(mod, get_modinfo(info, "license"));
66388 + set_license(mod, license);
66389
66390 return 0;
66391 }
66392 @@ -2661,7 +2705,7 @@ static int move_module(struct module *mod, struct load_info *info)
66393 void *ptr;
66394
66395 /* Do the allocs. */
66396 - ptr = module_alloc_update_bounds(mod->core_size);
66397 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
66398 /*
66399 * The pointer to this block is stored in the module structure
66400 * which is inside the block. Just mark it as not being a
66401 @@ -2671,23 +2715,50 @@ static int move_module(struct module *mod, struct load_info *info)
66402 if (!ptr)
66403 return -ENOMEM;
66404
66405 - memset(ptr, 0, mod->core_size);
66406 - mod->module_core = ptr;
66407 + memset(ptr, 0, mod->core_size_rw);
66408 + mod->module_core_rw = ptr;
66409
66410 - ptr = module_alloc_update_bounds(mod->init_size);
66411 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
66412 /*
66413 * The pointer to this block is stored in the module structure
66414 * which is inside the block. This block doesn't need to be
66415 * scanned as it contains data and code that will be freed
66416 * after the module is initialized.
66417 */
66418 - kmemleak_ignore(ptr);
66419 - if (!ptr && mod->init_size) {
66420 - module_free(mod, mod->module_core);
66421 + kmemleak_not_leak(ptr);
66422 + if (!ptr && mod->init_size_rw) {
66423 + module_free(mod, mod->module_core_rw);
66424 return -ENOMEM;
66425 }
66426 - memset(ptr, 0, mod->init_size);
66427 - mod->module_init = ptr;
66428 + memset(ptr, 0, mod->init_size_rw);
66429 + mod->module_init_rw = ptr;
66430 +
66431 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
66432 + kmemleak_not_leak(ptr);
66433 + if (!ptr) {
66434 + module_free(mod, mod->module_init_rw);
66435 + module_free(mod, mod->module_core_rw);
66436 + return -ENOMEM;
66437 + }
66438 +
66439 + pax_open_kernel();
66440 + memset(ptr, 0, mod->core_size_rx);
66441 + pax_close_kernel();
66442 + mod->module_core_rx = ptr;
66443 +
66444 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
66445 + kmemleak_not_leak(ptr);
66446 + if (!ptr && mod->init_size_rx) {
66447 + module_free_exec(mod, mod->module_core_rx);
66448 + module_free(mod, mod->module_init_rw);
66449 + module_free(mod, mod->module_core_rw);
66450 + return -ENOMEM;
66451 + }
66452 +
66453 + pax_open_kernel();
66454 + memset(ptr, 0, mod->init_size_rx);
66455 + pax_close_kernel();
66456 + mod->module_init_rx = ptr;
66457
66458 /* Transfer each section which specifies SHF_ALLOC */
66459 pr_debug("final section addresses:\n");
66460 @@ -2698,16 +2769,45 @@ static int move_module(struct module *mod, struct load_info *info)
66461 if (!(shdr->sh_flags & SHF_ALLOC))
66462 continue;
66463
66464 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
66465 - dest = mod->module_init
66466 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66467 - else
66468 - dest = mod->module_core + shdr->sh_entsize;
66469 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
66470 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66471 + dest = mod->module_init_rw
66472 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66473 + else
66474 + dest = mod->module_init_rx
66475 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66476 + } else {
66477 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66478 + dest = mod->module_core_rw + shdr->sh_entsize;
66479 + else
66480 + dest = mod->module_core_rx + shdr->sh_entsize;
66481 + }
66482 +
66483 + if (shdr->sh_type != SHT_NOBITS) {
66484 +
66485 +#ifdef CONFIG_PAX_KERNEXEC
66486 +#ifdef CONFIG_X86_64
66487 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
66488 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
66489 +#endif
66490 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
66491 + pax_open_kernel();
66492 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66493 + pax_close_kernel();
66494 + } else
66495 +#endif
66496
66497 - if (shdr->sh_type != SHT_NOBITS)
66498 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66499 + }
66500 /* Update sh_addr to point to copy in image. */
66501 - shdr->sh_addr = (unsigned long)dest;
66502 +
66503 +#ifdef CONFIG_PAX_KERNEXEC
66504 + if (shdr->sh_flags & SHF_EXECINSTR)
66505 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
66506 + else
66507 +#endif
66508 +
66509 + shdr->sh_addr = (unsigned long)dest;
66510 pr_debug("\t0x%lx %s\n",
66511 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
66512 }
66513 @@ -2758,12 +2858,12 @@ static void flush_module_icache(const struct module *mod)
66514 * Do it before processing of module parameters, so the module
66515 * can provide parameter accessor functions of its own.
66516 */
66517 - if (mod->module_init)
66518 - flush_icache_range((unsigned long)mod->module_init,
66519 - (unsigned long)mod->module_init
66520 - + mod->init_size);
66521 - flush_icache_range((unsigned long)mod->module_core,
66522 - (unsigned long)mod->module_core + mod->core_size);
66523 + if (mod->module_init_rx)
66524 + flush_icache_range((unsigned long)mod->module_init_rx,
66525 + (unsigned long)mod->module_init_rx
66526 + + mod->init_size_rx);
66527 + flush_icache_range((unsigned long)mod->module_core_rx,
66528 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
66529
66530 set_fs(old_fs);
66531 }
66532 @@ -2833,8 +2933,10 @@ out:
66533 static void module_deallocate(struct module *mod, struct load_info *info)
66534 {
66535 percpu_modfree(mod);
66536 - module_free(mod, mod->module_init);
66537 - module_free(mod, mod->module_core);
66538 + module_free_exec(mod, mod->module_init_rx);
66539 + module_free_exec(mod, mod->module_core_rx);
66540 + module_free(mod, mod->module_init_rw);
66541 + module_free(mod, mod->module_core_rw);
66542 }
66543
66544 int __weak module_finalize(const Elf_Ehdr *hdr,
66545 @@ -2898,9 +3000,38 @@ static struct module *load_module(void __user *umod,
66546 if (err)
66547 goto free_unload;
66548
66549 + /* Now copy in args */
66550 + mod->args = strndup_user(uargs, ~0UL >> 1);
66551 + if (IS_ERR(mod->args)) {
66552 + err = PTR_ERR(mod->args);
66553 + goto free_unload;
66554 + }
66555 +
66556 /* Set up MODINFO_ATTR fields */
66557 setup_modinfo(mod, &info);
66558
66559 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66560 + {
66561 + char *p, *p2;
66562 +
66563 + if (strstr(mod->args, "grsec_modharden_netdev")) {
66564 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
66565 + err = -EPERM;
66566 + goto free_modinfo;
66567 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
66568 + p += strlen("grsec_modharden_normal");
66569 + p2 = strstr(p, "_");
66570 + if (p2) {
66571 + *p2 = '\0';
66572 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
66573 + *p2 = '_';
66574 + }
66575 + err = -EPERM;
66576 + goto free_modinfo;
66577 + }
66578 + }
66579 +#endif
66580 +
66581 /* Fix up syms, so that st_value is a pointer to location. */
66582 err = simplify_symbols(mod, &info);
66583 if (err < 0)
66584 @@ -2916,13 +3047,6 @@ static struct module *load_module(void __user *umod,
66585
66586 flush_module_icache(mod);
66587
66588 - /* Now copy in args */
66589 - mod->args = strndup_user(uargs, ~0UL >> 1);
66590 - if (IS_ERR(mod->args)) {
66591 - err = PTR_ERR(mod->args);
66592 - goto free_arch_cleanup;
66593 - }
66594 -
66595 /* Mark state as coming so strong_try_module_get() ignores us. */
66596 mod->state = MODULE_STATE_COMING;
66597
66598 @@ -2980,11 +3104,10 @@ static struct module *load_module(void __user *umod,
66599 unlock:
66600 mutex_unlock(&module_mutex);
66601 synchronize_sched();
66602 - kfree(mod->args);
66603 - free_arch_cleanup:
66604 module_arch_cleanup(mod);
66605 free_modinfo:
66606 free_modinfo(mod);
66607 + kfree(mod->args);
66608 free_unload:
66609 module_unload_free(mod);
66610 free_module:
66611 @@ -3025,16 +3148,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66612 MODULE_STATE_COMING, mod);
66613
66614 /* Set RO and NX regions for core */
66615 - set_section_ro_nx(mod->module_core,
66616 - mod->core_text_size,
66617 - mod->core_ro_size,
66618 - mod->core_size);
66619 + set_section_ro_nx(mod->module_core_rx,
66620 + mod->core_size_rx,
66621 + mod->core_size_rx,
66622 + mod->core_size_rx);
66623
66624 /* Set RO and NX regions for init */
66625 - set_section_ro_nx(mod->module_init,
66626 - mod->init_text_size,
66627 - mod->init_ro_size,
66628 - mod->init_size);
66629 + set_section_ro_nx(mod->module_init_rx,
66630 + mod->init_size_rx,
66631 + mod->init_size_rx,
66632 + mod->init_size_rx);
66633
66634 do_mod_ctors(mod);
66635 /* Start the module */
66636 @@ -3080,11 +3203,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66637 mod->strtab = mod->core_strtab;
66638 #endif
66639 unset_module_init_ro_nx(mod);
66640 - module_free(mod, mod->module_init);
66641 - mod->module_init = NULL;
66642 - mod->init_size = 0;
66643 - mod->init_ro_size = 0;
66644 - mod->init_text_size = 0;
66645 + module_free(mod, mod->module_init_rw);
66646 + module_free_exec(mod, mod->module_init_rx);
66647 + mod->module_init_rw = NULL;
66648 + mod->module_init_rx = NULL;
66649 + mod->init_size_rw = 0;
66650 + mod->init_size_rx = 0;
66651 mutex_unlock(&module_mutex);
66652
66653 return 0;
66654 @@ -3115,10 +3239,16 @@ static const char *get_ksymbol(struct module *mod,
66655 unsigned long nextval;
66656
66657 /* At worse, next value is at end of module */
66658 - if (within_module_init(addr, mod))
66659 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
66660 + if (within_module_init_rx(addr, mod))
66661 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
66662 + else if (within_module_init_rw(addr, mod))
66663 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
66664 + else if (within_module_core_rx(addr, mod))
66665 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
66666 + else if (within_module_core_rw(addr, mod))
66667 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
66668 else
66669 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
66670 + return NULL;
66671
66672 /* Scan for closest preceding symbol, and next symbol. (ELF
66673 starts real symbols at 1). */
66674 @@ -3353,7 +3483,7 @@ static int m_show(struct seq_file *m, void *p)
66675 char buf[8];
66676
66677 seq_printf(m, "%s %u",
66678 - mod->name, mod->init_size + mod->core_size);
66679 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
66680 print_unload_info(m, mod);
66681
66682 /* Informative for users. */
66683 @@ -3362,7 +3492,7 @@ static int m_show(struct seq_file *m, void *p)
66684 mod->state == MODULE_STATE_COMING ? "Loading":
66685 "Live");
66686 /* Used by oprofile and other similar tools. */
66687 - seq_printf(m, " 0x%pK", mod->module_core);
66688 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
66689
66690 /* Taints info */
66691 if (mod->taints)
66692 @@ -3398,7 +3528,17 @@ static const struct file_operations proc_modules_operations = {
66693
66694 static int __init proc_modules_init(void)
66695 {
66696 +#ifndef CONFIG_GRKERNSEC_HIDESYM
66697 +#ifdef CONFIG_GRKERNSEC_PROC_USER
66698 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66699 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66700 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
66701 +#else
66702 proc_create("modules", 0, NULL, &proc_modules_operations);
66703 +#endif
66704 +#else
66705 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66706 +#endif
66707 return 0;
66708 }
66709 module_init(proc_modules_init);
66710 @@ -3457,12 +3597,12 @@ struct module *__module_address(unsigned long addr)
66711 {
66712 struct module *mod;
66713
66714 - if (addr < module_addr_min || addr > module_addr_max)
66715 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
66716 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
66717 return NULL;
66718
66719 list_for_each_entry_rcu(mod, &modules, list)
66720 - if (within_module_core(addr, mod)
66721 - || within_module_init(addr, mod))
66722 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
66723 return mod;
66724 return NULL;
66725 }
66726 @@ -3496,11 +3636,20 @@ bool is_module_text_address(unsigned long addr)
66727 */
66728 struct module *__module_text_address(unsigned long addr)
66729 {
66730 - struct module *mod = __module_address(addr);
66731 + struct module *mod;
66732 +
66733 +#ifdef CONFIG_X86_32
66734 + addr = ktla_ktva(addr);
66735 +#endif
66736 +
66737 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
66738 + return NULL;
66739 +
66740 + mod = __module_address(addr);
66741 +
66742 if (mod) {
66743 /* Make sure it's within the text section. */
66744 - if (!within(addr, mod->module_init, mod->init_text_size)
66745 - && !within(addr, mod->module_core, mod->core_text_size))
66746 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
66747 mod = NULL;
66748 }
66749 return mod;
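
[Editor's aside — not part of the patch] The kernel/module.c hunks above split every module into a separate RW (data) and RX (text) mapping. A minimal sketch of that allocation pattern, reusing only helpers the patch itself relies on (module_alloc(), module_alloc_exec(), module_free(), pax_open_kernel()/pax_close_kernel()); the function name example_alloc_split is invented for illustration:

static int example_alloc_split(struct module *mod)
{
	/* data and other writable sections live in an ordinary RW mapping */
	mod->module_core_rw = module_alloc(mod->core_size_rw);
	if (!mod->module_core_rw)
		return -ENOMEM;
	memset(mod->module_core_rw, 0, mod->core_size_rw);

	/* text goes into a separate executable, non-writable mapping */
	mod->module_core_rx = module_alloc_exec(mod->core_size_rx);
	if (!mod->module_core_rx) {
		module_free(mod, mod->module_core_rw);
		return -ENOMEM;
	}

	/* the RX mapping is not normally writable, so initialisation is
	   bracketed by the KERNEXEC open/close pair, as in move_module() */
	pax_open_kernel();
	memset(mod->module_core_rx, 0, mod->core_size_rx);
	pax_close_kernel();

	return 0;
}
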
66750 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
66751 index 7e3443f..b2a1e6b 100644
66752 --- a/kernel/mutex-debug.c
66753 +++ b/kernel/mutex-debug.c
66754 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
66755 }
66756
66757 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66758 - struct thread_info *ti)
66759 + struct task_struct *task)
66760 {
66761 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
66762
66763 /* Mark the current thread as blocked on the lock: */
66764 - ti->task->blocked_on = waiter;
66765 + task->blocked_on = waiter;
66766 }
66767
66768 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66769 - struct thread_info *ti)
66770 + struct task_struct *task)
66771 {
66772 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
66773 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
66774 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
66775 - ti->task->blocked_on = NULL;
66776 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
66777 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
66778 + task->blocked_on = NULL;
66779
66780 list_del_init(&waiter->list);
66781 waiter->task = NULL;
66782 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
66783 index 0799fd3..d06ae3b 100644
66784 --- a/kernel/mutex-debug.h
66785 +++ b/kernel/mutex-debug.h
66786 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
66787 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
66788 extern void debug_mutex_add_waiter(struct mutex *lock,
66789 struct mutex_waiter *waiter,
66790 - struct thread_info *ti);
66791 + struct task_struct *task);
66792 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66793 - struct thread_info *ti);
66794 + struct task_struct *task);
66795 extern void debug_mutex_unlock(struct mutex *lock);
66796 extern void debug_mutex_init(struct mutex *lock, const char *name,
66797 struct lock_class_key *key);
66798 diff --git a/kernel/mutex.c b/kernel/mutex.c
66799 index a307cc9..27fd2e9 100644
66800 --- a/kernel/mutex.c
66801 +++ b/kernel/mutex.c
66802 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66803 spin_lock_mutex(&lock->wait_lock, flags);
66804
66805 debug_mutex_lock_common(lock, &waiter);
66806 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
66807 + debug_mutex_add_waiter(lock, &waiter, task);
66808
66809 /* add waiting tasks to the end of the waitqueue (FIFO): */
66810 list_add_tail(&waiter.list, &lock->wait_list);
66811 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66812 * TASK_UNINTERRUPTIBLE case.)
66813 */
66814 if (unlikely(signal_pending_state(state, task))) {
66815 - mutex_remove_waiter(lock, &waiter,
66816 - task_thread_info(task));
66817 + mutex_remove_waiter(lock, &waiter, task);
66818 mutex_release(&lock->dep_map, 1, ip);
66819 spin_unlock_mutex(&lock->wait_lock, flags);
66820
66821 @@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66822 done:
66823 lock_acquired(&lock->dep_map, ip);
66824 /* got the lock - rejoice! */
66825 - mutex_remove_waiter(lock, &waiter, current_thread_info());
66826 + mutex_remove_waiter(lock, &waiter, task);
66827 mutex_set_owner(lock);
66828
66829 /* set it to 0 if there are no waiters left: */
66830 diff --git a/kernel/panic.c b/kernel/panic.c
66831 index 8ed89a1..e83856a 100644
66832 --- a/kernel/panic.c
66833 +++ b/kernel/panic.c
66834 @@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
66835 const char *board;
66836
66837 printk(KERN_WARNING "------------[ cut here ]------------\n");
66838 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
66839 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
66840 board = dmi_get_system_info(DMI_PRODUCT_NAME);
66841 if (board)
66842 printk(KERN_WARNING "Hardware name: %s\n", board);
66843 @@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
66844 */
66845 void __stack_chk_fail(void)
66846 {
66847 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
66848 + dump_stack();
66849 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
66850 __builtin_return_address(0));
66851 }
66852 EXPORT_SYMBOL(__stack_chk_fail);
66853 diff --git a/kernel/pid.c b/kernel/pid.c
66854 index 9f08dfa..6765c40 100644
66855 --- a/kernel/pid.c
66856 +++ b/kernel/pid.c
66857 @@ -33,6 +33,7 @@
66858 #include <linux/rculist.h>
66859 #include <linux/bootmem.h>
66860 #include <linux/hash.h>
66861 +#include <linux/security.h>
66862 #include <linux/pid_namespace.h>
66863 #include <linux/init_task.h>
66864 #include <linux/syscalls.h>
66865 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
66866
66867 int pid_max = PID_MAX_DEFAULT;
66868
66869 -#define RESERVED_PIDS 300
66870 +#define RESERVED_PIDS 500
66871
66872 int pid_max_min = RESERVED_PIDS + 1;
66873 int pid_max_max = PID_MAX_LIMIT;
66874 @@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
66875 */
66876 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
66877 {
66878 + struct task_struct *task;
66879 +
66880 rcu_lockdep_assert(rcu_read_lock_held(),
66881 "find_task_by_pid_ns() needs rcu_read_lock()"
66882 " protection");
66883 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66884 +
66885 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66886 +
66887 + if (gr_pid_is_chrooted(task))
66888 + return NULL;
66889 +
66890 + return task;
66891 }
66892
66893 struct task_struct *find_task_by_vpid(pid_t vnr)
66894 @@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
66895 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
66896 }
66897
66898 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
66899 +{
66900 + rcu_lockdep_assert(rcu_read_lock_held(),
66901 + "find_task_by_pid_ns() needs rcu_read_lock()"
66902 + " protection");
66903 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
66904 +}
66905 +
66906 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
66907 {
66908 struct pid *pid;
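
[Editor's aside — not part of the patch] With the kernel/pid.c change above, find_task_by_pid_ns() hides any task that gr_pid_is_chrooted() flags, so the patch adds find_task_by_vpid_unrestricted() for kernel-internal lookups that must still see such tasks. A hedged usage sketch; example_signal_any() and its use of send_sig() are illustrative, not taken from the patch:

static int example_signal_any(pid_t vnr, int sig)
{
	struct task_struct *task;
	int ret = -ESRCH;

	rcu_read_lock();
	/* bypasses the chroot filter applied in find_task_by_pid_ns() */
	task = find_task_by_vpid_unrestricted(vnr);
	if (task)
		ret = send_sig(sig, task, 1);	/* priv=1: kernel-originated */
	rcu_read_unlock();
	return ret;
}
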
66909 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
66910 index 125cb67..a4d1c30 100644
66911 --- a/kernel/posix-cpu-timers.c
66912 +++ b/kernel/posix-cpu-timers.c
66913 @@ -6,6 +6,7 @@
66914 #include <linux/posix-timers.h>
66915 #include <linux/errno.h>
66916 #include <linux/math64.h>
66917 +#include <linux/security.h>
66918 #include <asm/uaccess.h>
66919 #include <linux/kernel_stat.h>
66920 #include <trace/events/timer.h>
66921 @@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
66922
66923 static __init int init_posix_cpu_timers(void)
66924 {
66925 - struct k_clock process = {
66926 + static struct k_clock process = {
66927 .clock_getres = process_cpu_clock_getres,
66928 .clock_get = process_cpu_clock_get,
66929 .timer_create = process_cpu_timer_create,
66930 .nsleep = process_cpu_nsleep,
66931 .nsleep_restart = process_cpu_nsleep_restart,
66932 };
66933 - struct k_clock thread = {
66934 + static struct k_clock thread = {
66935 .clock_getres = thread_cpu_clock_getres,
66936 .clock_get = thread_cpu_clock_get,
66937 .timer_create = thread_cpu_timer_create,
66938 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
66939 index 69185ae..cc2847a 100644
66940 --- a/kernel/posix-timers.c
66941 +++ b/kernel/posix-timers.c
66942 @@ -43,6 +43,7 @@
66943 #include <linux/idr.h>
66944 #include <linux/posix-clock.h>
66945 #include <linux/posix-timers.h>
66946 +#include <linux/grsecurity.h>
66947 #include <linux/syscalls.h>
66948 #include <linux/wait.h>
66949 #include <linux/workqueue.h>
66950 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
66951 * which we beg off on and pass to do_sys_settimeofday().
66952 */
66953
66954 -static struct k_clock posix_clocks[MAX_CLOCKS];
66955 +static struct k_clock *posix_clocks[MAX_CLOCKS];
66956
66957 /*
66958 * These ones are defined below.
66959 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
66960 */
66961 static __init int init_posix_timers(void)
66962 {
66963 - struct k_clock clock_realtime = {
66964 + static struct k_clock clock_realtime = {
66965 .clock_getres = hrtimer_get_res,
66966 .clock_get = posix_clock_realtime_get,
66967 .clock_set = posix_clock_realtime_set,
66968 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
66969 .timer_get = common_timer_get,
66970 .timer_del = common_timer_del,
66971 };
66972 - struct k_clock clock_monotonic = {
66973 + static struct k_clock clock_monotonic = {
66974 .clock_getres = hrtimer_get_res,
66975 .clock_get = posix_ktime_get_ts,
66976 .nsleep = common_nsleep,
66977 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
66978 .timer_get = common_timer_get,
66979 .timer_del = common_timer_del,
66980 };
66981 - struct k_clock clock_monotonic_raw = {
66982 + static struct k_clock clock_monotonic_raw = {
66983 .clock_getres = hrtimer_get_res,
66984 .clock_get = posix_get_monotonic_raw,
66985 };
66986 - struct k_clock clock_realtime_coarse = {
66987 + static struct k_clock clock_realtime_coarse = {
66988 .clock_getres = posix_get_coarse_res,
66989 .clock_get = posix_get_realtime_coarse,
66990 };
66991 - struct k_clock clock_monotonic_coarse = {
66992 + static struct k_clock clock_monotonic_coarse = {
66993 .clock_getres = posix_get_coarse_res,
66994 .clock_get = posix_get_monotonic_coarse,
66995 };
66996 - struct k_clock clock_boottime = {
66997 + static struct k_clock clock_boottime = {
66998 .clock_getres = hrtimer_get_res,
66999 .clock_get = posix_get_boottime,
67000 .nsleep = common_nsleep,
67001 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
67002 return;
67003 }
67004
67005 - posix_clocks[clock_id] = *new_clock;
67006 + posix_clocks[clock_id] = new_clock;
67007 }
67008 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
67009
67010 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
67011 return (id & CLOCKFD_MASK) == CLOCKFD ?
67012 &clock_posix_dynamic : &clock_posix_cpu;
67013
67014 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
67015 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
67016 return NULL;
67017 - return &posix_clocks[id];
67018 + return posix_clocks[id];
67019 }
67020
67021 static int common_timer_create(struct k_itimer *new_timer)
67022 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
67023 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
67024 return -EFAULT;
67025
67026 + /* Only the CLOCK_REALTIME clock can be set; all other clocks
67027 + have their clock_set fptr set to a nosettime dummy function.
67028 + CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
67029 + call common_clock_set(), which calls do_sys_settimeofday(),
67030 + which we hook.
67031 + */
67032 +
67033 return kc->clock_set(which_clock, &new_tp);
67034 }
67035
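
[Editor's aside — not part of the patch] The kernel/posix-timers.c conversion above turns posix_clocks[] from an array of struct copies into an array of pointers, so every registered k_clock must now live in static (and therefore constifiable) storage instead of on an init function's stack. A sketch of registration under that model, assuming only the posix_timers_register_clock() signature visible in the hunk; example_clock and example_getres are made-up names:

static int example_getres(const clockid_t which_clock, struct timespec *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = 1;
	return 0;
}

/* must be static: posix_clocks[] now stores only the pointer */
static struct k_clock example_clock = {
	.clock_getres = example_getres,
};

static __init void example_register_clock(const clockid_t id)
{
	/* before the patch this copied *new_clock into the table;
	   now posix_clocks[id] = new_clock just stores the pointer */
	posix_timers_register_clock(id, &example_clock);
}
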
67036 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
67037 index d523593..68197a4 100644
67038 --- a/kernel/power/poweroff.c
67039 +++ b/kernel/power/poweroff.c
67040 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
67041 .enable_mask = SYSRQ_ENABLE_BOOT,
67042 };
67043
67044 -static int pm_sysrq_init(void)
67045 +static int __init pm_sysrq_init(void)
67046 {
67047 register_sysrq_key('o', &sysrq_poweroff_op);
67048 return 0;
67049 diff --git a/kernel/power/process.c b/kernel/power/process.c
67050 index 19db29f..33b52b6 100644
67051 --- a/kernel/power/process.c
67052 +++ b/kernel/power/process.c
67053 @@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
67054 u64 elapsed_csecs64;
67055 unsigned int elapsed_csecs;
67056 bool wakeup = false;
67057 + bool timedout = false;
67058
67059 do_gettimeofday(&start);
67060
67061 @@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
67062
67063 while (true) {
67064 todo = 0;
67065 + if (time_after(jiffies, end_time))
67066 + timedout = true;
67067 read_lock(&tasklist_lock);
67068 do_each_thread(g, p) {
67069 if (p == current || !freeze_task(p))
67070 @@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user_only)
67071 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
67072 * transition can't race with task state testing here.
67073 */
67074 - if (!task_is_stopped_or_traced(p) &&
67075 - !freezer_should_skip(p))
67076 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
67077 todo++;
67078 + if (timedout) {
67079 + printk(KERN_ERR "Task refusing to freeze:\n");
67080 + sched_show_task(p);
67081 + }
67082 + }
67083 } while_each_thread(g, p);
67084 read_unlock(&tasklist_lock);
67085
67086 @@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user_only)
67087 todo += wq_busy;
67088 }
67089
67090 - if (!todo || time_after(jiffies, end_time))
67091 + if (!todo || timedout)
67092 break;
67093
67094 if (pm_wakeup_pending()) {
67095 diff --git a/kernel/printk.c b/kernel/printk.c
67096 index b663c2c..1d6ba7a 100644
67097 --- a/kernel/printk.c
67098 +++ b/kernel/printk.c
67099 @@ -316,6 +316,11 @@ static int check_syslog_permissions(int type, bool from_file)
67100 if (from_file && type != SYSLOG_ACTION_OPEN)
67101 return 0;
67102
67103 +#ifdef CONFIG_GRKERNSEC_DMESG
67104 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
67105 + return -EPERM;
67106 +#endif
67107 +
67108 if (syslog_action_restricted(type)) {
67109 if (capable(CAP_SYSLOG))
67110 return 0;
67111 diff --git a/kernel/profile.c b/kernel/profile.c
67112 index 76b8e77..a2930e8 100644
67113 --- a/kernel/profile.c
67114 +++ b/kernel/profile.c
67115 @@ -39,7 +39,7 @@ struct profile_hit {
67116 /* Oprofile timer tick hook */
67117 static int (*timer_hook)(struct pt_regs *) __read_mostly;
67118
67119 -static atomic_t *prof_buffer;
67120 +static atomic_unchecked_t *prof_buffer;
67121 static unsigned long prof_len, prof_shift;
67122
67123 int prof_on __read_mostly;
67124 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
67125 hits[i].pc = 0;
67126 continue;
67127 }
67128 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67129 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67130 hits[i].hits = hits[i].pc = 0;
67131 }
67132 }
67133 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67134 * Add the current hit(s) and flush the write-queue out
67135 * to the global buffer:
67136 */
67137 - atomic_add(nr_hits, &prof_buffer[pc]);
67138 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
67139 for (i = 0; i < NR_PROFILE_HIT; ++i) {
67140 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67141 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67142 hits[i].pc = hits[i].hits = 0;
67143 }
67144 out:
67145 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67146 {
67147 unsigned long pc;
67148 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
67149 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67150 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67151 }
67152 #endif /* !CONFIG_SMP */
67153
67154 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
67155 return -EFAULT;
67156 buf++; p++; count--; read++;
67157 }
67158 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
67159 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
67160 if (copy_to_user(buf, (void *)pnt, count))
67161 return -EFAULT;
67162 read += count;
67163 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
67164 }
67165 #endif
67166 profile_discard_flip_buffers();
67167 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
67168 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
67169 return count;
67170 }
67171
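
[Editor's aside — not part of the patch] The kernel/profile.c conversions above (and the similar ones in rcutorture/rcutree below) follow one rule: counters whose wraparound is harmless move to atomic_unchecked_t, while real reference counts stay on atomic_t, which the PaX REFCOUNT feature instruments with overflow detection. A small sketch of that split, assuming those semantics; hit_count, obj_refcount and the example_* functions are invented for illustration:

static atomic_unchecked_t hit_count = ATOMIC_INIT(0);	/* statistics: wrap is harmless */
static atomic_t obj_refcount = ATOMIC_INIT(1);		/* refcount: overflow must be caught */

static void example_account_hit(void)
{
	atomic_inc_unchecked(&hit_count);	/* never triggers REFCOUNT */
}

static int example_get_object(void)
{
	/* a leaked reference here would eventually overflow; the checked
	   atomic_t lets REFCOUNT detect that instead of silently wrapping */
	return atomic_inc_not_zero(&obj_refcount);
}
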
67172 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
67173 index ee8d49b..bd3d790 100644
67174 --- a/kernel/ptrace.c
67175 +++ b/kernel/ptrace.c
67176 @@ -280,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
67177
67178 if (seize)
67179 flags |= PT_SEIZED;
67180 - if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
67181 + if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
67182 flags |= PT_PTRACE_CAP;
67183 task->ptrace = flags;
67184
67185 @@ -487,7 +487,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
67186 break;
67187 return -EIO;
67188 }
67189 - if (copy_to_user(dst, buf, retval))
67190 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
67191 return -EFAULT;
67192 copied += retval;
67193 src += retval;
67194 @@ -672,7 +672,7 @@ int ptrace_request(struct task_struct *child, long request,
67195 bool seized = child->ptrace & PT_SEIZED;
67196 int ret = -EIO;
67197 siginfo_t siginfo, *si;
67198 - void __user *datavp = (void __user *) data;
67199 + void __user *datavp = (__force void __user *) data;
67200 unsigned long __user *datalp = datavp;
67201 unsigned long flags;
67202
67203 @@ -874,14 +874,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
67204 goto out;
67205 }
67206
67207 + if (gr_handle_ptrace(child, request)) {
67208 + ret = -EPERM;
67209 + goto out_put_task_struct;
67210 + }
67211 +
67212 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67213 ret = ptrace_attach(child, request, addr, data);
67214 /*
67215 * Some architectures need to do book-keeping after
67216 * a ptrace attach.
67217 */
67218 - if (!ret)
67219 + if (!ret) {
67220 arch_ptrace_attach(child);
67221 + gr_audit_ptrace(child);
67222 + }
67223 goto out_put_task_struct;
67224 }
67225
67226 @@ -907,7 +914,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
67227 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
67228 if (copied != sizeof(tmp))
67229 return -EIO;
67230 - return put_user(tmp, (unsigned long __user *)data);
67231 + return put_user(tmp, (__force unsigned long __user *)data);
67232 }
67233
67234 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
67235 @@ -1017,14 +1024,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
67236 goto out;
67237 }
67238
67239 + if (gr_handle_ptrace(child, request)) {
67240 + ret = -EPERM;
67241 + goto out_put_task_struct;
67242 + }
67243 +
67244 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67245 ret = ptrace_attach(child, request, addr, data);
67246 /*
67247 * Some architectures need to do book-keeping after
67248 * a ptrace attach.
67249 */
67250 - if (!ret)
67251 + if (!ret) {
67252 arch_ptrace_attach(child);
67253 + gr_audit_ptrace(child);
67254 + }
67255 goto out_put_task_struct;
67256 }
67257
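
[Editor's aside — not part of the patch] Both the native and compat ptrace entry points above gain the same pair of hooks. A condensed view of the control flow they implement; example_checked_attach() is illustrative only, while the gr_handle_ptrace()/gr_audit_ptrace() signatures are those used in the hunks:

static long example_checked_attach(struct task_struct *child, long request,
				   unsigned long addr, unsigned long data)
{
	long ret;

	if (gr_handle_ptrace(child, request))	/* grsec policy veto */
		return -EPERM;

	ret = ptrace_attach(child, request, addr, data);
	if (!ret) {
		arch_ptrace_attach(child);	/* arch book-keeping */
		gr_audit_ptrace(child);		/* audit the successful attach */
	}
	return ret;
}
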
67258 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
67259 index 37a5444..eec170a 100644
67260 --- a/kernel/rcutiny.c
67261 +++ b/kernel/rcutiny.c
67262 @@ -46,7 +46,7 @@
67263 struct rcu_ctrlblk;
67264 static void invoke_rcu_callbacks(void);
67265 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
67266 -static void rcu_process_callbacks(struct softirq_action *unused);
67267 +static void rcu_process_callbacks(void);
67268 static void __call_rcu(struct rcu_head *head,
67269 void (*func)(struct rcu_head *rcu),
67270 struct rcu_ctrlblk *rcp);
67271 @@ -307,7 +307,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
67272 rcu_is_callbacks_kthread()));
67273 }
67274
67275 -static void rcu_process_callbacks(struct softirq_action *unused)
67276 +static void rcu_process_callbacks(void)
67277 {
67278 __rcu_process_callbacks(&rcu_sched_ctrlblk);
67279 __rcu_process_callbacks(&rcu_bh_ctrlblk);
67280 diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
67281 index 22ecea0..3789898 100644
67282 --- a/kernel/rcutiny_plugin.h
67283 +++ b/kernel/rcutiny_plugin.h
67284 @@ -955,7 +955,7 @@ static int rcu_kthread(void *arg)
67285 have_rcu_kthread_work = morework;
67286 local_irq_restore(flags);
67287 if (work)
67288 - rcu_process_callbacks(NULL);
67289 + rcu_process_callbacks();
67290 schedule_timeout_interruptible(1); /* Leave CPU for others. */
67291 }
67292
67293 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
67294 index a89b381..efdcad8 100644
67295 --- a/kernel/rcutorture.c
67296 +++ b/kernel/rcutorture.c
67297 @@ -158,12 +158,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
67298 { 0 };
67299 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
67300 { 0 };
67301 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67302 -static atomic_t n_rcu_torture_alloc;
67303 -static atomic_t n_rcu_torture_alloc_fail;
67304 -static atomic_t n_rcu_torture_free;
67305 -static atomic_t n_rcu_torture_mberror;
67306 -static atomic_t n_rcu_torture_error;
67307 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67308 +static atomic_unchecked_t n_rcu_torture_alloc;
67309 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
67310 +static atomic_unchecked_t n_rcu_torture_free;
67311 +static atomic_unchecked_t n_rcu_torture_mberror;
67312 +static atomic_unchecked_t n_rcu_torture_error;
67313 static long n_rcu_torture_boost_ktrerror;
67314 static long n_rcu_torture_boost_rterror;
67315 static long n_rcu_torture_boost_failure;
67316 @@ -253,11 +253,11 @@ rcu_torture_alloc(void)
67317
67318 spin_lock_bh(&rcu_torture_lock);
67319 if (list_empty(&rcu_torture_freelist)) {
67320 - atomic_inc(&n_rcu_torture_alloc_fail);
67321 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
67322 spin_unlock_bh(&rcu_torture_lock);
67323 return NULL;
67324 }
67325 - atomic_inc(&n_rcu_torture_alloc);
67326 + atomic_inc_unchecked(&n_rcu_torture_alloc);
67327 p = rcu_torture_freelist.next;
67328 list_del_init(p);
67329 spin_unlock_bh(&rcu_torture_lock);
67330 @@ -270,7 +270,7 @@ rcu_torture_alloc(void)
67331 static void
67332 rcu_torture_free(struct rcu_torture *p)
67333 {
67334 - atomic_inc(&n_rcu_torture_free);
67335 + atomic_inc_unchecked(&n_rcu_torture_free);
67336 spin_lock_bh(&rcu_torture_lock);
67337 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
67338 spin_unlock_bh(&rcu_torture_lock);
67339 @@ -390,7 +390,7 @@ rcu_torture_cb(struct rcu_head *p)
67340 i = rp->rtort_pipe_count;
67341 if (i > RCU_TORTURE_PIPE_LEN)
67342 i = RCU_TORTURE_PIPE_LEN;
67343 - atomic_inc(&rcu_torture_wcount[i]);
67344 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67345 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67346 rp->rtort_mbtest = 0;
67347 rcu_torture_free(rp);
67348 @@ -437,7 +437,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
67349 i = rp->rtort_pipe_count;
67350 if (i > RCU_TORTURE_PIPE_LEN)
67351 i = RCU_TORTURE_PIPE_LEN;
67352 - atomic_inc(&rcu_torture_wcount[i]);
67353 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67354 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67355 rp->rtort_mbtest = 0;
67356 list_del(&rp->rtort_free);
67357 @@ -926,7 +926,7 @@ rcu_torture_writer(void *arg)
67358 i = old_rp->rtort_pipe_count;
67359 if (i > RCU_TORTURE_PIPE_LEN)
67360 i = RCU_TORTURE_PIPE_LEN;
67361 - atomic_inc(&rcu_torture_wcount[i]);
67362 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67363 old_rp->rtort_pipe_count++;
67364 cur_ops->deferred_free(old_rp);
67365 }
67366 @@ -1007,7 +1007,7 @@ static void rcu_torture_timer(unsigned long unused)
67367 }
67368 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
67369 if (p->rtort_mbtest == 0)
67370 - atomic_inc(&n_rcu_torture_mberror);
67371 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67372 spin_lock(&rand_lock);
67373 cur_ops->read_delay(&rand);
67374 n_rcu_torture_timers++;
67375 @@ -1071,7 +1071,7 @@ rcu_torture_reader(void *arg)
67376 }
67377 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
67378 if (p->rtort_mbtest == 0)
67379 - atomic_inc(&n_rcu_torture_mberror);
67380 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67381 cur_ops->read_delay(&rand);
67382 preempt_disable();
67383 pipe_count = p->rtort_pipe_count;
67384 @@ -1133,10 +1133,10 @@ rcu_torture_printk(char *page)
67385 rcu_torture_current,
67386 rcu_torture_current_version,
67387 list_empty(&rcu_torture_freelist),
67388 - atomic_read(&n_rcu_torture_alloc),
67389 - atomic_read(&n_rcu_torture_alloc_fail),
67390 - atomic_read(&n_rcu_torture_free),
67391 - atomic_read(&n_rcu_torture_mberror),
67392 + atomic_read_unchecked(&n_rcu_torture_alloc),
67393 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
67394 + atomic_read_unchecked(&n_rcu_torture_free),
67395 + atomic_read_unchecked(&n_rcu_torture_mberror),
67396 n_rcu_torture_boost_ktrerror,
67397 n_rcu_torture_boost_rterror,
67398 n_rcu_torture_boost_failure,
67399 @@ -1146,7 +1146,7 @@ rcu_torture_printk(char *page)
67400 n_online_attempts,
67401 n_offline_successes,
67402 n_offline_attempts);
67403 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
67404 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
67405 n_rcu_torture_boost_ktrerror != 0 ||
67406 n_rcu_torture_boost_rterror != 0 ||
67407 n_rcu_torture_boost_failure != 0)
67408 @@ -1154,7 +1154,7 @@ rcu_torture_printk(char *page)
67409 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
67410 if (i > 1) {
67411 cnt += sprintf(&page[cnt], "!!! ");
67412 - atomic_inc(&n_rcu_torture_error);
67413 + atomic_inc_unchecked(&n_rcu_torture_error);
67414 WARN_ON_ONCE(1);
67415 }
67416 cnt += sprintf(&page[cnt], "Reader Pipe: ");
67417 @@ -1168,7 +1168,7 @@ rcu_torture_printk(char *page)
67418 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
67419 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67420 cnt += sprintf(&page[cnt], " %d",
67421 - atomic_read(&rcu_torture_wcount[i]));
67422 + atomic_read_unchecked(&rcu_torture_wcount[i]));
67423 }
67424 cnt += sprintf(&page[cnt], "\n");
67425 if (cur_ops->stats)
67426 @@ -1676,7 +1676,7 @@ rcu_torture_cleanup(void)
67427
67428 if (cur_ops->cleanup)
67429 cur_ops->cleanup();
67430 - if (atomic_read(&n_rcu_torture_error))
67431 + if (atomic_read_unchecked(&n_rcu_torture_error))
67432 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
67433 else if (n_online_successes != n_online_attempts ||
67434 n_offline_successes != n_offline_attempts)
67435 @@ -1744,17 +1744,17 @@ rcu_torture_init(void)
67436
67437 rcu_torture_current = NULL;
67438 rcu_torture_current_version = 0;
67439 - atomic_set(&n_rcu_torture_alloc, 0);
67440 - atomic_set(&n_rcu_torture_alloc_fail, 0);
67441 - atomic_set(&n_rcu_torture_free, 0);
67442 - atomic_set(&n_rcu_torture_mberror, 0);
67443 - atomic_set(&n_rcu_torture_error, 0);
67444 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
67445 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
67446 + atomic_set_unchecked(&n_rcu_torture_free, 0);
67447 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
67448 + atomic_set_unchecked(&n_rcu_torture_error, 0);
67449 n_rcu_torture_boost_ktrerror = 0;
67450 n_rcu_torture_boost_rterror = 0;
67451 n_rcu_torture_boost_failure = 0;
67452 n_rcu_torture_boosts = 0;
67453 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
67454 - atomic_set(&rcu_torture_wcount[i], 0);
67455 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
67456 for_each_possible_cpu(cpu) {
67457 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67458 per_cpu(rcu_torture_count, cpu)[i] = 0;
67459 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
67460 index d0c5baf..109b2e7 100644
67461 --- a/kernel/rcutree.c
67462 +++ b/kernel/rcutree.c
67463 @@ -357,9 +357,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
67464 rcu_prepare_for_idle(smp_processor_id());
67465 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67466 smp_mb__before_atomic_inc(); /* See above. */
67467 - atomic_inc(&rdtp->dynticks);
67468 + atomic_inc_unchecked(&rdtp->dynticks);
67469 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
67470 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67471 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67472
67473 /*
67474 * The idle task is not permitted to enter the idle loop while
67475 @@ -448,10 +448,10 @@ void rcu_irq_exit(void)
67476 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
67477 {
67478 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
67479 - atomic_inc(&rdtp->dynticks);
67480 + atomic_inc_unchecked(&rdtp->dynticks);
67481 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67482 smp_mb__after_atomic_inc(); /* See above. */
67483 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67484 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67485 rcu_cleanup_after_idle(smp_processor_id());
67486 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
67487 if (!is_idle_task(current)) {
67488 @@ -545,14 +545,14 @@ void rcu_nmi_enter(void)
67489 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
67490
67491 if (rdtp->dynticks_nmi_nesting == 0 &&
67492 - (atomic_read(&rdtp->dynticks) & 0x1))
67493 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
67494 return;
67495 rdtp->dynticks_nmi_nesting++;
67496 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
67497 - atomic_inc(&rdtp->dynticks);
67498 + atomic_inc_unchecked(&rdtp->dynticks);
67499 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67500 smp_mb__after_atomic_inc(); /* See above. */
67501 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67502 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67503 }
67504
67505 /**
67506 @@ -571,9 +571,9 @@ void rcu_nmi_exit(void)
67507 return;
67508 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67509 smp_mb__before_atomic_inc(); /* See above. */
67510 - atomic_inc(&rdtp->dynticks);
67511 + atomic_inc_unchecked(&rdtp->dynticks);
67512 smp_mb__after_atomic_inc(); /* Force delay to next write. */
67513 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67514 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67515 }
67516
67517 #ifdef CONFIG_PROVE_RCU
67518 @@ -589,7 +589,7 @@ int rcu_is_cpu_idle(void)
67519 int ret;
67520
67521 preempt_disable();
67522 - ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
67523 + ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
67524 preempt_enable();
67525 return ret;
67526 }
67527 @@ -659,7 +659,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
67528 */
67529 static int dyntick_save_progress_counter(struct rcu_data *rdp)
67530 {
67531 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
67532 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67533 return (rdp->dynticks_snap & 0x1) == 0;
67534 }
67535
67536 @@ -674,7 +674,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
67537 unsigned int curr;
67538 unsigned int snap;
67539
67540 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
67541 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67542 snap = (unsigned int)rdp->dynticks_snap;
67543
67544 /*
67545 @@ -704,10 +704,10 @@ static int jiffies_till_stall_check(void)
67546 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
67547 */
67548 if (till_stall_check < 3) {
67549 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
67550 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
67551 till_stall_check = 3;
67552 } else if (till_stall_check > 300) {
67553 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
67554 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
67555 till_stall_check = 300;
67556 }
67557 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
67558 @@ -1766,7 +1766,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
67559 /*
67560 * Do RCU core processing for the current CPU.
67561 */
67562 -static void rcu_process_callbacks(struct softirq_action *unused)
67563 +static void rcu_process_callbacks(void)
67564 {
67565 trace_rcu_utilization("Start RCU core");
67566 __rcu_process_callbacks(&rcu_sched_state,
67567 @@ -1949,8 +1949,8 @@ void synchronize_rcu_bh(void)
67568 }
67569 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
67570
67571 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
67572 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
67573 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
67574 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
67575
67576 static int synchronize_sched_expedited_cpu_stop(void *data)
67577 {
67578 @@ -2011,7 +2011,7 @@ void synchronize_sched_expedited(void)
67579 int firstsnap, s, snap, trycount = 0;
67580
67581 /* Note that atomic_inc_return() implies full memory barrier. */
67582 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
67583 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
67584 get_online_cpus();
67585 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
67586
67587 @@ -2033,7 +2033,7 @@ void synchronize_sched_expedited(void)
67588 }
67589
67590 /* Check to see if someone else did our work for us. */
67591 - s = atomic_read(&sync_sched_expedited_done);
67592 + s = atomic_read_unchecked(&sync_sched_expedited_done);
67593 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
67594 smp_mb(); /* ensure test happens before caller kfree */
67595 return;
67596 @@ -2048,7 +2048,7 @@ void synchronize_sched_expedited(void)
67597 * grace period works for us.
67598 */
67599 get_online_cpus();
67600 - snap = atomic_read(&sync_sched_expedited_started);
67601 + snap = atomic_read_unchecked(&sync_sched_expedited_started);
67602 smp_mb(); /* ensure read is before try_stop_cpus(). */
67603 }
67604
67605 @@ -2059,12 +2059,12 @@ void synchronize_sched_expedited(void)
67606 * than we did beat us to the punch.
67607 */
67608 do {
67609 - s = atomic_read(&sync_sched_expedited_done);
67610 + s = atomic_read_unchecked(&sync_sched_expedited_done);
67611 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
67612 smp_mb(); /* ensure test happens before caller kfree */
67613 break;
67614 }
67615 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
67616 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
67617
67618 put_online_cpus();
67619 }
67620 @@ -2262,7 +2262,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
67621 rdp->qlen = 0;
67622 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
67623 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
67624 - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
67625 + WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
67626 rdp->cpu = cpu;
67627 rdp->rsp = rsp;
67628 raw_spin_unlock_irqrestore(&rnp->lock, flags);
67629 @@ -2290,8 +2290,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
67630 rdp->n_force_qs_snap = rsp->n_force_qs;
67631 rdp->blimit = blimit;
67632 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
67633 - atomic_set(&rdp->dynticks->dynticks,
67634 - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
67635 + atomic_set_unchecked(&rdp->dynticks->dynticks,
67636 + (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
67637 rcu_prepare_for_idle_init(cpu);
67638 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
67639
67640 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
67641 index cdd1be0..5b2efb4 100644
67642 --- a/kernel/rcutree.h
67643 +++ b/kernel/rcutree.h
67644 @@ -87,7 +87,7 @@ struct rcu_dynticks {
67645 long long dynticks_nesting; /* Track irq/process nesting level. */
67646 /* Process level is worth LLONG_MAX/2. */
67647 int dynticks_nmi_nesting; /* Track NMI nesting level. */
67648 - atomic_t dynticks; /* Even value for idle, else odd. */
67649 + atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
67650 };
67651
67652 /* RCU's kthread states for tracing. */
67653 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
67654 index c023464..7f57225 100644
67655 --- a/kernel/rcutree_plugin.h
67656 +++ b/kernel/rcutree_plugin.h
67657 @@ -909,7 +909,7 @@ void synchronize_rcu_expedited(void)
67658
67659 /* Clean up and exit. */
67660 smp_mb(); /* ensure expedited GP seen before counter increment. */
67661 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
67662 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
67663 unlock_mb_ret:
67664 mutex_unlock(&sync_rcu_preempt_exp_mutex);
67665 mb_ret:
67666 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
67667 index ed459ed..a03c3fa 100644
67668 --- a/kernel/rcutree_trace.c
67669 +++ b/kernel/rcutree_trace.c
67670 @@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
67671 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
67672 rdp->qs_pending);
67673 seq_printf(m, " dt=%d/%llx/%d df=%lu",
67674 - atomic_read(&rdp->dynticks->dynticks),
67675 + atomic_read_unchecked(&rdp->dynticks->dynticks),
67676 rdp->dynticks->dynticks_nesting,
67677 rdp->dynticks->dynticks_nmi_nesting,
67678 rdp->dynticks_fqs);
67679 @@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
67680 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
67681 rdp->qs_pending);
67682 seq_printf(m, ",%d,%llx,%d,%lu",
67683 - atomic_read(&rdp->dynticks->dynticks),
67684 + atomic_read_unchecked(&rdp->dynticks->dynticks),
67685 rdp->dynticks->dynticks_nesting,
67686 rdp->dynticks->dynticks_nmi_nesting,
67687 rdp->dynticks_fqs);
67688 diff --git a/kernel/resource.c b/kernel/resource.c
67689 index 7e8ea66..1efd11f 100644
67690 --- a/kernel/resource.c
67691 +++ b/kernel/resource.c
67692 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
67693
67694 static int __init ioresources_init(void)
67695 {
67696 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67697 +#ifdef CONFIG_GRKERNSEC_PROC_USER
67698 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
67699 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
67700 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67701 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
67702 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
67703 +#endif
67704 +#else
67705 proc_create("ioports", 0, NULL, &proc_ioports_operations);
67706 proc_create("iomem", 0, NULL, &proc_iomem_operations);
67707 +#endif
67708 return 0;
67709 }
67710 __initcall(ioresources_init);
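
[Editor's aside — not part of the patch] proc_modules_init() earlier and ioresources_init() here repeat the same #ifdef ladder for choosing the /proc entry mode under the GRKERNSEC_PROC options. As a sketch only, the mode selection could be expressed with a hypothetical helper like the one below (the patch itself does not factor it out):

static inline umode_t grsec_example_proc_mode(void)
{
#if defined(CONFIG_GRKERNSEC_PROC_USER)
	return S_IRUSR;			/* readable by root only */
#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
	return S_IRUSR | S_IRGRP;	/* root plus the configured group */
#else
	return 0;			/* 0 keeps the procfs default mode */
#endif
}

/* usage: proc_create("iomem", grsec_example_proc_mode(), NULL, &proc_iomem_operations); */
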
67711 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
67712 index 98ec494..4241d6d 100644
67713 --- a/kernel/rtmutex-tester.c
67714 +++ b/kernel/rtmutex-tester.c
67715 @@ -20,7 +20,7 @@
67716 #define MAX_RT_TEST_MUTEXES 8
67717
67718 static spinlock_t rttest_lock;
67719 -static atomic_t rttest_event;
67720 +static atomic_unchecked_t rttest_event;
67721
67722 struct test_thread_data {
67723 int opcode;
67724 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67725
67726 case RTTEST_LOCKCONT:
67727 td->mutexes[td->opdata] = 1;
67728 - td->event = atomic_add_return(1, &rttest_event);
67729 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67730 return 0;
67731
67732 case RTTEST_RESET:
67733 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67734 return 0;
67735
67736 case RTTEST_RESETEVENT:
67737 - atomic_set(&rttest_event, 0);
67738 + atomic_set_unchecked(&rttest_event, 0);
67739 return 0;
67740
67741 default:
67742 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67743 return ret;
67744
67745 td->mutexes[id] = 1;
67746 - td->event = atomic_add_return(1, &rttest_event);
67747 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67748 rt_mutex_lock(&mutexes[id]);
67749 - td->event = atomic_add_return(1, &rttest_event);
67750 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67751 td->mutexes[id] = 4;
67752 return 0;
67753
67754 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67755 return ret;
67756
67757 td->mutexes[id] = 1;
67758 - td->event = atomic_add_return(1, &rttest_event);
67759 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67760 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
67761 - td->event = atomic_add_return(1, &rttest_event);
67762 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67763 td->mutexes[id] = ret ? 0 : 4;
67764 return ret ? -EINTR : 0;
67765
67766 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67767 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
67768 return ret;
67769
67770 - td->event = atomic_add_return(1, &rttest_event);
67771 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67772 rt_mutex_unlock(&mutexes[id]);
67773 - td->event = atomic_add_return(1, &rttest_event);
67774 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67775 td->mutexes[id] = 0;
67776 return 0;
67777
67778 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67779 break;
67780
67781 td->mutexes[dat] = 2;
67782 - td->event = atomic_add_return(1, &rttest_event);
67783 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67784 break;
67785
67786 default:
67787 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67788 return;
67789
67790 td->mutexes[dat] = 3;
67791 - td->event = atomic_add_return(1, &rttest_event);
67792 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67793 break;
67794
67795 case RTTEST_LOCKNOWAIT:
67796 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67797 return;
67798
67799 td->mutexes[dat] = 1;
67800 - td->event = atomic_add_return(1, &rttest_event);
67801 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67802 return;
67803
67804 default:
67805 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
67806 index 0984a21..939f183 100644
67807 --- a/kernel/sched/auto_group.c
67808 +++ b/kernel/sched/auto_group.c
67809 @@ -11,7 +11,7 @@
67810
67811 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
67812 static struct autogroup autogroup_default;
67813 -static atomic_t autogroup_seq_nr;
67814 +static atomic_unchecked_t autogroup_seq_nr;
67815
67816 void __init autogroup_init(struct task_struct *init_task)
67817 {
67818 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
67819
67820 kref_init(&ag->kref);
67821 init_rwsem(&ag->lock);
67822 - ag->id = atomic_inc_return(&autogroup_seq_nr);
67823 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
67824 ag->tg = tg;
67825 #ifdef CONFIG_RT_GROUP_SCHED
67826 /*
67827 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
67828 index e5212ae..2fcf98d 100644
67829 --- a/kernel/sched/core.c
67830 +++ b/kernel/sched/core.c
67831 @@ -3907,6 +3907,8 @@ int can_nice(const struct task_struct *p, const int nice)
67832 /* convert nice value [19,-20] to rlimit style value [1,40] */
67833 int nice_rlim = 20 - nice;
67834
67835 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
67836 +
67837 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
67838 capable(CAP_SYS_NICE));
67839 }
67840 @@ -3940,7 +3942,8 @@ SYSCALL_DEFINE1(nice, int, increment)
67841 if (nice > 19)
67842 nice = 19;
67843
67844 - if (increment < 0 && !can_nice(current, nice))
67845 + if (increment < 0 && (!can_nice(current, nice) ||
67846 + gr_handle_chroot_nice()))
67847 return -EPERM;
67848
67849 retval = security_task_setnice(current, nice);
67850 @@ -4097,6 +4100,7 @@ recheck:
67851 unsigned long rlim_rtprio =
67852 task_rlimit(p, RLIMIT_RTPRIO);
67853
67854 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
67855 /* can't set/change the rt policy */
67856 if (policy != p->policy && !rlim_rtprio)
67857 return -EPERM;
67858 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
67859 index e955364..eacd2a4 100644
67860 --- a/kernel/sched/fair.c
67861 +++ b/kernel/sched/fair.c
67862 @@ -5107,7 +5107,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
67863 * run_rebalance_domains is triggered when needed from the scheduler tick.
67864 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
67865 */
67866 -static void run_rebalance_domains(struct softirq_action *h)
67867 +static void run_rebalance_domains(void)
67868 {
67869 int this_cpu = smp_processor_id();
67870 struct rq *this_rq = cpu_rq(this_cpu);
67871 diff --git a/kernel/signal.c b/kernel/signal.c
67872 index 17afcaf..4500b05 100644
67873 --- a/kernel/signal.c
67874 +++ b/kernel/signal.c
67875 @@ -47,12 +47,12 @@ static struct kmem_cache *sigqueue_cachep;
67876
67877 int print_fatal_signals __read_mostly;
67878
67879 -static void __user *sig_handler(struct task_struct *t, int sig)
67880 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
67881 {
67882 return t->sighand->action[sig - 1].sa.sa_handler;
67883 }
67884
67885 -static int sig_handler_ignored(void __user *handler, int sig)
67886 +static int sig_handler_ignored(__sighandler_t handler, int sig)
67887 {
67888 /* Is it explicitly or implicitly ignored? */
67889 return handler == SIG_IGN ||
67890 @@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
67891
67892 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
67893 {
67894 - void __user *handler;
67895 + __sighandler_t handler;
67896
67897 handler = sig_handler(t, sig);
67898
67899 @@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
67900 atomic_inc(&user->sigpending);
67901 rcu_read_unlock();
67902
67903 + if (!override_rlimit)
67904 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
67905 +
67906 if (override_rlimit ||
67907 atomic_read(&user->sigpending) <=
67908 task_rlimit(t, RLIMIT_SIGPENDING)) {
67909 @@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
67910
67911 int unhandled_signal(struct task_struct *tsk, int sig)
67912 {
67913 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
67914 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
67915 if (is_global_init(tsk))
67916 return 1;
67917 if (handler != SIG_IGN && handler != SIG_DFL)
67918 @@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
67919 }
67920 }
67921
67922 + /* allow glibc communication via tgkill to other threads in our
67923 + thread group */
67924 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
67925 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
67926 + && gr_handle_signal(t, sig))
67927 + return -EPERM;
67928 +
67929 return security_task_kill(t, info, sig, 0);
67930 }
67931
67932 @@ -1204,7 +1214,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
67933 return send_signal(sig, info, p, 1);
67934 }
67935
67936 -static int
67937 +int
67938 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67939 {
67940 return send_signal(sig, info, t, 0);
67941 @@ -1241,6 +1251,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67942 unsigned long int flags;
67943 int ret, blocked, ignored;
67944 struct k_sigaction *action;
67945 + int is_unhandled = 0;
67946
67947 spin_lock_irqsave(&t->sighand->siglock, flags);
67948 action = &t->sighand->action[sig-1];
67949 @@ -1255,9 +1266,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67950 }
67951 if (action->sa.sa_handler == SIG_DFL)
67952 t->signal->flags &= ~SIGNAL_UNKILLABLE;
67953 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
67954 + is_unhandled = 1;
67955 ret = specific_send_sig_info(sig, info, t);
67956 spin_unlock_irqrestore(&t->sighand->siglock, flags);
67957
67958 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
67959 + normal operation */
67960 + if (is_unhandled) {
67961 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
67962 + gr_handle_crash(t, sig);
67963 + }
67964 +
67965 return ret;
67966 }
67967
67968 @@ -1324,8 +1344,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
67969 ret = check_kill_permission(sig, info, p);
67970 rcu_read_unlock();
67971
67972 - if (!ret && sig)
67973 + if (!ret && sig) {
67974 ret = do_send_sig_info(sig, info, p, true);
67975 + if (!ret)
67976 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
67977 + }
67978
67979 return ret;
67980 }
67981 @@ -2840,7 +2863,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
67982 int error = -ESRCH;
67983
67984 rcu_read_lock();
67985 - p = find_task_by_vpid(pid);
67986 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67987 + /* allow glibc communication via tgkill to other threads in our
67988 + thread group */
67989 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
67990 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
67991 + p = find_task_by_vpid_unrestricted(pid);
67992 + else
67993 +#endif
67994 + p = find_task_by_vpid(pid);
67995 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
67996 error = check_kill_permission(sig, info, p);
67997 /*
67998 diff --git a/kernel/smp.c b/kernel/smp.c
67999 index 2f8b10e..a41bc14 100644
68000 --- a/kernel/smp.c
68001 +++ b/kernel/smp.c
68002 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
68003 }
68004 EXPORT_SYMBOL(smp_call_function);
68005
68006 -void ipi_call_lock(void)
68007 +void ipi_call_lock(void) __acquires(call_function.lock)
68008 {
68009 raw_spin_lock(&call_function.lock);
68010 }
68011
68012 -void ipi_call_unlock(void)
68013 +void ipi_call_unlock(void) __releases(call_function.lock)
68014 {
68015 raw_spin_unlock(&call_function.lock);
68016 }
68017
68018 -void ipi_call_lock_irq(void)
68019 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
68020 {
68021 raw_spin_lock_irq(&call_function.lock);
68022 }
68023
68024 -void ipi_call_unlock_irq(void)
68025 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
68026 {
68027 raw_spin_unlock_irq(&call_function.lock);
68028 }
68029 diff --git a/kernel/softirq.c b/kernel/softirq.c
68030 index 671f959..91c51cb 100644
68031 --- a/kernel/softirq.c
68032 +++ b/kernel/softirq.c
68033 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
68034
68035 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
68036
68037 -char *softirq_to_name[NR_SOFTIRQS] = {
68038 +const char * const softirq_to_name[NR_SOFTIRQS] = {
68039 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
68040 "TASKLET", "SCHED", "HRTIMER", "RCU"
68041 };
68042 @@ -235,7 +235,7 @@ restart:
68043 kstat_incr_softirqs_this_cpu(vec_nr);
68044
68045 trace_softirq_entry(vec_nr);
68046 - h->action(h);
68047 + h->action();
68048 trace_softirq_exit(vec_nr);
68049 if (unlikely(prev_count != preempt_count())) {
68050 printk(KERN_ERR "huh, entered softirq %u %s %p"
68051 @@ -381,9 +381,11 @@ void __raise_softirq_irqoff(unsigned int nr)
68052 or_softirq_pending(1UL << nr);
68053 }
68054
68055 -void open_softirq(int nr, void (*action)(struct softirq_action *))
68056 +void open_softirq(int nr, void (*action)(void))
68057 {
68058 - softirq_vec[nr].action = action;
68059 + pax_open_kernel();
68060 + *(void **)&softirq_vec[nr].action = action;
68061 + pax_close_kernel();
68062 }
68063
68064 /*
68065 @@ -437,7 +439,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
68066
68067 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
68068
68069 -static void tasklet_action(struct softirq_action *a)
68070 +static void tasklet_action(void)
68071 {
68072 struct tasklet_struct *list;
68073
68074 @@ -472,7 +474,7 @@ static void tasklet_action(struct softirq_action *a)
68075 }
68076 }
68077
68078 -static void tasklet_hi_action(struct softirq_action *a)
68079 +static void tasklet_hi_action(void)
68080 {
68081 struct tasklet_struct *list;
68082
68083 diff --git a/kernel/sys.c b/kernel/sys.c
68084 index e7006eb..8fb7c51 100644
68085 --- a/kernel/sys.c
68086 +++ b/kernel/sys.c
68087 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
68088 error = -EACCES;
68089 goto out;
68090 }
68091 +
68092 + if (gr_handle_chroot_setpriority(p, niceval)) {
68093 + error = -EACCES;
68094 + goto out;
68095 + }
68096 +
68097 no_nice = security_task_setnice(p, niceval);
68098 if (no_nice) {
68099 error = no_nice;
68100 @@ -581,6 +587,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
68101 goto error;
68102 }
68103
68104 + if (gr_check_group_change(new->gid, new->egid, -1))
68105 + goto error;
68106 +
68107 if (rgid != (gid_t) -1 ||
68108 (egid != (gid_t) -1 && egid != old->gid))
68109 new->sgid = new->egid;
68110 @@ -610,6 +619,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
68111 old = current_cred();
68112
68113 retval = -EPERM;
68114 +
68115 + if (gr_check_group_change(gid, gid, gid))
68116 + goto error;
68117 +
68118 if (nsown_capable(CAP_SETGID))
68119 new->gid = new->egid = new->sgid = new->fsgid = gid;
68120 else if (gid == old->gid || gid == old->sgid)
68121 @@ -627,7 +640,7 @@ error:
68122 /*
68123 * change the user struct in a credentials set to match the new UID
68124 */
68125 -static int set_user(struct cred *new)
68126 +int set_user(struct cred *new)
68127 {
68128 struct user_struct *new_user;
68129
68130 @@ -697,6 +710,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
68131 goto error;
68132 }
68133
68134 + if (gr_check_user_change(new->uid, new->euid, -1))
68135 + goto error;
68136 +
68137 if (new->uid != old->uid) {
68138 retval = set_user(new);
68139 if (retval < 0)
68140 @@ -741,6 +757,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
68141 old = current_cred();
68142
68143 retval = -EPERM;
68144 +
68145 + if (gr_check_crash_uid(uid))
68146 + goto error;
68147 + if (gr_check_user_change(uid, uid, uid))
68148 + goto error;
68149 +
68150 if (nsown_capable(CAP_SETUID)) {
68151 new->suid = new->uid = uid;
68152 if (uid != old->uid) {
68153 @@ -795,6 +817,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
68154 goto error;
68155 }
68156
68157 + if (gr_check_user_change(ruid, euid, -1))
68158 + goto error;
68159 +
68160 if (ruid != (uid_t) -1) {
68161 new->uid = ruid;
68162 if (ruid != old->uid) {
68163 @@ -859,6 +884,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
68164 goto error;
68165 }
68166
68167 + if (gr_check_group_change(rgid, egid, -1))
68168 + goto error;
68169 +
68170 if (rgid != (gid_t) -1)
68171 new->gid = rgid;
68172 if (egid != (gid_t) -1)
68173 @@ -905,6 +933,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68174 old = current_cred();
68175 old_fsuid = old->fsuid;
68176
68177 + if (gr_check_user_change(-1, -1, uid))
68178 + goto error;
68179 +
68180 if (uid == old->uid || uid == old->euid ||
68181 uid == old->suid || uid == old->fsuid ||
68182 nsown_capable(CAP_SETUID)) {
68183 @@ -915,6 +946,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68184 }
68185 }
68186
68187 +error:
68188 abort_creds(new);
68189 return old_fsuid;
68190
68191 @@ -941,12 +973,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
68192 if (gid == old->gid || gid == old->egid ||
68193 gid == old->sgid || gid == old->fsgid ||
68194 nsown_capable(CAP_SETGID)) {
68195 + if (gr_check_group_change(-1, -1, gid))
68196 + goto error;
68197 +
68198 if (gid != old_fsgid) {
68199 new->fsgid = gid;
68200 goto change_okay;
68201 }
68202 }
68203
68204 +error:
68205 abort_creds(new);
68206 return old_fsgid;
68207
68208 @@ -1198,7 +1234,10 @@ static int override_release(char __user *release, int len)
68209 }
68210 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
68211 snprintf(buf, len, "2.6.%u%s", v, rest);
68212 - ret = copy_to_user(release, buf, len);
68213 + if (len > sizeof(buf))
68214 + ret = -EFAULT;
68215 + else
68216 + ret = copy_to_user(release, buf, len);
68217 }
68218 return ret;
68219 }
68220 @@ -1252,19 +1291,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
68221 return -EFAULT;
68222
68223 down_read(&uts_sem);
68224 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
68225 + error = __copy_to_user(name->sysname, &utsname()->sysname,
68226 __OLD_UTS_LEN);
68227 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
68228 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
68229 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
68230 __OLD_UTS_LEN);
68231 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
68232 - error |= __copy_to_user(&name->release, &utsname()->release,
68233 + error |= __copy_to_user(name->release, &utsname()->release,
68234 __OLD_UTS_LEN);
68235 error |= __put_user(0, name->release + __OLD_UTS_LEN);
68236 - error |= __copy_to_user(&name->version, &utsname()->version,
68237 + error |= __copy_to_user(name->version, &utsname()->version,
68238 __OLD_UTS_LEN);
68239 error |= __put_user(0, name->version + __OLD_UTS_LEN);
68240 - error |= __copy_to_user(&name->machine, &utsname()->machine,
68241 + error |= __copy_to_user(name->machine, &utsname()->machine,
68242 __OLD_UTS_LEN);
68243 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
68244 up_read(&uts_sem);
68245 @@ -1847,7 +1886,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
68246 error = get_dumpable(me->mm);
68247 break;
68248 case PR_SET_DUMPABLE:
68249 - if (arg2 < 0 || arg2 > 1) {
68250 + if (arg2 > 1) {
68251 error = -EINVAL;
68252 break;
68253 }
68254 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
68255 index 4ab1187..0b75ced 100644
68256 --- a/kernel/sysctl.c
68257 +++ b/kernel/sysctl.c
68258 @@ -91,7 +91,6 @@
68259
68260
68261 #if defined(CONFIG_SYSCTL)
68262 -
68263 /* External variables not in a header file. */
68264 extern int sysctl_overcommit_memory;
68265 extern int sysctl_overcommit_ratio;
68266 @@ -169,10 +168,8 @@ static int proc_taint(struct ctl_table *table, int write,
68267 void __user *buffer, size_t *lenp, loff_t *ppos);
68268 #endif
68269
68270 -#ifdef CONFIG_PRINTK
68271 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68272 void __user *buffer, size_t *lenp, loff_t *ppos);
68273 -#endif
68274
68275 #ifdef CONFIG_MAGIC_SYSRQ
68276 /* Note: sysrq code uses it's own private copy */
68277 @@ -196,6 +193,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
68278
68279 #endif
68280
68281 +extern struct ctl_table grsecurity_table[];
68282 +
68283 static struct ctl_table kern_table[];
68284 static struct ctl_table vm_table[];
68285 static struct ctl_table fs_table[];
68286 @@ -210,6 +209,20 @@ extern struct ctl_table epoll_table[];
68287 int sysctl_legacy_va_layout;
68288 #endif
68289
68290 +#ifdef CONFIG_PAX_SOFTMODE
68291 +static ctl_table pax_table[] = {
68292 + {
68293 + .procname = "softmode",
68294 + .data = &pax_softmode,
68295 + .maxlen = sizeof(unsigned int),
68296 + .mode = 0600,
68297 + .proc_handler = &proc_dointvec,
68298 + },
68299 +
68300 + { }
68301 +};
68302 +#endif
68303 +
68304 /* The default sysctl tables: */
68305
68306 static struct ctl_table sysctl_base_table[] = {
68307 @@ -256,6 +269,22 @@ static int max_extfrag_threshold = 1000;
68308 #endif
68309
68310 static struct ctl_table kern_table[] = {
68311 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
68312 + {
68313 + .procname = "grsecurity",
68314 + .mode = 0500,
68315 + .child = grsecurity_table,
68316 + },
68317 +#endif
68318 +
68319 +#ifdef CONFIG_PAX_SOFTMODE
68320 + {
68321 + .procname = "pax",
68322 + .mode = 0500,
68323 + .child = pax_table,
68324 + },
68325 +#endif
68326 +
68327 {
68328 .procname = "sched_child_runs_first",
68329 .data = &sysctl_sched_child_runs_first,
68330 @@ -540,7 +569,7 @@ static struct ctl_table kern_table[] = {
68331 .data = &modprobe_path,
68332 .maxlen = KMOD_PATH_LEN,
68333 .mode = 0644,
68334 - .proc_handler = proc_dostring,
68335 + .proc_handler = proc_dostring_modpriv,
68336 },
68337 {
68338 .procname = "modules_disabled",
68339 @@ -707,16 +736,20 @@ static struct ctl_table kern_table[] = {
68340 .extra1 = &zero,
68341 .extra2 = &one,
68342 },
68343 +#endif
68344 {
68345 .procname = "kptr_restrict",
68346 .data = &kptr_restrict,
68347 .maxlen = sizeof(int),
68348 .mode = 0644,
68349 .proc_handler = proc_dointvec_minmax_sysadmin,
68350 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68351 + .extra1 = &two,
68352 +#else
68353 .extra1 = &zero,
68354 +#endif
68355 .extra2 = &two,
68356 },
68357 -#endif
68358 {
68359 .procname = "ngroups_max",
68360 .data = &ngroups_max,
68361 @@ -1215,6 +1248,13 @@ static struct ctl_table vm_table[] = {
68362 .proc_handler = proc_dointvec_minmax,
68363 .extra1 = &zero,
68364 },
68365 + {
68366 + .procname = "heap_stack_gap",
68367 + .data = &sysctl_heap_stack_gap,
68368 + .maxlen = sizeof(sysctl_heap_stack_gap),
68369 + .mode = 0644,
68370 + .proc_handler = proc_doulongvec_minmax,
68371 + },
68372 #else
68373 {
68374 .procname = "nr_trim_pages",
68375 @@ -1645,6 +1685,16 @@ int proc_dostring(struct ctl_table *table, int write,
68376 buffer, lenp, ppos);
68377 }
68378
68379 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68380 + void __user *buffer, size_t *lenp, loff_t *ppos)
68381 +{
68382 + if (write && !capable(CAP_SYS_MODULE))
68383 + return -EPERM;
68384 +
68385 + return _proc_do_string(table->data, table->maxlen, write,
68386 + buffer, lenp, ppos);
68387 +}
68388 +
68389 static size_t proc_skip_spaces(char **buf)
68390 {
68391 size_t ret;
68392 @@ -1750,6 +1800,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
68393 len = strlen(tmp);
68394 if (len > *size)
68395 len = *size;
68396 + if (len > sizeof(tmp))
68397 + len = sizeof(tmp);
68398 if (copy_to_user(*buf, tmp, len))
68399 return -EFAULT;
68400 *size -= len;
68401 @@ -1942,7 +1994,6 @@ static int proc_taint(struct ctl_table *table, int write,
68402 return err;
68403 }
68404
68405 -#ifdef CONFIG_PRINTK
68406 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68407 void __user *buffer, size_t *lenp, loff_t *ppos)
68408 {
68409 @@ -1951,7 +2002,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68410
68411 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
68412 }
68413 -#endif
68414
68415 struct do_proc_dointvec_minmax_conv_param {
68416 int *min;
68417 @@ -2066,8 +2116,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
68418 *i = val;
68419 } else {
68420 val = convdiv * (*i) / convmul;
68421 - if (!first)
68422 + if (!first) {
68423 err = proc_put_char(&buffer, &left, '\t');
68424 + if (err)
68425 + break;
68426 + }
68427 err = proc_put_long(&buffer, &left, val, false);
68428 if (err)
68429 break;
68430 @@ -2459,6 +2512,12 @@ int proc_dostring(struct ctl_table *table, int write,
68431 return -ENOSYS;
68432 }
68433
68434 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68435 + void __user *buffer, size_t *lenp, loff_t *ppos)
68436 +{
68437 + return -ENOSYS;
68438 +}
68439 +
68440 int proc_dointvec(struct ctl_table *table, int write,
68441 void __user *buffer, size_t *lenp, loff_t *ppos)
68442 {
68443 @@ -2515,5 +2574,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
68444 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
68445 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
68446 EXPORT_SYMBOL(proc_dostring);
68447 +EXPORT_SYMBOL(proc_dostring_modpriv);
68448 EXPORT_SYMBOL(proc_doulongvec_minmax);
68449 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
68450 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
68451 index a650694..aaeeb20 100644
68452 --- a/kernel/sysctl_binary.c
68453 +++ b/kernel/sysctl_binary.c
68454 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
68455 int i;
68456
68457 set_fs(KERNEL_DS);
68458 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68459 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68460 set_fs(old_fs);
68461 if (result < 0)
68462 goto out_kfree;
68463 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
68464 }
68465
68466 set_fs(KERNEL_DS);
68467 - result = vfs_write(file, buffer, str - buffer, &pos);
68468 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68469 set_fs(old_fs);
68470 if (result < 0)
68471 goto out_kfree;
68472 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
68473 int i;
68474
68475 set_fs(KERNEL_DS);
68476 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68477 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68478 set_fs(old_fs);
68479 if (result < 0)
68480 goto out_kfree;
68481 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
68482 }
68483
68484 set_fs(KERNEL_DS);
68485 - result = vfs_write(file, buffer, str - buffer, &pos);
68486 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68487 set_fs(old_fs);
68488 if (result < 0)
68489 goto out_kfree;
68490 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
68491 int i;
68492
68493 set_fs(KERNEL_DS);
68494 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68495 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68496 set_fs(old_fs);
68497 if (result < 0)
68498 goto out;
68499 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68500 __le16 dnaddr;
68501
68502 set_fs(KERNEL_DS);
68503 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68504 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68505 set_fs(old_fs);
68506 if (result < 0)
68507 goto out;
68508 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68509 le16_to_cpu(dnaddr) & 0x3ff);
68510
68511 set_fs(KERNEL_DS);
68512 - result = vfs_write(file, buf, len, &pos);
68513 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
68514 set_fs(old_fs);
68515 if (result < 0)
68516 goto out;
68517 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
68518 index e660464..c8b9e67 100644
68519 --- a/kernel/taskstats.c
68520 +++ b/kernel/taskstats.c
68521 @@ -27,9 +27,12 @@
68522 #include <linux/cgroup.h>
68523 #include <linux/fs.h>
68524 #include <linux/file.h>
68525 +#include <linux/grsecurity.h>
68526 #include <net/genetlink.h>
68527 #include <linux/atomic.h>
68528
68529 +extern int gr_is_taskstats_denied(int pid);
68530 +
68531 /*
68532 * Maximum length of a cpumask that can be specified in
68533 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
68534 @@ -556,6 +559,9 @@ err:
68535
68536 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
68537 {
68538 + if (gr_is_taskstats_denied(current->pid))
68539 + return -EACCES;
68540 +
68541 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
68542 return cmd_attr_register_cpumask(info);
68543 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
68544 diff --git a/kernel/time.c b/kernel/time.c
68545 index ba744cf..267b7c5 100644
68546 --- a/kernel/time.c
68547 +++ b/kernel/time.c
68548 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
68549 return error;
68550
68551 if (tz) {
68552 + /* we log in do_settimeofday called below, so don't log twice
68553 + */
68554 + if (!tv)
68555 + gr_log_timechange();
68556 +
68557 sys_tz = *tz;
68558 update_vsyscall_tz();
68559 if (firsttime) {
68560 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
68561 index 8a538c5..def79d4 100644
68562 --- a/kernel/time/alarmtimer.c
68563 +++ b/kernel/time/alarmtimer.c
68564 @@ -779,7 +779,7 @@ static int __init alarmtimer_init(void)
68565 struct platform_device *pdev;
68566 int error = 0;
68567 int i;
68568 - struct k_clock alarm_clock = {
68569 + static struct k_clock alarm_clock = {
68570 .clock_getres = alarm_clock_getres,
68571 .clock_get = alarm_clock_get,
68572 .timer_create = alarm_timer_create,
68573 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
68574 index f113755..ec24223 100644
68575 --- a/kernel/time/tick-broadcast.c
68576 +++ b/kernel/time/tick-broadcast.c
68577 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
68578 * then clear the broadcast bit.
68579 */
68580 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
68581 - int cpu = smp_processor_id();
68582 + cpu = smp_processor_id();
68583
68584 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
68585 tick_broadcast_clear_oneshot(cpu);
68586 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
68587 index d66b213..6947686 100644
68588 --- a/kernel/time/timekeeping.c
68589 +++ b/kernel/time/timekeeping.c
68590 @@ -14,6 +14,7 @@
68591 #include <linux/init.h>
68592 #include <linux/mm.h>
68593 #include <linux/sched.h>
68594 +#include <linux/grsecurity.h>
68595 #include <linux/syscore_ops.h>
68596 #include <linux/clocksource.h>
68597 #include <linux/jiffies.h>
68598 @@ -373,6 +374,8 @@ int do_settimeofday(const struct timespec *tv)
68599 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
68600 return -EINVAL;
68601
68602 + gr_log_timechange();
68603 +
68604 write_seqlock_irqsave(&timekeeper.lock, flags);
68605
68606 timekeeping_forward_now();
68607 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
68608 index 3258455..f35227d 100644
68609 --- a/kernel/time/timer_list.c
68610 +++ b/kernel/time/timer_list.c
68611 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
68612
68613 static void print_name_offset(struct seq_file *m, void *sym)
68614 {
68615 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68616 + SEQ_printf(m, "<%p>", NULL);
68617 +#else
68618 char symname[KSYM_NAME_LEN];
68619
68620 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
68621 SEQ_printf(m, "<%pK>", sym);
68622 else
68623 SEQ_printf(m, "%s", symname);
68624 +#endif
68625 }
68626
68627 static void
68628 @@ -112,7 +116,11 @@ next_one:
68629 static void
68630 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
68631 {
68632 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68633 + SEQ_printf(m, " .base: %p\n", NULL);
68634 +#else
68635 SEQ_printf(m, " .base: %pK\n", base);
68636 +#endif
68637 SEQ_printf(m, " .index: %d\n",
68638 base->index);
68639 SEQ_printf(m, " .resolution: %Lu nsecs\n",
68640 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
68641 {
68642 struct proc_dir_entry *pe;
68643
68644 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68645 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
68646 +#else
68647 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
68648 +#endif
68649 if (!pe)
68650 return -ENOMEM;
68651 return 0;
68652 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
68653 index 0b537f2..9e71eca 100644
68654 --- a/kernel/time/timer_stats.c
68655 +++ b/kernel/time/timer_stats.c
68656 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
68657 static unsigned long nr_entries;
68658 static struct entry entries[MAX_ENTRIES];
68659
68660 -static atomic_t overflow_count;
68661 +static atomic_unchecked_t overflow_count;
68662
68663 /*
68664 * The entries are in a hash-table, for fast lookup:
68665 @@ -140,7 +140,7 @@ static void reset_entries(void)
68666 nr_entries = 0;
68667 memset(entries, 0, sizeof(entries));
68668 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
68669 - atomic_set(&overflow_count, 0);
68670 + atomic_set_unchecked(&overflow_count, 0);
68671 }
68672
68673 static struct entry *alloc_entry(void)
68674 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68675 if (likely(entry))
68676 entry->count++;
68677 else
68678 - atomic_inc(&overflow_count);
68679 + atomic_inc_unchecked(&overflow_count);
68680
68681 out_unlock:
68682 raw_spin_unlock_irqrestore(lock, flags);
68683 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68684
68685 static void print_name_offset(struct seq_file *m, unsigned long addr)
68686 {
68687 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68688 + seq_printf(m, "<%p>", NULL);
68689 +#else
68690 char symname[KSYM_NAME_LEN];
68691
68692 if (lookup_symbol_name(addr, symname) < 0)
68693 seq_printf(m, "<%p>", (void *)addr);
68694 else
68695 seq_printf(m, "%s", symname);
68696 +#endif
68697 }
68698
68699 static int tstats_show(struct seq_file *m, void *v)
68700 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
68701
68702 seq_puts(m, "Timer Stats Version: v0.2\n");
68703 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
68704 - if (atomic_read(&overflow_count))
68705 + if (atomic_read_unchecked(&overflow_count))
68706 seq_printf(m, "Overflow: %d entries\n",
68707 - atomic_read(&overflow_count));
68708 + atomic_read_unchecked(&overflow_count));
68709
68710 for (i = 0; i < nr_entries; i++) {
68711 entry = entries + i;
68712 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
68713 {
68714 struct proc_dir_entry *pe;
68715
68716 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68717 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
68718 +#else
68719 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
68720 +#endif
68721 if (!pe)
68722 return -ENOMEM;
68723 return 0;
68724 diff --git a/kernel/timer.c b/kernel/timer.c
68725 index a297ffc..5e16b0b 100644
68726 --- a/kernel/timer.c
68727 +++ b/kernel/timer.c
68728 @@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
68729 /*
68730 * This function runs timers and the timer-tq in bottom half context.
68731 */
68732 -static void run_timer_softirq(struct softirq_action *h)
68733 +static void run_timer_softirq(void)
68734 {
68735 struct tvec_base *base = __this_cpu_read(tvec_bases);
68736
68737 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
68738 index c0bd030..62a1927 100644
68739 --- a/kernel/trace/blktrace.c
68740 +++ b/kernel/trace/blktrace.c
68741 @@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
68742 struct blk_trace *bt = filp->private_data;
68743 char buf[16];
68744
68745 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
68746 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
68747
68748 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
68749 }
68750 @@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
68751 return 1;
68752
68753 bt = buf->chan->private_data;
68754 - atomic_inc(&bt->dropped);
68755 + atomic_inc_unchecked(&bt->dropped);
68756 return 0;
68757 }
68758
68759 @@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
68760
68761 bt->dir = dir;
68762 bt->dev = dev;
68763 - atomic_set(&bt->dropped, 0);
68764 + atomic_set_unchecked(&bt->dropped, 0);
68765
68766 ret = -EIO;
68767 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
68768 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
68769 index 0fa92f6..89950b2 100644
68770 --- a/kernel/trace/ftrace.c
68771 +++ b/kernel/trace/ftrace.c
68772 @@ -1800,12 +1800,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
68773 if (unlikely(ftrace_disabled))
68774 return 0;
68775
68776 + ret = ftrace_arch_code_modify_prepare();
68777 + FTRACE_WARN_ON(ret);
68778 + if (ret)
68779 + return 0;
68780 +
68781 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
68782 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
68783 if (ret) {
68784 ftrace_bug(ret, ip);
68785 - return 0;
68786 }
68787 - return 1;
68788 + return ret ? 0 : 1;
68789 }
68790
68791 /*
68792 @@ -2917,7 +2922,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
68793
68794 int
68795 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
68796 - void *data)
68797 + void *data)
68798 {
68799 struct ftrace_func_probe *entry;
68800 struct ftrace_page *pg;
68801 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
68802 index 2a22255..cdcdd06 100644
68803 --- a/kernel/trace/trace.c
68804 +++ b/kernel/trace/trace.c
68805 @@ -4312,10 +4312,9 @@ static const struct file_operations tracing_dyn_info_fops = {
68806 };
68807 #endif
68808
68809 -static struct dentry *d_tracer;
68810 -
68811 struct dentry *tracing_init_dentry(void)
68812 {
68813 + static struct dentry *d_tracer;
68814 static int once;
68815
68816 if (d_tracer)
68817 @@ -4335,10 +4334,9 @@ struct dentry *tracing_init_dentry(void)
68818 return d_tracer;
68819 }
68820
68821 -static struct dentry *d_percpu;
68822 -
68823 struct dentry *tracing_dentry_percpu(void)
68824 {
68825 + static struct dentry *d_percpu;
68826 static int once;
68827 struct dentry *d_tracer;
68828
68829 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
68830 index 29111da..d190fe2 100644
68831 --- a/kernel/trace/trace_events.c
68832 +++ b/kernel/trace/trace_events.c
68833 @@ -1308,10 +1308,6 @@ static LIST_HEAD(ftrace_module_file_list);
68834 struct ftrace_module_file_ops {
68835 struct list_head list;
68836 struct module *mod;
68837 - struct file_operations id;
68838 - struct file_operations enable;
68839 - struct file_operations format;
68840 - struct file_operations filter;
68841 };
68842
68843 static struct ftrace_module_file_ops *
68844 @@ -1332,17 +1328,12 @@ trace_create_file_ops(struct module *mod)
68845
68846 file_ops->mod = mod;
68847
68848 - file_ops->id = ftrace_event_id_fops;
68849 - file_ops->id.owner = mod;
68850 -
68851 - file_ops->enable = ftrace_enable_fops;
68852 - file_ops->enable.owner = mod;
68853 -
68854 - file_ops->filter = ftrace_event_filter_fops;
68855 - file_ops->filter.owner = mod;
68856 -
68857 - file_ops->format = ftrace_event_format_fops;
68858 - file_ops->format.owner = mod;
68859 + pax_open_kernel();
68860 + *(void **)&mod->trace_id.owner = mod;
68861 + *(void **)&mod->trace_enable.owner = mod;
68862 + *(void **)&mod->trace_filter.owner = mod;
68863 + *(void **)&mod->trace_format.owner = mod;
68864 + pax_close_kernel();
68865
68866 list_add(&file_ops->list, &ftrace_module_file_list);
68867
68868 @@ -1366,8 +1357,8 @@ static void trace_module_add_events(struct module *mod)
68869
68870 for_each_event(call, start, end) {
68871 __trace_add_event_call(*call, mod,
68872 - &file_ops->id, &file_ops->enable,
68873 - &file_ops->filter, &file_ops->format);
68874 + &mod->trace_id, &mod->trace_enable,
68875 + &mod->trace_filter, &mod->trace_format);
68876 }
68877 }
68878
68879 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
68880 index 580a05e..9b31acb 100644
68881 --- a/kernel/trace/trace_kprobe.c
68882 +++ b/kernel/trace/trace_kprobe.c
68883 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68884 long ret;
68885 int maxlen = get_rloc_len(*(u32 *)dest);
68886 u8 *dst = get_rloc_data(dest);
68887 - u8 *src = addr;
68888 + const u8 __user *src = (const u8 __force_user *)addr;
68889 mm_segment_t old_fs = get_fs();
68890 if (!maxlen)
68891 return;
68892 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68893 pagefault_disable();
68894 do
68895 ret = __copy_from_user_inatomic(dst++, src++, 1);
68896 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
68897 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
68898 dst[-1] = '\0';
68899 pagefault_enable();
68900 set_fs(old_fs);
68901 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68902 ((u8 *)get_rloc_data(dest))[0] = '\0';
68903 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
68904 } else
68905 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
68906 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
68907 get_rloc_offs(*(u32 *)dest));
68908 }
68909 /* Return the length of string -- including null terminal byte */
68910 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
68911 set_fs(KERNEL_DS);
68912 pagefault_disable();
68913 do {
68914 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
68915 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
68916 len++;
68917 } while (c && ret == 0 && len < MAX_STRING_SIZE);
68918 pagefault_enable();
68919 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
68920 index fd3c8aa..5f324a6 100644
68921 --- a/kernel/trace/trace_mmiotrace.c
68922 +++ b/kernel/trace/trace_mmiotrace.c
68923 @@ -24,7 +24,7 @@ struct header_iter {
68924 static struct trace_array *mmio_trace_array;
68925 static bool overrun_detected;
68926 static unsigned long prev_overruns;
68927 -static atomic_t dropped_count;
68928 +static atomic_unchecked_t dropped_count;
68929
68930 static void mmio_reset_data(struct trace_array *tr)
68931 {
68932 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
68933
68934 static unsigned long count_overruns(struct trace_iterator *iter)
68935 {
68936 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
68937 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
68938 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
68939
68940 if (over > prev_overruns)
68941 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
68942 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
68943 sizeof(*entry), 0, pc);
68944 if (!event) {
68945 - atomic_inc(&dropped_count);
68946 + atomic_inc_unchecked(&dropped_count);
68947 return;
68948 }
68949 entry = ring_buffer_event_data(event);
68950 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
68951 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
68952 sizeof(*entry), 0, pc);
68953 if (!event) {
68954 - atomic_inc(&dropped_count);
68955 + atomic_inc_unchecked(&dropped_count);
68956 return;
68957 }
68958 entry = ring_buffer_event_data(event);
68959 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
68960 index df611a0..10d8b32 100644
68961 --- a/kernel/trace/trace_output.c
68962 +++ b/kernel/trace/trace_output.c
68963 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
68964
68965 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
68966 if (!IS_ERR(p)) {
68967 - p = mangle_path(s->buffer + s->len, p, "\n");
68968 + p = mangle_path(s->buffer + s->len, p, "\n\\");
68969 if (p) {
68970 s->len = p - s->buffer;
68971 return 1;
68972 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
68973 index d4545f4..a9010a1 100644
68974 --- a/kernel/trace/trace_stack.c
68975 +++ b/kernel/trace/trace_stack.c
68976 @@ -53,7 +53,7 @@ static inline void check_stack(void)
68977 return;
68978
68979 /* we do not handle interrupt stacks yet */
68980 - if (!object_is_on_stack(&this_size))
68981 + if (!object_starts_on_stack(&this_size))
68982 return;
68983
68984 local_irq_save(flags);
68985 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
68986 index 209b379..7f76423 100644
68987 --- a/kernel/trace/trace_workqueue.c
68988 +++ b/kernel/trace/trace_workqueue.c
68989 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
68990 int cpu;
68991 pid_t pid;
68992 /* Can be inserted from interrupt or user context, need to be atomic */
68993 - atomic_t inserted;
68994 + atomic_unchecked_t inserted;
68995 /*
68996 * Don't need to be atomic, works are serialized in a single workqueue thread
68997 * on a single CPU.
68998 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
68999 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
69000 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
69001 if (node->pid == wq_thread->pid) {
69002 - atomic_inc(&node->inserted);
69003 + atomic_inc_unchecked(&node->inserted);
69004 goto found;
69005 }
69006 }
69007 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
69008 tsk = get_pid_task(pid, PIDTYPE_PID);
69009 if (tsk) {
69010 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
69011 - atomic_read(&cws->inserted), cws->executed,
69012 + atomic_read_unchecked(&cws->inserted), cws->executed,
69013 tsk->comm);
69014 put_task_struct(tsk);
69015 }
69016 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
69017 index 6777153..8519f60 100644
69018 --- a/lib/Kconfig.debug
69019 +++ b/lib/Kconfig.debug
69020 @@ -1132,6 +1132,7 @@ config LATENCYTOP
69021 depends on DEBUG_KERNEL
69022 depends on STACKTRACE_SUPPORT
69023 depends on PROC_FS
69024 + depends on !GRKERNSEC_HIDESYM
69025 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
69026 select KALLSYMS
69027 select KALLSYMS_ALL
69028 diff --git a/lib/bitmap.c b/lib/bitmap.c
69029 index b5a8b6a..a69623c 100644
69030 --- a/lib/bitmap.c
69031 +++ b/lib/bitmap.c
69032 @@ -421,7 +421,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
69033 {
69034 int c, old_c, totaldigits, ndigits, nchunks, nbits;
69035 u32 chunk;
69036 - const char __user __force *ubuf = (const char __user __force *)buf;
69037 + const char __user *ubuf = (const char __force_user *)buf;
69038
69039 bitmap_zero(maskp, nmaskbits);
69040
69041 @@ -506,7 +506,7 @@ int bitmap_parse_user(const char __user *ubuf,
69042 {
69043 if (!access_ok(VERIFY_READ, ubuf, ulen))
69044 return -EFAULT;
69045 - return __bitmap_parse((const char __force *)ubuf,
69046 + return __bitmap_parse((const char __force_kernel *)ubuf,
69047 ulen, 1, maskp, nmaskbits);
69048
69049 }
69050 @@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
69051 {
69052 unsigned a, b;
69053 int c, old_c, totaldigits;
69054 - const char __user __force *ubuf = (const char __user __force *)buf;
69055 + const char __user *ubuf = (const char __force_user *)buf;
69056 int exp_digit, in_range;
69057
69058 totaldigits = c = 0;
69059 @@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
69060 {
69061 if (!access_ok(VERIFY_READ, ubuf, ulen))
69062 return -EFAULT;
69063 - return __bitmap_parselist((const char __force *)ubuf,
69064 + return __bitmap_parselist((const char __force_kernel *)ubuf,
69065 ulen, 1, maskp, nmaskbits);
69066 }
69067 EXPORT_SYMBOL(bitmap_parselist_user);
69068 diff --git a/lib/bug.c b/lib/bug.c
69069 index a28c141..2bd3d95 100644
69070 --- a/lib/bug.c
69071 +++ b/lib/bug.c
69072 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
69073 return BUG_TRAP_TYPE_NONE;
69074
69075 bug = find_bug(bugaddr);
69076 + if (!bug)
69077 + return BUG_TRAP_TYPE_NONE;
69078
69079 file = NULL;
69080 line = 0;
69081 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
69082 index 0ab9ae8..f01ceca 100644
69083 --- a/lib/debugobjects.c
69084 +++ b/lib/debugobjects.c
69085 @@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
69086 if (limit > 4)
69087 return;
69088
69089 - is_on_stack = object_is_on_stack(addr);
69090 + is_on_stack = object_starts_on_stack(addr);
69091 if (is_on_stack == onstack)
69092 return;
69093
69094 diff --git a/lib/devres.c b/lib/devres.c
69095 index 80b9c76..9e32279 100644
69096 --- a/lib/devres.c
69097 +++ b/lib/devres.c
69098 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
69099 void devm_iounmap(struct device *dev, void __iomem *addr)
69100 {
69101 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
69102 - (void *)addr));
69103 + (void __force *)addr));
69104 iounmap(addr);
69105 }
69106 EXPORT_SYMBOL(devm_iounmap);
69107 @@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
69108 {
69109 ioport_unmap(addr);
69110 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
69111 - devm_ioport_map_match, (void *)addr));
69112 + devm_ioport_map_match, (void __force *)addr));
69113 }
69114 EXPORT_SYMBOL(devm_ioport_unmap);
69115
69116 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
69117 index 13ef233..5241683 100644
69118 --- a/lib/dma-debug.c
69119 +++ b/lib/dma-debug.c
69120 @@ -924,7 +924,7 @@ out:
69121
69122 static void check_for_stack(struct device *dev, void *addr)
69123 {
69124 - if (object_is_on_stack(addr))
69125 + if (object_starts_on_stack(addr))
69126 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
69127 "stack [addr=%p]\n", addr);
69128 }
69129 diff --git a/lib/extable.c b/lib/extable.c
69130 index 4cac81e..63e9b8f 100644
69131 --- a/lib/extable.c
69132 +++ b/lib/extable.c
69133 @@ -13,6 +13,7 @@
69134 #include <linux/init.h>
69135 #include <linux/sort.h>
69136 #include <asm/uaccess.h>
69137 +#include <asm/pgtable.h>
69138
69139 #ifndef ARCH_HAS_SORT_EXTABLE
69140 /*
69141 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
69142 void sort_extable(struct exception_table_entry *start,
69143 struct exception_table_entry *finish)
69144 {
69145 + pax_open_kernel();
69146 sort(start, finish - start, sizeof(struct exception_table_entry),
69147 cmp_ex, NULL);
69148 + pax_close_kernel();
69149 }
69150
69151 #ifdef CONFIG_MODULES
69152 diff --git a/lib/inflate.c b/lib/inflate.c
69153 index 013a761..c28f3fc 100644
69154 --- a/lib/inflate.c
69155 +++ b/lib/inflate.c
69156 @@ -269,7 +269,7 @@ static void free(void *where)
69157 malloc_ptr = free_mem_ptr;
69158 }
69159 #else
69160 -#define malloc(a) kmalloc(a, GFP_KERNEL)
69161 +#define malloc(a) kmalloc((a), GFP_KERNEL)
69162 #define free(a) kfree(a)
69163 #endif
69164
69165 diff --git a/lib/ioremap.c b/lib/ioremap.c
69166 index 0c9216c..863bd89 100644
69167 --- a/lib/ioremap.c
69168 +++ b/lib/ioremap.c
69169 @@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
69170 unsigned long next;
69171
69172 phys_addr -= addr;
69173 - pmd = pmd_alloc(&init_mm, pud, addr);
69174 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
69175 if (!pmd)
69176 return -ENOMEM;
69177 do {
69178 @@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
69179 unsigned long next;
69180
69181 phys_addr -= addr;
69182 - pud = pud_alloc(&init_mm, pgd, addr);
69183 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
69184 if (!pud)
69185 return -ENOMEM;
69186 do {
69187 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
69188 index bd2bea9..6b3c95e 100644
69189 --- a/lib/is_single_threaded.c
69190 +++ b/lib/is_single_threaded.c
69191 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
69192 struct task_struct *p, *t;
69193 bool ret;
69194
69195 + if (!mm)
69196 + return true;
69197 +
69198 if (atomic_read(&task->signal->live) != 1)
69199 return false;
69200
69201 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
69202 index 3ac50dc..240bb7e 100644
69203 --- a/lib/radix-tree.c
69204 +++ b/lib/radix-tree.c
69205 @@ -79,7 +79,7 @@ struct radix_tree_preload {
69206 int nr;
69207 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
69208 };
69209 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
69210 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
69211
69212 static inline void *ptr_to_indirect(void *ptr)
69213 {
69214 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
69215 index abbabec..362988d 100644
69216 --- a/lib/vsprintf.c
69217 +++ b/lib/vsprintf.c
69218 @@ -16,6 +16,9 @@
69219 * - scnprintf and vscnprintf
69220 */
69221
69222 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69223 +#define __INCLUDED_BY_HIDESYM 1
69224 +#endif
69225 #include <stdarg.h>
69226 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
69227 #include <linux/types.h>
69228 @@ -433,7 +436,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
69229 char sym[KSYM_SYMBOL_LEN];
69230 if (ext == 'B')
69231 sprint_backtrace(sym, value);
69232 - else if (ext != 'f' && ext != 's')
69233 + else if (ext != 'f' && ext != 's' && ext != 'a')
69234 sprint_symbol(sym, value);
69235 else
69236 kallsyms_lookup(value, NULL, NULL, NULL, sym);
69237 @@ -809,7 +812,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
69238 return number(buf, end, *(const netdev_features_t *)addr, spec);
69239 }
69240
69241 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69242 +int kptr_restrict __read_mostly = 2;
69243 +#else
69244 int kptr_restrict __read_mostly;
69245 +#endif
69246
69247 /*
69248 * Show a '%p' thing. A kernel extension is that the '%p' is followed
69249 @@ -823,6 +830,8 @@ int kptr_restrict __read_mostly;
69250 * - 'S' For symbolic direct pointers with offset
69251 * - 's' For symbolic direct pointers without offset
69252 * - 'B' For backtraced symbolic direct pointers with offset
69253 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
69254 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
69255 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
69256 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
69257 * - 'M' For a 6-byte MAC address, it prints the address in the
69258 @@ -868,12 +877,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69259 {
69260 if (!ptr && *fmt != 'K') {
69261 /*
69262 - * Print (null) with the same width as a pointer so it makes
69263 + * Print (nil) with the same width as a pointer so it makes
69264 * tabular output look nice.
69265 */
69266 if (spec.field_width == -1)
69267 spec.field_width = 2 * sizeof(void *);
69268 - return string(buf, end, "(null)", spec);
69269 + return string(buf, end, "(nil)", spec);
69270 }
69271
69272 switch (*fmt) {
69273 @@ -883,6 +892,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69274 /* Fallthrough */
69275 case 'S':
69276 case 's':
69277 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69278 + break;
69279 +#else
69280 + return symbol_string(buf, end, ptr, spec, *fmt);
69281 +#endif
69282 + case 'A':
69283 + case 'a':
69284 case 'B':
69285 return symbol_string(buf, end, ptr, spec, *fmt);
69286 case 'R':
69287 @@ -1653,11 +1669,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69288 typeof(type) value; \
69289 if (sizeof(type) == 8) { \
69290 args = PTR_ALIGN(args, sizeof(u32)); \
69291 - *(u32 *)&value = *(u32 *)args; \
69292 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
69293 + *(u32 *)&value = *(const u32 *)args; \
69294 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
69295 } else { \
69296 args = PTR_ALIGN(args, sizeof(type)); \
69297 - value = *(typeof(type) *)args; \
69298 + value = *(const typeof(type) *)args; \
69299 } \
69300 args += sizeof(type); \
69301 value; \
69302 @@ -1720,7 +1736,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69303 case FORMAT_TYPE_STR: {
69304 const char *str_arg = args;
69305 args += strlen(str_arg) + 1;
69306 - str = string(str, end, (char *)str_arg, spec);
69307 + str = string(str, end, str_arg, spec);
69308 break;
69309 }
69310
69311 diff --git a/localversion-grsec b/localversion-grsec
69312 new file mode 100644
69313 index 0000000..7cd6065
69314 --- /dev/null
69315 +++ b/localversion-grsec
69316 @@ -0,0 +1 @@
69317 +-grsec
69318 diff --git a/mm/Kconfig b/mm/Kconfig
69319 index e338407..4210331 100644
69320 --- a/mm/Kconfig
69321 +++ b/mm/Kconfig
69322 @@ -247,10 +247,10 @@ config KSM
69323 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
69324
69325 config DEFAULT_MMAP_MIN_ADDR
69326 - int "Low address space to protect from user allocation"
69327 + int "Low address space to protect from user allocation"
69328 depends on MMU
69329 - default 4096
69330 - help
69331 + default 65536
69332 + help
69333 This is the portion of low virtual memory which should be protected
69334 from userspace allocation. Keeping a user from writing to low pages
69335 can help reduce the impact of kernel NULL pointer bugs.
69336 @@ -280,7 +280,7 @@ config MEMORY_FAILURE
69337
69338 config HWPOISON_INJECT
69339 tristate "HWPoison pages injector"
69340 - depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
69341 + depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
69342 select PROC_PAGE_MONITOR
69343
69344 config NOMMU_INITIAL_TRIM_EXCESS
69345 diff --git a/mm/filemap.c b/mm/filemap.c
69346 index 79c4b2b..596b417 100644
69347 --- a/mm/filemap.c
69348 +++ b/mm/filemap.c
69349 @@ -1762,7 +1762,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
69350 struct address_space *mapping = file->f_mapping;
69351
69352 if (!mapping->a_ops->readpage)
69353 - return -ENOEXEC;
69354 + return -ENODEV;
69355 file_accessed(file);
69356 vma->vm_ops = &generic_file_vm_ops;
69357 vma->vm_flags |= VM_CAN_NONLINEAR;
69358 @@ -2168,6 +2168,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
69359 *pos = i_size_read(inode);
69360
69361 if (limit != RLIM_INFINITY) {
69362 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
69363 if (*pos >= limit) {
69364 send_sig(SIGXFSZ, current, 0);
69365 return -EFBIG;
69366 diff --git a/mm/fremap.c b/mm/fremap.c
69367 index 9ed4fd4..c42648d 100644
69368 --- a/mm/fremap.c
69369 +++ b/mm/fremap.c
69370 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
69371 retry:
69372 vma = find_vma(mm, start);
69373
69374 +#ifdef CONFIG_PAX_SEGMEXEC
69375 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
69376 + goto out;
69377 +#endif
69378 +
69379 /*
69380 * Make sure the vma is shared, that it supports prefaulting,
69381 * and that the remapped range is valid and fully within
69382 diff --git a/mm/highmem.c b/mm/highmem.c
69383 index 57d82c6..e9e0552 100644
69384 --- a/mm/highmem.c
69385 +++ b/mm/highmem.c
69386 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
69387 * So no dangers, even with speculative execution.
69388 */
69389 page = pte_page(pkmap_page_table[i]);
69390 + pax_open_kernel();
69391 pte_clear(&init_mm, (unsigned long)page_address(page),
69392 &pkmap_page_table[i]);
69393 -
69394 + pax_close_kernel();
69395 set_page_address(page, NULL);
69396 need_flush = 1;
69397 }
69398 @@ -186,9 +187,11 @@ start:
69399 }
69400 }
69401 vaddr = PKMAP_ADDR(last_pkmap_nr);
69402 +
69403 + pax_open_kernel();
69404 set_pte_at(&init_mm, vaddr,
69405 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
69406 -
69407 + pax_close_kernel();
69408 pkmap_count[last_pkmap_nr] = 1;
69409 set_page_address(page, (void *)vaddr);
69410
69411 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
69412 index f0e5306..cb9398e 100644
69413 --- a/mm/huge_memory.c
69414 +++ b/mm/huge_memory.c
69415 @@ -733,7 +733,7 @@ out:
69416 * run pte_offset_map on the pmd, if an huge pmd could
69417 * materialize from under us from a different thread.
69418 */
69419 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
69420 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
69421 return VM_FAULT_OOM;
69422 /* if an huge pmd materialized from under us just retry later */
69423 if (unlikely(pmd_trans_huge(*pmd)))
69424 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
69425 index 263e177..3f36aec 100644
69426 --- a/mm/hugetlb.c
69427 +++ b/mm/hugetlb.c
69428 @@ -2446,6 +2446,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
69429 return 1;
69430 }
69431
69432 +#ifdef CONFIG_PAX_SEGMEXEC
69433 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
69434 +{
69435 + struct mm_struct *mm = vma->vm_mm;
69436 + struct vm_area_struct *vma_m;
69437 + unsigned long address_m;
69438 + pte_t *ptep_m;
69439 +
69440 + vma_m = pax_find_mirror_vma(vma);
69441 + if (!vma_m)
69442 + return;
69443 +
69444 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69445 + address_m = address + SEGMEXEC_TASK_SIZE;
69446 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
69447 + get_page(page_m);
69448 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
69449 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
69450 +}
69451 +#endif
69452 +
69453 /*
69454 * Hugetlb_cow() should be called with page lock of the original hugepage held.
69455 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
69456 @@ -2558,6 +2579,11 @@ retry_avoidcopy:
69457 make_huge_pte(vma, new_page, 1));
69458 page_remove_rmap(old_page);
69459 hugepage_add_new_anon_rmap(new_page, vma, address);
69460 +
69461 +#ifdef CONFIG_PAX_SEGMEXEC
69462 + pax_mirror_huge_pte(vma, address, new_page);
69463 +#endif
69464 +
69465 /* Make the old page be freed below */
69466 new_page = old_page;
69467 mmu_notifier_invalidate_range_end(mm,
69468 @@ -2712,6 +2738,10 @@ retry:
69469 && (vma->vm_flags & VM_SHARED)));
69470 set_huge_pte_at(mm, address, ptep, new_pte);
69471
69472 +#ifdef CONFIG_PAX_SEGMEXEC
69473 + pax_mirror_huge_pte(vma, address, page);
69474 +#endif
69475 +
69476 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
69477 /* Optimization, do the COW without a second fault */
69478 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
69479 @@ -2741,6 +2771,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69480 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
69481 struct hstate *h = hstate_vma(vma);
69482
69483 +#ifdef CONFIG_PAX_SEGMEXEC
69484 + struct vm_area_struct *vma_m;
69485 +#endif
69486 +
69487 address &= huge_page_mask(h);
69488
69489 ptep = huge_pte_offset(mm, address);
69490 @@ -2754,6 +2788,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69491 VM_FAULT_SET_HINDEX(h - hstates);
69492 }
69493
69494 +#ifdef CONFIG_PAX_SEGMEXEC
69495 + vma_m = pax_find_mirror_vma(vma);
69496 + if (vma_m) {
69497 + unsigned long address_m;
69498 +
69499 + if (vma->vm_start > vma_m->vm_start) {
69500 + address_m = address;
69501 + address -= SEGMEXEC_TASK_SIZE;
69502 + vma = vma_m;
69503 + h = hstate_vma(vma);
69504 + } else
69505 + address_m = address + SEGMEXEC_TASK_SIZE;
69506 +
69507 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
69508 + return VM_FAULT_OOM;
69509 + address_m &= HPAGE_MASK;
69510 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
69511 + }
69512 +#endif
69513 +
69514 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
69515 if (!ptep)
69516 return VM_FAULT_OOM;
69517 diff --git a/mm/internal.h b/mm/internal.h
69518 index 2189af4..f2ca332 100644
69519 --- a/mm/internal.h
69520 +++ b/mm/internal.h
69521 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
69522 * in mm/page_alloc.c
69523 */
69524 extern void __free_pages_bootmem(struct page *page, unsigned int order);
69525 +extern void free_compound_page(struct page *page);
69526 extern void prep_compound_page(struct page *page, unsigned long order);
69527 #ifdef CONFIG_MEMORY_FAILURE
69528 extern bool is_free_buddy_page(struct page *page);
69529 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
69530 index 45eb621..6ccd8ea 100644
69531 --- a/mm/kmemleak.c
69532 +++ b/mm/kmemleak.c
69533 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
69534
69535 for (i = 0; i < object->trace_len; i++) {
69536 void *ptr = (void *)object->trace[i];
69537 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
69538 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
69539 }
69540 }
69541
69542 diff --git a/mm/maccess.c b/mm/maccess.c
69543 index d53adf9..03a24bf 100644
69544 --- a/mm/maccess.c
69545 +++ b/mm/maccess.c
69546 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
69547 set_fs(KERNEL_DS);
69548 pagefault_disable();
69549 ret = __copy_from_user_inatomic(dst,
69550 - (__force const void __user *)src, size);
69551 + (const void __force_user *)src, size);
69552 pagefault_enable();
69553 set_fs(old_fs);
69554
69555 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
69556
69557 set_fs(KERNEL_DS);
69558 pagefault_disable();
69559 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
69560 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
69561 pagefault_enable();
69562 set_fs(old_fs);
69563
69564 diff --git a/mm/madvise.c b/mm/madvise.c
69565 index 1ccbba5..79e16f9 100644
69566 --- a/mm/madvise.c
69567 +++ b/mm/madvise.c
69568 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
69569 pgoff_t pgoff;
69570 unsigned long new_flags = vma->vm_flags;
69571
69572 +#ifdef CONFIG_PAX_SEGMEXEC
69573 + struct vm_area_struct *vma_m;
69574 +#endif
69575 +
69576 switch (behavior) {
69577 case MADV_NORMAL:
69578 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
69579 @@ -116,6 +120,13 @@ success:
69580 /*
69581 * vm_flags is protected by the mmap_sem held in write mode.
69582 */
69583 +
69584 +#ifdef CONFIG_PAX_SEGMEXEC
69585 + vma_m = pax_find_mirror_vma(vma);
69586 + if (vma_m)
69587 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
69588 +#endif
69589 +
69590 vma->vm_flags = new_flags;
69591
69592 out:
69593 @@ -174,6 +185,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69594 struct vm_area_struct ** prev,
69595 unsigned long start, unsigned long end)
69596 {
69597 +
69598 +#ifdef CONFIG_PAX_SEGMEXEC
69599 + struct vm_area_struct *vma_m;
69600 +#endif
69601 +
69602 *prev = vma;
69603 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
69604 return -EINVAL;
69605 @@ -186,6 +202,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69606 zap_page_range(vma, start, end - start, &details);
69607 } else
69608 zap_page_range(vma, start, end - start, NULL);
69609 +
69610 +#ifdef CONFIG_PAX_SEGMEXEC
69611 + vma_m = pax_find_mirror_vma(vma);
69612 + if (vma_m) {
69613 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
69614 + struct zap_details details = {
69615 + .nonlinear_vma = vma_m,
69616 + .last_index = ULONG_MAX,
69617 + };
69618 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
69619 + } else
69620 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
69621 + }
69622 +#endif
69623 +
69624 return 0;
69625 }
69626
69627 @@ -384,6 +415,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
69628 if (end < start)
69629 goto out;
69630
69631 +#ifdef CONFIG_PAX_SEGMEXEC
69632 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69633 + if (end > SEGMEXEC_TASK_SIZE)
69634 + goto out;
69635 + } else
69636 +#endif
69637 +
69638 + if (end > TASK_SIZE)
69639 + goto out;
69640 +
69641 error = 0;
69642 if (end == start)
69643 goto out;
69644 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
69645 index 97cc273..6ed703f 100644
69646 --- a/mm/memory-failure.c
69647 +++ b/mm/memory-failure.c
69648 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
69649
69650 int sysctl_memory_failure_recovery __read_mostly = 1;
69651
69652 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69653 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69654
69655 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
69656
69657 @@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
69658 pfn, t->comm, t->pid);
69659 si.si_signo = SIGBUS;
69660 si.si_errno = 0;
69661 - si.si_addr = (void *)addr;
69662 + si.si_addr = (void __user *)addr;
69663 #ifdef __ARCH_SI_TRAPNO
69664 si.si_trapno = trapno;
69665 #endif
69666 @@ -1036,7 +1036,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
69667 }
69668
69669 nr_pages = 1 << compound_trans_order(hpage);
69670 - atomic_long_add(nr_pages, &mce_bad_pages);
69671 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
69672
69673 /*
69674 * We need/can do nothing about count=0 pages.
69675 @@ -1066,7 +1066,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
69676 if (!PageHWPoison(hpage)
69677 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
69678 || (p != hpage && TestSetPageHWPoison(hpage))) {
69679 - atomic_long_sub(nr_pages, &mce_bad_pages);
69680 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69681 return 0;
69682 }
69683 set_page_hwpoison_huge_page(hpage);
69684 @@ -1124,7 +1124,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
69685 }
69686 if (hwpoison_filter(p)) {
69687 if (TestClearPageHWPoison(p))
69688 - atomic_long_sub(nr_pages, &mce_bad_pages);
69689 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69690 unlock_page(hpage);
69691 put_page(hpage);
69692 return 0;
69693 @@ -1319,7 +1319,7 @@ int unpoison_memory(unsigned long pfn)
69694 return 0;
69695 }
69696 if (TestClearPageHWPoison(p))
69697 - atomic_long_sub(nr_pages, &mce_bad_pages);
69698 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69699 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
69700 return 0;
69701 }
69702 @@ -1333,7 +1333,7 @@ int unpoison_memory(unsigned long pfn)
69703 */
69704 if (TestClearPageHWPoison(page)) {
69705 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
69706 - atomic_long_sub(nr_pages, &mce_bad_pages);
69707 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69708 freeit = 1;
69709 if (PageHuge(page))
69710 clear_page_hwpoison_huge_page(page);
69711 @@ -1446,7 +1446,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
69712 }
69713 done:
69714 if (!PageHWPoison(hpage))
69715 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
69716 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
69717 set_page_hwpoison_huge_page(hpage);
69718 dequeue_hwpoisoned_huge_page(hpage);
69719 /* keep elevated page count for bad page */
69720 @@ -1577,7 +1577,7 @@ int soft_offline_page(struct page *page, int flags)
69721 return ret;
69722
69723 done:
69724 - atomic_long_add(1, &mce_bad_pages);
69725 + atomic_long_add_unchecked(1, &mce_bad_pages);
69726 SetPageHWPoison(page);
69727 /* keep elevated page count for bad page */
69728 return ret;
69729 diff --git a/mm/memory.c b/mm/memory.c
69730 index 6105f47..3363489 100644
69731 --- a/mm/memory.c
69732 +++ b/mm/memory.c
69733 @@ -434,8 +434,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
69734 return;
69735
69736 pmd = pmd_offset(pud, start);
69737 +
69738 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
69739 pud_clear(pud);
69740 pmd_free_tlb(tlb, pmd, start);
69741 +#endif
69742 +
69743 }
69744
69745 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69746 @@ -466,9 +470,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69747 if (end - 1 > ceiling - 1)
69748 return;
69749
69750 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
69751 pud = pud_offset(pgd, start);
69752 pgd_clear(pgd);
69753 pud_free_tlb(tlb, pud, start);
69754 +#endif
69755 +
69756 }
69757
69758 /*
69759 @@ -1597,12 +1604,6 @@ no_page_table:
69760 return page;
69761 }
69762
69763 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
69764 -{
69765 - return stack_guard_page_start(vma, addr) ||
69766 - stack_guard_page_end(vma, addr+PAGE_SIZE);
69767 -}
69768 -
69769 /**
69770 * __get_user_pages() - pin user pages in memory
69771 * @tsk: task_struct of target task
69772 @@ -1675,10 +1676,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69773 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
69774 i = 0;
69775
69776 - do {
69777 + while (nr_pages) {
69778 struct vm_area_struct *vma;
69779
69780 - vma = find_extend_vma(mm, start);
69781 + vma = find_vma(mm, start);
69782 if (!vma && in_gate_area(mm, start)) {
69783 unsigned long pg = start & PAGE_MASK;
69784 pgd_t *pgd;
69785 @@ -1726,7 +1727,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69786 goto next_page;
69787 }
69788
69789 - if (!vma ||
69790 + if (!vma || start < vma->vm_start ||
69791 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
69792 !(vm_flags & vma->vm_flags))
69793 return i ? : -EFAULT;
69794 @@ -1753,11 +1754,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69795 int ret;
69796 unsigned int fault_flags = 0;
69797
69798 - /* For mlock, just skip the stack guard page. */
69799 - if (foll_flags & FOLL_MLOCK) {
69800 - if (stack_guard_page(vma, start))
69801 - goto next_page;
69802 - }
69803 if (foll_flags & FOLL_WRITE)
69804 fault_flags |= FAULT_FLAG_WRITE;
69805 if (nonblocking)
69806 @@ -1831,7 +1827,7 @@ next_page:
69807 start += PAGE_SIZE;
69808 nr_pages--;
69809 } while (nr_pages && start < vma->vm_end);
69810 - } while (nr_pages);
69811 + }
69812 return i;
69813 }
69814 EXPORT_SYMBOL(__get_user_pages);
69815 @@ -2038,6 +2034,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
69816 page_add_file_rmap(page);
69817 set_pte_at(mm, addr, pte, mk_pte(page, prot));
69818
69819 +#ifdef CONFIG_PAX_SEGMEXEC
69820 + pax_mirror_file_pte(vma, addr, page, ptl);
69821 +#endif
69822 +
69823 retval = 0;
69824 pte_unmap_unlock(pte, ptl);
69825 return retval;
69826 @@ -2072,10 +2072,22 @@ out:
69827 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
69828 struct page *page)
69829 {
69830 +
69831 +#ifdef CONFIG_PAX_SEGMEXEC
69832 + struct vm_area_struct *vma_m;
69833 +#endif
69834 +
69835 if (addr < vma->vm_start || addr >= vma->vm_end)
69836 return -EFAULT;
69837 if (!page_count(page))
69838 return -EINVAL;
69839 +
69840 +#ifdef CONFIG_PAX_SEGMEXEC
69841 + vma_m = pax_find_mirror_vma(vma);
69842 + if (vma_m)
69843 + vma_m->vm_flags |= VM_INSERTPAGE;
69844 +#endif
69845 +
69846 vma->vm_flags |= VM_INSERTPAGE;
69847 return insert_page(vma, addr, page, vma->vm_page_prot);
69848 }
69849 @@ -2161,6 +2173,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
69850 unsigned long pfn)
69851 {
69852 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
69853 + BUG_ON(vma->vm_mirror);
69854
69855 if (addr < vma->vm_start || addr >= vma->vm_end)
69856 return -EFAULT;
69857 @@ -2368,7 +2381,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
69858
69859 BUG_ON(pud_huge(*pud));
69860
69861 - pmd = pmd_alloc(mm, pud, addr);
69862 + pmd = (mm == &init_mm) ?
69863 + pmd_alloc_kernel(mm, pud, addr) :
69864 + pmd_alloc(mm, pud, addr);
69865 if (!pmd)
69866 return -ENOMEM;
69867 do {
69868 @@ -2388,7 +2403,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
69869 unsigned long next;
69870 int err;
69871
69872 - pud = pud_alloc(mm, pgd, addr);
69873 + pud = (mm == &init_mm) ?
69874 + pud_alloc_kernel(mm, pgd, addr) :
69875 + pud_alloc(mm, pgd, addr);
69876 if (!pud)
69877 return -ENOMEM;
69878 do {
69879 @@ -2476,6 +2493,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
69880 copy_user_highpage(dst, src, va, vma);
69881 }
69882
69883 +#ifdef CONFIG_PAX_SEGMEXEC
69884 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
69885 +{
69886 + struct mm_struct *mm = vma->vm_mm;
69887 + spinlock_t *ptl;
69888 + pte_t *pte, entry;
69889 +
69890 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
69891 + entry = *pte;
69892 + if (!pte_present(entry)) {
69893 + if (!pte_none(entry)) {
69894 + BUG_ON(pte_file(entry));
69895 + free_swap_and_cache(pte_to_swp_entry(entry));
69896 + pte_clear_not_present_full(mm, address, pte, 0);
69897 + }
69898 + } else {
69899 + struct page *page;
69900 +
69901 + flush_cache_page(vma, address, pte_pfn(entry));
69902 + entry = ptep_clear_flush(vma, address, pte);
69903 + BUG_ON(pte_dirty(entry));
69904 + page = vm_normal_page(vma, address, entry);
69905 + if (page) {
69906 + update_hiwater_rss(mm);
69907 + if (PageAnon(page))
69908 + dec_mm_counter_fast(mm, MM_ANONPAGES);
69909 + else
69910 + dec_mm_counter_fast(mm, MM_FILEPAGES);
69911 + page_remove_rmap(page);
69912 + page_cache_release(page);
69913 + }
69914 + }
69915 + pte_unmap_unlock(pte, ptl);
69916 +}
69917 +
69918 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
69919 + *
69920 + * the ptl of the lower mapped page is held on entry and is not released on exit
69921 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
69922 + */
69923 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
69924 +{
69925 + struct mm_struct *mm = vma->vm_mm;
69926 + unsigned long address_m;
69927 + spinlock_t *ptl_m;
69928 + struct vm_area_struct *vma_m;
69929 + pmd_t *pmd_m;
69930 + pte_t *pte_m, entry_m;
69931 +
69932 + BUG_ON(!page_m || !PageAnon(page_m));
69933 +
69934 + vma_m = pax_find_mirror_vma(vma);
69935 + if (!vma_m)
69936 + return;
69937 +
69938 + BUG_ON(!PageLocked(page_m));
69939 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69940 + address_m = address + SEGMEXEC_TASK_SIZE;
69941 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69942 + pte_m = pte_offset_map(pmd_m, address_m);
69943 + ptl_m = pte_lockptr(mm, pmd_m);
69944 + if (ptl != ptl_m) {
69945 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69946 + if (!pte_none(*pte_m))
69947 + goto out;
69948 + }
69949 +
69950 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
69951 + page_cache_get(page_m);
69952 + page_add_anon_rmap(page_m, vma_m, address_m);
69953 + inc_mm_counter_fast(mm, MM_ANONPAGES);
69954 + set_pte_at(mm, address_m, pte_m, entry_m);
69955 + update_mmu_cache(vma_m, address_m, entry_m);
69956 +out:
69957 + if (ptl != ptl_m)
69958 + spin_unlock(ptl_m);
69959 + pte_unmap(pte_m);
69960 + unlock_page(page_m);
69961 +}
69962 +
69963 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
69964 +{
69965 + struct mm_struct *mm = vma->vm_mm;
69966 + unsigned long address_m;
69967 + spinlock_t *ptl_m;
69968 + struct vm_area_struct *vma_m;
69969 + pmd_t *pmd_m;
69970 + pte_t *pte_m, entry_m;
69971 +
69972 + BUG_ON(!page_m || PageAnon(page_m));
69973 +
69974 + vma_m = pax_find_mirror_vma(vma);
69975 + if (!vma_m)
69976 + return;
69977 +
69978 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69979 + address_m = address + SEGMEXEC_TASK_SIZE;
69980 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69981 + pte_m = pte_offset_map(pmd_m, address_m);
69982 + ptl_m = pte_lockptr(mm, pmd_m);
69983 + if (ptl != ptl_m) {
69984 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69985 + if (!pte_none(*pte_m))
69986 + goto out;
69987 + }
69988 +
69989 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
69990 + page_cache_get(page_m);
69991 + page_add_file_rmap(page_m);
69992 + inc_mm_counter_fast(mm, MM_FILEPAGES);
69993 + set_pte_at(mm, address_m, pte_m, entry_m);
69994 + update_mmu_cache(vma_m, address_m, entry_m);
69995 +out:
69996 + if (ptl != ptl_m)
69997 + spin_unlock(ptl_m);
69998 + pte_unmap(pte_m);
69999 +}
70000 +
70001 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
70002 +{
70003 + struct mm_struct *mm = vma->vm_mm;
70004 + unsigned long address_m;
70005 + spinlock_t *ptl_m;
70006 + struct vm_area_struct *vma_m;
70007 + pmd_t *pmd_m;
70008 + pte_t *pte_m, entry_m;
70009 +
70010 + vma_m = pax_find_mirror_vma(vma);
70011 + if (!vma_m)
70012 + return;
70013 +
70014 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70015 + address_m = address + SEGMEXEC_TASK_SIZE;
70016 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70017 + pte_m = pte_offset_map(pmd_m, address_m);
70018 + ptl_m = pte_lockptr(mm, pmd_m);
70019 + if (ptl != ptl_m) {
70020 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70021 + if (!pte_none(*pte_m))
70022 + goto out;
70023 + }
70024 +
70025 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
70026 + set_pte_at(mm, address_m, pte_m, entry_m);
70027 +out:
70028 + if (ptl != ptl_m)
70029 + spin_unlock(ptl_m);
70030 + pte_unmap(pte_m);
70031 +}
70032 +
70033 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
70034 +{
70035 + struct page *page_m;
70036 + pte_t entry;
70037 +
70038 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
70039 + goto out;
70040 +
70041 + entry = *pte;
70042 + page_m = vm_normal_page(vma, address, entry);
70043 + if (!page_m)
70044 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
70045 + else if (PageAnon(page_m)) {
70046 + if (pax_find_mirror_vma(vma)) {
70047 + pte_unmap_unlock(pte, ptl);
70048 + lock_page(page_m);
70049 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
70050 + if (pte_same(entry, *pte))
70051 + pax_mirror_anon_pte(vma, address, page_m, ptl);
70052 + else
70053 + unlock_page(page_m);
70054 + }
70055 + } else
70056 + pax_mirror_file_pte(vma, address, page_m, ptl);
70057 +
70058 +out:
70059 + pte_unmap_unlock(pte, ptl);
70060 +}
70061 +#endif
70062 +
70063 /*
70064 * This routine handles present pages, when users try to write
70065 * to a shared page. It is done by copying the page to a new address
70066 @@ -2687,6 +2884,12 @@ gotten:
70067 */
70068 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70069 if (likely(pte_same(*page_table, orig_pte))) {
70070 +
70071 +#ifdef CONFIG_PAX_SEGMEXEC
70072 + if (pax_find_mirror_vma(vma))
70073 + BUG_ON(!trylock_page(new_page));
70074 +#endif
70075 +
70076 if (old_page) {
70077 if (!PageAnon(old_page)) {
70078 dec_mm_counter_fast(mm, MM_FILEPAGES);
70079 @@ -2738,6 +2941,10 @@ gotten:
70080 page_remove_rmap(old_page);
70081 }
70082
70083 +#ifdef CONFIG_PAX_SEGMEXEC
70084 + pax_mirror_anon_pte(vma, address, new_page, ptl);
70085 +#endif
70086 +
70087 /* Free the old page.. */
70088 new_page = old_page;
70089 ret |= VM_FAULT_WRITE;
70090 @@ -3017,6 +3224,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70091 swap_free(entry);
70092 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
70093 try_to_free_swap(page);
70094 +
70095 +#ifdef CONFIG_PAX_SEGMEXEC
70096 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
70097 +#endif
70098 +
70099 unlock_page(page);
70100 if (swapcache) {
70101 /*
70102 @@ -3040,6 +3252,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70103
70104 /* No need to invalidate - it was non-present before */
70105 update_mmu_cache(vma, address, page_table);
70106 +
70107 +#ifdef CONFIG_PAX_SEGMEXEC
70108 + pax_mirror_anon_pte(vma, address, page, ptl);
70109 +#endif
70110 +
70111 unlock:
70112 pte_unmap_unlock(page_table, ptl);
70113 out:
70114 @@ -3059,40 +3276,6 @@ out_release:
70115 }
70116
70117 /*
70118 - * This is like a special single-page "expand_{down|up}wards()",
70119 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
70120 - * doesn't hit another vma.
70121 - */
70122 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
70123 -{
70124 - address &= PAGE_MASK;
70125 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
70126 - struct vm_area_struct *prev = vma->vm_prev;
70127 -
70128 - /*
70129 - * Is there a mapping abutting this one below?
70130 - *
70131 - * That's only ok if it's the same stack mapping
70132 - * that has gotten split..
70133 - */
70134 - if (prev && prev->vm_end == address)
70135 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
70136 -
70137 - expand_downwards(vma, address - PAGE_SIZE);
70138 - }
70139 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
70140 - struct vm_area_struct *next = vma->vm_next;
70141 -
70142 - /* As VM_GROWSDOWN but s/below/above/ */
70143 - if (next && next->vm_start == address + PAGE_SIZE)
70144 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
70145 -
70146 - expand_upwards(vma, address + PAGE_SIZE);
70147 - }
70148 - return 0;
70149 -}
70150 -
70151 -/*
70152 * We enter with non-exclusive mmap_sem (to exclude vma changes,
70153 * but allow concurrent faults), and pte mapped but not yet locked.
70154 * We return with mmap_sem still held, but pte unmapped and unlocked.
70155 @@ -3101,27 +3284,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70156 unsigned long address, pte_t *page_table, pmd_t *pmd,
70157 unsigned int flags)
70158 {
70159 - struct page *page;
70160 + struct page *page = NULL;
70161 spinlock_t *ptl;
70162 pte_t entry;
70163
70164 - pte_unmap(page_table);
70165 -
70166 - /* Check if we need to add a guard page to the stack */
70167 - if (check_stack_guard_page(vma, address) < 0)
70168 - return VM_FAULT_SIGBUS;
70169 -
70170 - /* Use the zero-page for reads */
70171 if (!(flags & FAULT_FLAG_WRITE)) {
70172 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
70173 vma->vm_page_prot));
70174 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70175 + ptl = pte_lockptr(mm, pmd);
70176 + spin_lock(ptl);
70177 if (!pte_none(*page_table))
70178 goto unlock;
70179 goto setpte;
70180 }
70181
70182 /* Allocate our own private page. */
70183 + pte_unmap(page_table);
70184 +
70185 if (unlikely(anon_vma_prepare(vma)))
70186 goto oom;
70187 page = alloc_zeroed_user_highpage_movable(vma, address);
70188 @@ -3140,6 +3319,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70189 if (!pte_none(*page_table))
70190 goto release;
70191
70192 +#ifdef CONFIG_PAX_SEGMEXEC
70193 + if (pax_find_mirror_vma(vma))
70194 + BUG_ON(!trylock_page(page));
70195 +#endif
70196 +
70197 inc_mm_counter_fast(mm, MM_ANONPAGES);
70198 page_add_new_anon_rmap(page, vma, address);
70199 setpte:
70200 @@ -3147,6 +3331,12 @@ setpte:
70201
70202 /* No need to invalidate - it was non-present before */
70203 update_mmu_cache(vma, address, page_table);
70204 +
70205 +#ifdef CONFIG_PAX_SEGMEXEC
70206 + if (page)
70207 + pax_mirror_anon_pte(vma, address, page, ptl);
70208 +#endif
70209 +
70210 unlock:
70211 pte_unmap_unlock(page_table, ptl);
70212 return 0;
70213 @@ -3290,6 +3480,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70214 */
70215 /* Only go through if we didn't race with anybody else... */
70216 if (likely(pte_same(*page_table, orig_pte))) {
70217 +
70218 +#ifdef CONFIG_PAX_SEGMEXEC
70219 + if (anon && pax_find_mirror_vma(vma))
70220 + BUG_ON(!trylock_page(page));
70221 +#endif
70222 +
70223 flush_icache_page(vma, page);
70224 entry = mk_pte(page, vma->vm_page_prot);
70225 if (flags & FAULT_FLAG_WRITE)
70226 @@ -3309,6 +3505,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70227
70228 /* no need to invalidate: a not-present page won't be cached */
70229 update_mmu_cache(vma, address, page_table);
70230 +
70231 +#ifdef CONFIG_PAX_SEGMEXEC
70232 + if (anon)
70233 + pax_mirror_anon_pte(vma, address, page, ptl);
70234 + else
70235 + pax_mirror_file_pte(vma, address, page, ptl);
70236 +#endif
70237 +
70238 } else {
70239 if (cow_page)
70240 mem_cgroup_uncharge_page(cow_page);
70241 @@ -3462,6 +3666,12 @@ int handle_pte_fault(struct mm_struct *mm,
70242 if (flags & FAULT_FLAG_WRITE)
70243 flush_tlb_fix_spurious_fault(vma, address);
70244 }
70245 +
70246 +#ifdef CONFIG_PAX_SEGMEXEC
70247 + pax_mirror_pte(vma, address, pte, pmd, ptl);
70248 + return 0;
70249 +#endif
70250 +
70251 unlock:
70252 pte_unmap_unlock(pte, ptl);
70253 return 0;
70254 @@ -3478,6 +3688,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70255 pmd_t *pmd;
70256 pte_t *pte;
70257
70258 +#ifdef CONFIG_PAX_SEGMEXEC
70259 + struct vm_area_struct *vma_m;
70260 +#endif
70261 +
70262 __set_current_state(TASK_RUNNING);
70263
70264 count_vm_event(PGFAULT);
70265 @@ -3489,6 +3703,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70266 if (unlikely(is_vm_hugetlb_page(vma)))
70267 return hugetlb_fault(mm, vma, address, flags);
70268
70269 +#ifdef CONFIG_PAX_SEGMEXEC
70270 + vma_m = pax_find_mirror_vma(vma);
70271 + if (vma_m) {
70272 + unsigned long address_m;
70273 + pgd_t *pgd_m;
70274 + pud_t *pud_m;
70275 + pmd_t *pmd_m;
70276 +
70277 + if (vma->vm_start > vma_m->vm_start) {
70278 + address_m = address;
70279 + address -= SEGMEXEC_TASK_SIZE;
70280 + vma = vma_m;
70281 + } else
70282 + address_m = address + SEGMEXEC_TASK_SIZE;
70283 +
70284 + pgd_m = pgd_offset(mm, address_m);
70285 + pud_m = pud_alloc(mm, pgd_m, address_m);
70286 + if (!pud_m)
70287 + return VM_FAULT_OOM;
70288 + pmd_m = pmd_alloc(mm, pud_m, address_m);
70289 + if (!pmd_m)
70290 + return VM_FAULT_OOM;
70291 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
70292 + return VM_FAULT_OOM;
70293 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
70294 + }
70295 +#endif
70296 +
70297 pgd = pgd_offset(mm, address);
70298 pud = pud_alloc(mm, pgd, address);
70299 if (!pud)
70300 @@ -3518,7 +3760,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70301 * run pte_offset_map on the pmd, if an huge pmd could
70302 * materialize from under us from a different thread.
70303 */
70304 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
70305 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70306 return VM_FAULT_OOM;
70307 /* if an huge pmd materialized from under us just retry later */
70308 if (unlikely(pmd_trans_huge(*pmd)))
70309 @@ -3555,6 +3797,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
70310 spin_unlock(&mm->page_table_lock);
70311 return 0;
70312 }
70313 +
70314 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
70315 +{
70316 + pud_t *new = pud_alloc_one(mm, address);
70317 + if (!new)
70318 + return -ENOMEM;
70319 +
70320 + smp_wmb(); /* See comment in __pte_alloc */
70321 +
70322 + spin_lock(&mm->page_table_lock);
70323 + if (pgd_present(*pgd)) /* Another has populated it */
70324 + pud_free(mm, new);
70325 + else
70326 + pgd_populate_kernel(mm, pgd, new);
70327 + spin_unlock(&mm->page_table_lock);
70328 + return 0;
70329 +}
70330 #endif /* __PAGETABLE_PUD_FOLDED */
70331
70332 #ifndef __PAGETABLE_PMD_FOLDED
70333 @@ -3585,6 +3844,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
70334 spin_unlock(&mm->page_table_lock);
70335 return 0;
70336 }
70337 +
70338 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
70339 +{
70340 + pmd_t *new = pmd_alloc_one(mm, address);
70341 + if (!new)
70342 + return -ENOMEM;
70343 +
70344 + smp_wmb(); /* See comment in __pte_alloc */
70345 +
70346 + spin_lock(&mm->page_table_lock);
70347 +#ifndef __ARCH_HAS_4LEVEL_HACK
70348 + if (pud_present(*pud)) /* Another has populated it */
70349 + pmd_free(mm, new);
70350 + else
70351 + pud_populate_kernel(mm, pud, new);
70352 +#else
70353 + if (pgd_present(*pud)) /* Another has populated it */
70354 + pmd_free(mm, new);
70355 + else
70356 + pgd_populate_kernel(mm, pud, new);
70357 +#endif /* __ARCH_HAS_4LEVEL_HACK */
70358 + spin_unlock(&mm->page_table_lock);
70359 + return 0;
70360 +}
70361 #endif /* __PAGETABLE_PMD_FOLDED */
70362
70363 int make_pages_present(unsigned long addr, unsigned long end)
70364 @@ -3622,7 +3905,7 @@ static int __init gate_vma_init(void)
70365 gate_vma.vm_start = FIXADDR_USER_START;
70366 gate_vma.vm_end = FIXADDR_USER_END;
70367 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
70368 - gate_vma.vm_page_prot = __P101;
70369 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
70370
70371 return 0;
70372 }
70373 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
70374 index bf5b485..e44c2cb 100644
70375 --- a/mm/mempolicy.c
70376 +++ b/mm/mempolicy.c
70377 @@ -619,6 +619,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70378 unsigned long vmstart;
70379 unsigned long vmend;
70380
70381 +#ifdef CONFIG_PAX_SEGMEXEC
70382 + struct vm_area_struct *vma_m;
70383 +#endif
70384 +
70385 vma = find_vma(mm, start);
70386 if (!vma || vma->vm_start > start)
70387 return -EFAULT;
70388 @@ -672,6 +676,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70389 if (err)
70390 goto out;
70391 }
70392 +
70393 +#ifdef CONFIG_PAX_SEGMEXEC
70394 + vma_m = pax_find_mirror_vma(vma);
70395 + if (vma_m && vma_m->vm_ops && vma_m->vm_ops->set_policy) {
70396 + err = vma_m->vm_ops->set_policy(vma_m, new_pol);
70397 + if (err)
70398 + goto out;
70399 + }
70400 +#endif
70401 +
70402 }
70403
70404 out:
70405 @@ -1105,6 +1119,17 @@ static long do_mbind(unsigned long start, unsigned long len,
70406
70407 if (end < start)
70408 return -EINVAL;
70409 +
70410 +#ifdef CONFIG_PAX_SEGMEXEC
70411 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
70412 + if (end > SEGMEXEC_TASK_SIZE)
70413 + return -EINVAL;
70414 + } else
70415 +#endif
70416 +
70417 + if (end > TASK_SIZE)
70418 + return -EINVAL;
70419 +
70420 if (end == start)
70421 return 0;
70422
70423 @@ -1328,8 +1353,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70424 */
70425 tcred = __task_cred(task);
70426 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70427 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
70428 - !capable(CAP_SYS_NICE)) {
70429 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70430 rcu_read_unlock();
70431 err = -EPERM;
70432 goto out_put;
70433 @@ -1360,6 +1384,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70434 goto out;
70435 }
70436
70437 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70438 + if (mm != current->mm &&
70439 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70440 + mmput(mm);
70441 + err = -EPERM;
70442 + goto out;
70443 + }
70444 +#endif
70445 +
70446 err = do_migrate_pages(mm, old, new,
70447 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
70448
70449 diff --git a/mm/mlock.c b/mm/mlock.c
70450 index ef726e8..13e0901 100644
70451 --- a/mm/mlock.c
70452 +++ b/mm/mlock.c
70453 @@ -13,6 +13,7 @@
70454 #include <linux/pagemap.h>
70455 #include <linux/mempolicy.h>
70456 #include <linux/syscalls.h>
70457 +#include <linux/security.h>
70458 #include <linux/sched.h>
70459 #include <linux/export.h>
70460 #include <linux/rmap.h>
70461 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
70462 return -EINVAL;
70463 if (end == start)
70464 return 0;
70465 + if (end > TASK_SIZE)
70466 + return -EINVAL;
70467 +
70468 vma = find_vma(current->mm, start);
70469 if (!vma || vma->vm_start > start)
70470 return -ENOMEM;
70471 @@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
70472 for (nstart = start ; ; ) {
70473 vm_flags_t newflags;
70474
70475 +#ifdef CONFIG_PAX_SEGMEXEC
70476 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70477 + break;
70478 +#endif
70479 +
70480 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
70481
70482 newflags = vma->vm_flags | VM_LOCKED;
70483 @@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
70484 lock_limit >>= PAGE_SHIFT;
70485
70486 /* check against resource limits */
70487 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
70488 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
70489 error = do_mlock(start, len, 1);
70490 up_write(&current->mm->mmap_sem);
70491 @@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
70492 static int do_mlockall(int flags)
70493 {
70494 struct vm_area_struct * vma, * prev = NULL;
70495 - unsigned int def_flags = 0;
70496
70497 if (flags & MCL_FUTURE)
70498 - def_flags = VM_LOCKED;
70499 - current->mm->def_flags = def_flags;
70500 + current->mm->def_flags |= VM_LOCKED;
70501 + else
70502 + current->mm->def_flags &= ~VM_LOCKED;
70503 if (flags == MCL_FUTURE)
70504 goto out;
70505
70506 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
70507 vm_flags_t newflags;
70508
70509 +#ifdef CONFIG_PAX_SEGMEXEC
70510 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70511 + break;
70512 +#endif
70513 +
70514 + BUG_ON(vma->vm_end > TASK_SIZE);
70515 newflags = vma->vm_flags | VM_LOCKED;
70516 if (!(flags & MCL_CURRENT))
70517 newflags &= ~VM_LOCKED;
70518 @@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
70519 lock_limit >>= PAGE_SHIFT;
70520
70521 ret = -ENOMEM;
70522 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
70523 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
70524 capable(CAP_IPC_LOCK))
70525 ret = do_mlockall(flags);
70526 diff --git a/mm/mmap.c b/mm/mmap.c
70527 index 848ef52..d2b586c 100644
70528 --- a/mm/mmap.c
70529 +++ b/mm/mmap.c
70530 @@ -46,6 +46,16 @@
70531 #define arch_rebalance_pgtables(addr, len) (addr)
70532 #endif
70533
70534 +static inline void verify_mm_writelocked(struct mm_struct *mm)
70535 +{
70536 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
70537 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
70538 + up_read(&mm->mmap_sem);
70539 + BUG();
70540 + }
70541 +#endif
70542 +}
70543 +
70544 static void unmap_region(struct mm_struct *mm,
70545 struct vm_area_struct *vma, struct vm_area_struct *prev,
70546 unsigned long start, unsigned long end);
70547 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
70548 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
70549 *
70550 */
70551 -pgprot_t protection_map[16] = {
70552 +pgprot_t protection_map[16] __read_only = {
70553 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
70554 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
70555 };
70556
70557 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
70558 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
70559 {
70560 - return __pgprot(pgprot_val(protection_map[vm_flags &
70561 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
70562 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
70563 pgprot_val(arch_vm_get_page_prot(vm_flags)));
70564 +
70565 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70566 + if (!(__supported_pte_mask & _PAGE_NX) &&
70567 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
70568 + (vm_flags & (VM_READ | VM_WRITE)))
70569 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
70570 +#endif
70571 +
70572 + return prot;
70573 }
70574 EXPORT_SYMBOL(vm_get_page_prot);
70575
70576 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
70577 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
70578 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
70579 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
70580 /*
70581 * Make sure vm_committed_as in one cacheline and not cacheline shared with
70582 * other variables. It can be updated by several CPUs frequently.
70583 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
70584 struct vm_area_struct *next = vma->vm_next;
70585
70586 might_sleep();
70587 + BUG_ON(vma->vm_mirror);
70588 if (vma->vm_ops && vma->vm_ops->close)
70589 vma->vm_ops->close(vma);
70590 if (vma->vm_file) {
70591 @@ -274,6 +295,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
70592 * not page aligned -Ram Gupta
70593 */
70594 rlim = rlimit(RLIMIT_DATA);
70595 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
70596 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
70597 (mm->end_data - mm->start_data) > rlim)
70598 goto out;
70599 @@ -690,6 +712,12 @@ static int
70600 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
70601 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70602 {
70603 +
70604 +#ifdef CONFIG_PAX_SEGMEXEC
70605 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
70606 + return 0;
70607 +#endif
70608 +
70609 if (is_mergeable_vma(vma, file, vm_flags) &&
70610 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70611 if (vma->vm_pgoff == vm_pgoff)
70612 @@ -709,6 +737,12 @@ static int
70613 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70614 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70615 {
70616 +
70617 +#ifdef CONFIG_PAX_SEGMEXEC
70618 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
70619 + return 0;
70620 +#endif
70621 +
70622 if (is_mergeable_vma(vma, file, vm_flags) &&
70623 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70624 pgoff_t vm_pglen;
70625 @@ -751,13 +785,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70626 struct vm_area_struct *vma_merge(struct mm_struct *mm,
70627 struct vm_area_struct *prev, unsigned long addr,
70628 unsigned long end, unsigned long vm_flags,
70629 - struct anon_vma *anon_vma, struct file *file,
70630 + struct anon_vma *anon_vma, struct file *file,
70631 pgoff_t pgoff, struct mempolicy *policy)
70632 {
70633 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
70634 struct vm_area_struct *area, *next;
70635 int err;
70636
70637 +#ifdef CONFIG_PAX_SEGMEXEC
70638 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
70639 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
70640 +
70641 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
70642 +#endif
70643 +
70644 /*
70645 * We later require that vma->vm_flags == vm_flags,
70646 * so this tests vma->vm_flags & VM_SPECIAL, too.
70647 @@ -773,6 +814,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70648 if (next && next->vm_end == end) /* cases 6, 7, 8 */
70649 next = next->vm_next;
70650
70651 +#ifdef CONFIG_PAX_SEGMEXEC
70652 + if (prev)
70653 + prev_m = pax_find_mirror_vma(prev);
70654 + if (area)
70655 + area_m = pax_find_mirror_vma(area);
70656 + if (next)
70657 + next_m = pax_find_mirror_vma(next);
70658 +#endif
70659 +
70660 /*
70661 * Can it merge with the predecessor?
70662 */
70663 @@ -792,9 +842,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70664 /* cases 1, 6 */
70665 err = vma_adjust(prev, prev->vm_start,
70666 next->vm_end, prev->vm_pgoff, NULL);
70667 - } else /* cases 2, 5, 7 */
70668 +
70669 +#ifdef CONFIG_PAX_SEGMEXEC
70670 + if (!err && prev_m)
70671 + err = vma_adjust(prev_m, prev_m->vm_start,
70672 + next_m->vm_end, prev_m->vm_pgoff, NULL);
70673 +#endif
70674 +
70675 + } else { /* cases 2, 5, 7 */
70676 err = vma_adjust(prev, prev->vm_start,
70677 end, prev->vm_pgoff, NULL);
70678 +
70679 +#ifdef CONFIG_PAX_SEGMEXEC
70680 + if (!err && prev_m)
70681 + err = vma_adjust(prev_m, prev_m->vm_start,
70682 + end_m, prev_m->vm_pgoff, NULL);
70683 +#endif
70684 +
70685 + }
70686 if (err)
70687 return NULL;
70688 khugepaged_enter_vma_merge(prev);
70689 @@ -808,12 +873,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70690 mpol_equal(policy, vma_policy(next)) &&
70691 can_vma_merge_before(next, vm_flags,
70692 anon_vma, file, pgoff+pglen)) {
70693 - if (prev && addr < prev->vm_end) /* case 4 */
70694 + if (prev && addr < prev->vm_end) { /* case 4 */
70695 err = vma_adjust(prev, prev->vm_start,
70696 addr, prev->vm_pgoff, NULL);
70697 - else /* cases 3, 8 */
70698 +
70699 +#ifdef CONFIG_PAX_SEGMEXEC
70700 + if (!err && prev_m)
70701 + err = vma_adjust(prev_m, prev_m->vm_start,
70702 + addr_m, prev_m->vm_pgoff, NULL);
70703 +#endif
70704 +
70705 + } else { /* cases 3, 8 */
70706 err = vma_adjust(area, addr, next->vm_end,
70707 next->vm_pgoff - pglen, NULL);
70708 +
70709 +#ifdef CONFIG_PAX_SEGMEXEC
70710 + if (!err && area_m)
70711 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
70712 + next_m->vm_pgoff - pglen, NULL);
70713 +#endif
70714 +
70715 + }
70716 if (err)
70717 return NULL;
70718 khugepaged_enter_vma_merge(area);
70719 @@ -922,14 +1002,11 @@ none:
70720 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
70721 struct file *file, long pages)
70722 {
70723 - const unsigned long stack_flags
70724 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
70725 -
70726 if (file) {
70727 mm->shared_vm += pages;
70728 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
70729 mm->exec_vm += pages;
70730 - } else if (flags & stack_flags)
70731 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
70732 mm->stack_vm += pages;
70733 if (flags & (VM_RESERVED|VM_IO))
70734 mm->reserved_vm += pages;
70735 @@ -969,7 +1046,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70736 * (the exception is when the underlying filesystem is noexec
70737 * mounted, in which case we dont add PROT_EXEC.)
70738 */
70739 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70740 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70741 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
70742 prot |= PROT_EXEC;
70743
70744 @@ -995,7 +1072,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70745 /* Obtain the address to map to. we verify (or select) it and ensure
70746 * that it represents a valid section of the address space.
70747 */
70748 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
70749 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
70750 if (addr & ~PAGE_MASK)
70751 return addr;
70752
70753 @@ -1006,6 +1083,36 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70754 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
70755 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
70756
70757 +#ifdef CONFIG_PAX_MPROTECT
70758 + if (mm->pax_flags & MF_PAX_MPROTECT) {
70759 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
70760 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
70761 + gr_log_rwxmmap(file);
70762 +
70763 +#ifdef CONFIG_PAX_EMUPLT
70764 + vm_flags &= ~VM_EXEC;
70765 +#else
70766 + return -EPERM;
70767 +#endif
70768 +
70769 + }
70770 +
70771 + if (!(vm_flags & VM_EXEC))
70772 + vm_flags &= ~VM_MAYEXEC;
70773 +#else
70774 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
70775 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
70776 +#endif
70777 + else
70778 + vm_flags &= ~VM_MAYWRITE;
70779 + }
70780 +#endif
70781 +
70782 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70783 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
70784 + vm_flags &= ~VM_PAGEEXEC;
70785 +#endif
70786 +
70787 if (flags & MAP_LOCKED)
70788 if (!can_do_mlock())
70789 return -EPERM;
70790 @@ -1017,6 +1124,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70791 locked += mm->locked_vm;
70792 lock_limit = rlimit(RLIMIT_MEMLOCK);
70793 lock_limit >>= PAGE_SHIFT;
70794 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
70795 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
70796 return -EAGAIN;
70797 }
70798 @@ -1087,6 +1195,9 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70799 if (error)
70800 return error;
70801
70802 + if (!gr_acl_handle_mmap(file, prot))
70803 + return -EACCES;
70804 +
70805 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
70806 }
70807
70808 @@ -1192,7 +1303,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
70809 vm_flags_t vm_flags = vma->vm_flags;
70810
70811 /* If it was private or non-writable, the write bit is already clear */
70812 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
70813 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
70814 return 0;
70815
70816 /* The backer wishes to know when pages are first written to? */
70817 @@ -1241,14 +1352,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
70818 unsigned long charged = 0;
70819 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
70820
70821 +#ifdef CONFIG_PAX_SEGMEXEC
70822 + struct vm_area_struct *vma_m = NULL;
70823 +#endif
70824 +
70825 + /*
70826 + * mm->mmap_sem is required to protect against another thread
70827 + * changing the mappings in case we sleep.
70828 + */
70829 + verify_mm_writelocked(mm);
70830 +
70831 /* Clear old maps */
70832 error = -ENOMEM;
70833 -munmap_back:
70834 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70835 if (vma && vma->vm_start < addr + len) {
70836 if (do_munmap(mm, addr, len))
70837 return -ENOMEM;
70838 - goto munmap_back;
70839 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70840 + BUG_ON(vma && vma->vm_start < addr + len);
70841 }
70842
70843 /* Check against address space limit. */
70844 @@ -1297,6 +1418,16 @@ munmap_back:
70845 goto unacct_error;
70846 }
70847
70848 +#ifdef CONFIG_PAX_SEGMEXEC
70849 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
70850 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70851 + if (!vma_m) {
70852 + error = -ENOMEM;
70853 + goto free_vma;
70854 + }
70855 + }
70856 +#endif
70857 +
70858 vma->vm_mm = mm;
70859 vma->vm_start = addr;
70860 vma->vm_end = addr + len;
70861 @@ -1321,6 +1452,19 @@ munmap_back:
70862 error = file->f_op->mmap(file, vma);
70863 if (error)
70864 goto unmap_and_free_vma;
70865 +
70866 +#ifdef CONFIG_PAX_SEGMEXEC
70867 + if (vma_m && (vm_flags & VM_EXECUTABLE))
70868 + added_exe_file_vma(mm);
70869 +#endif
70870 +
70871 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70872 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
70873 + vma->vm_flags |= VM_PAGEEXEC;
70874 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
70875 + }
70876 +#endif
70877 +
70878 if (vm_flags & VM_EXECUTABLE)
70879 added_exe_file_vma(mm);
70880
70881 @@ -1358,6 +1502,11 @@ munmap_back:
70882 vma_link(mm, vma, prev, rb_link, rb_parent);
70883 file = vma->vm_file;
70884
70885 +#ifdef CONFIG_PAX_SEGMEXEC
70886 + if (vma_m)
70887 + BUG_ON(pax_mirror_vma(vma_m, vma));
70888 +#endif
70889 +
70890 /* Once vma denies write, undo our temporary denial count */
70891 if (correct_wcount)
70892 atomic_inc(&inode->i_writecount);
70893 @@ -1366,6 +1515,7 @@ out:
70894
70895 mm->total_vm += len >> PAGE_SHIFT;
70896 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
70897 + track_exec_limit(mm, addr, addr + len, vm_flags);
70898 if (vm_flags & VM_LOCKED) {
70899 if (!mlock_vma_pages_range(vma, addr, addr + len))
70900 mm->locked_vm += (len >> PAGE_SHIFT);
70901 @@ -1383,6 +1533,12 @@ unmap_and_free_vma:
70902 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
70903 charged = 0;
70904 free_vma:
70905 +
70906 +#ifdef CONFIG_PAX_SEGMEXEC
70907 + if (vma_m)
70908 + kmem_cache_free(vm_area_cachep, vma_m);
70909 +#endif
70910 +
70911 kmem_cache_free(vm_area_cachep, vma);
70912 unacct_error:
70913 if (charged)
70914 @@ -1390,6 +1546,44 @@ unacct_error:
70915 return error;
70916 }
70917
70918 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
70919 +{
70920 + if (!vma) {
70921 +#ifdef CONFIG_STACK_GROWSUP
70922 + if (addr > sysctl_heap_stack_gap)
70923 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
70924 + else
70925 + vma = find_vma(current->mm, 0);
70926 + if (vma && (vma->vm_flags & VM_GROWSUP))
70927 + return false;
70928 +#endif
70929 + return true;
70930 + }
70931 +
70932 + if (addr + len > vma->vm_start)
70933 + return false;
70934 +
70935 + if (vma->vm_flags & VM_GROWSDOWN)
70936 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
70937 +#ifdef CONFIG_STACK_GROWSUP
70938 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
70939 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
70940 +#endif
70941 +
70942 + return true;
70943 +}
70944 +
70945 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
70946 +{
70947 + if (vma->vm_start < len)
70948 + return -ENOMEM;
70949 + if (!(vma->vm_flags & VM_GROWSDOWN))
70950 + return vma->vm_start - len;
70951 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
70952 + return vma->vm_start - len - sysctl_heap_stack_gap;
70953 + return -ENOMEM;
70954 +}
70955 +
70956 /* Get an address range which is currently unmapped.
70957 * For shmat() with addr=0.
70958 *
70959 @@ -1416,18 +1610,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
70960 if (flags & MAP_FIXED)
70961 return addr;
70962
70963 +#ifdef CONFIG_PAX_RANDMMAP
70964 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
70965 +#endif
70966 +
70967 if (addr) {
70968 addr = PAGE_ALIGN(addr);
70969 - vma = find_vma(mm, addr);
70970 - if (TASK_SIZE - len >= addr &&
70971 - (!vma || addr + len <= vma->vm_start))
70972 - return addr;
70973 + if (TASK_SIZE - len >= addr) {
70974 + vma = find_vma(mm, addr);
70975 + if (check_heap_stack_gap(vma, addr, len))
70976 + return addr;
70977 + }
70978 }
70979 if (len > mm->cached_hole_size) {
70980 - start_addr = addr = mm->free_area_cache;
70981 + start_addr = addr = mm->free_area_cache;
70982 } else {
70983 - start_addr = addr = TASK_UNMAPPED_BASE;
70984 - mm->cached_hole_size = 0;
70985 + start_addr = addr = mm->mmap_base;
70986 + mm->cached_hole_size = 0;
70987 }
70988
70989 full_search:
70990 @@ -1438,34 +1637,40 @@ full_search:
70991 * Start a new search - just in case we missed
70992 * some holes.
70993 */
70994 - if (start_addr != TASK_UNMAPPED_BASE) {
70995 - addr = TASK_UNMAPPED_BASE;
70996 - start_addr = addr;
70997 + if (start_addr != mm->mmap_base) {
70998 + start_addr = addr = mm->mmap_base;
70999 mm->cached_hole_size = 0;
71000 goto full_search;
71001 }
71002 return -ENOMEM;
71003 }
71004 - if (!vma || addr + len <= vma->vm_start) {
71005 - /*
71006 - * Remember the place where we stopped the search:
71007 - */
71008 - mm->free_area_cache = addr + len;
71009 - return addr;
71010 - }
71011 + if (check_heap_stack_gap(vma, addr, len))
71012 + break;
71013 if (addr + mm->cached_hole_size < vma->vm_start)
71014 mm->cached_hole_size = vma->vm_start - addr;
71015 addr = vma->vm_end;
71016 }
71017 +
71018 + /*
71019 + * Remember the place where we stopped the search:
71020 + */
71021 + mm->free_area_cache = addr + len;
71022 + return addr;
71023 }
71024 #endif
71025
71026 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
71027 {
71028 +
71029 +#ifdef CONFIG_PAX_SEGMEXEC
71030 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71031 + return;
71032 +#endif
71033 +
71034 /*
71035 * Is this a new hole at the lowest possible address?
71036 */
71037 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
71038 + if (addr >= mm->mmap_base && addr < mm->free_area_cache)
71039 mm->free_area_cache = addr;
71040 }
71041
71042 @@ -1481,7 +1686,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71043 {
71044 struct vm_area_struct *vma;
71045 struct mm_struct *mm = current->mm;
71046 - unsigned long addr = addr0, start_addr;
71047 + unsigned long base = mm->mmap_base, addr = addr0, start_addr;
71048
71049 /* requested length too big for entire address space */
71050 if (len > TASK_SIZE)
71051 @@ -1490,13 +1695,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71052 if (flags & MAP_FIXED)
71053 return addr;
71054
71055 +#ifdef CONFIG_PAX_RANDMMAP
71056 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71057 +#endif
71058 +
71059 /* requesting a specific address */
71060 if (addr) {
71061 addr = PAGE_ALIGN(addr);
71062 - vma = find_vma(mm, addr);
71063 - if (TASK_SIZE - len >= addr &&
71064 - (!vma || addr + len <= vma->vm_start))
71065 - return addr;
71066 + if (TASK_SIZE - len >= addr) {
71067 + vma = find_vma(mm, addr);
71068 + if (check_heap_stack_gap(vma, addr, len))
71069 + return addr;
71070 + }
71071 }
71072
71073 /* check if free_area_cache is useful for us */
71074 @@ -1520,7 +1730,7 @@ try_again:
71075 * return with success:
71076 */
71077 vma = find_vma(mm, addr);
71078 - if (!vma || addr+len <= vma->vm_start)
71079 + if (check_heap_stack_gap(vma, addr, len))
71080 /* remember the address as a hint for next time */
71081 return (mm->free_area_cache = addr);
71082
71083 @@ -1529,8 +1739,8 @@ try_again:
71084 mm->cached_hole_size = vma->vm_start - addr;
71085
71086 /* try just below the current vma->vm_start */
71087 - addr = vma->vm_start-len;
71088 - } while (len < vma->vm_start);
71089 + addr = skip_heap_stack_gap(vma, len);
71090 + } while (!IS_ERR_VALUE(addr));
71091
71092 fail:
71093 /*
71094 @@ -1553,13 +1763,21 @@ fail:
71095 * can happen with large stack limits and large mmap()
71096 * allocations.
71097 */
71098 + mm->mmap_base = TASK_UNMAPPED_BASE;
71099 +
71100 +#ifdef CONFIG_PAX_RANDMMAP
71101 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71102 + mm->mmap_base += mm->delta_mmap;
71103 +#endif
71104 +
71105 + mm->free_area_cache = mm->mmap_base;
71106 mm->cached_hole_size = ~0UL;
71107 - mm->free_area_cache = TASK_UNMAPPED_BASE;
71108 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
71109 /*
71110 * Restore the topdown base:
71111 */
71112 - mm->free_area_cache = mm->mmap_base;
71113 + mm->mmap_base = base;
71114 + mm->free_area_cache = base;
71115 mm->cached_hole_size = ~0UL;
71116
71117 return addr;
71118 @@ -1568,6 +1786,12 @@ fail:
71119
71120 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71121 {
71122 +
71123 +#ifdef CONFIG_PAX_SEGMEXEC
71124 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71125 + return;
71126 +#endif
71127 +
71128 /*
71129 * Is this a new hole at the highest possible address?
71130 */
71131 @@ -1575,8 +1799,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71132 mm->free_area_cache = addr;
71133
71134 /* dont allow allocations above current base */
71135 - if (mm->free_area_cache > mm->mmap_base)
71136 + if (mm->free_area_cache > mm->mmap_base) {
71137 mm->free_area_cache = mm->mmap_base;
71138 + mm->cached_hole_size = ~0UL;
71139 + }
71140 }
71141
71142 unsigned long
71143 @@ -1672,6 +1898,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
71144 return vma;
71145 }
71146
71147 +#ifdef CONFIG_PAX_SEGMEXEC
71148 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
71149 +{
71150 + struct vm_area_struct *vma_m;
71151 +
71152 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
71153 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
71154 + BUG_ON(vma->vm_mirror);
71155 + return NULL;
71156 + }
71157 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
71158 + vma_m = vma->vm_mirror;
71159 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
71160 + BUG_ON(vma->vm_file != vma_m->vm_file);
71161 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
71162 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
71163 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
71164 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
71165 + return vma_m;
71166 +}
71167 +#endif
71168 +
71169 /*
71170 * Verify that the stack growth is acceptable and
71171 * update accounting. This is shared with both the
71172 @@ -1688,6 +1936,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71173 return -ENOMEM;
71174
71175 /* Stack limit test */
71176 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
71177 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
71178 return -ENOMEM;
71179
71180 @@ -1698,6 +1947,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71181 locked = mm->locked_vm + grow;
71182 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
71183 limit >>= PAGE_SHIFT;
71184 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71185 if (locked > limit && !capable(CAP_IPC_LOCK))
71186 return -ENOMEM;
71187 }
71188 @@ -1728,37 +1978,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71189 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
71190 * vma is the last one with address > vma->vm_end. Have to extend vma.
71191 */
71192 +#ifndef CONFIG_IA64
71193 +static
71194 +#endif
71195 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71196 {
71197 int error;
71198 + bool locknext;
71199
71200 if (!(vma->vm_flags & VM_GROWSUP))
71201 return -EFAULT;
71202
71203 + /* Also guard against wrapping around to address 0. */
71204 + if (address < PAGE_ALIGN(address+1))
71205 + address = PAGE_ALIGN(address+1);
71206 + else
71207 + return -ENOMEM;
71208 +
71209 /*
71210 * We must make sure the anon_vma is allocated
71211 * so that the anon_vma locking is not a noop.
71212 */
71213 if (unlikely(anon_vma_prepare(vma)))
71214 return -ENOMEM;
71215 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
71216 + if (locknext && anon_vma_prepare(vma->vm_next))
71217 + return -ENOMEM;
71218 vma_lock_anon_vma(vma);
71219 + if (locknext)
71220 + vma_lock_anon_vma(vma->vm_next);
71221
71222 /*
71223 * vma->vm_start/vm_end cannot change under us because the caller
71224 * is required to hold the mmap_sem in read mode. We need the
71225 - * anon_vma lock to serialize against concurrent expand_stacks.
71226 - * Also guard against wrapping around to address 0.
71227 + * anon_vma locks to serialize against concurrent expand_stacks
71228 + * and expand_upwards.
71229 */
71230 - if (address < PAGE_ALIGN(address+4))
71231 - address = PAGE_ALIGN(address+4);
71232 - else {
71233 - vma_unlock_anon_vma(vma);
71234 - return -ENOMEM;
71235 - }
71236 error = 0;
71237
71238 /* Somebody else might have raced and expanded it already */
71239 - if (address > vma->vm_end) {
71240 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
71241 + error = -ENOMEM;
71242 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
71243 unsigned long size, grow;
71244
71245 size = address - vma->vm_start;
71246 @@ -1773,6 +2034,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71247 }
71248 }
71249 }
71250 + if (locknext)
71251 + vma_unlock_anon_vma(vma->vm_next);
71252 vma_unlock_anon_vma(vma);
71253 khugepaged_enter_vma_merge(vma);
71254 return error;
71255 @@ -1786,6 +2049,8 @@ int expand_downwards(struct vm_area_struct *vma,
71256 unsigned long address)
71257 {
71258 int error;
71259 + bool lockprev = false;
71260 + struct vm_area_struct *prev;
71261
71262 /*
71263 * We must make sure the anon_vma is allocated
71264 @@ -1799,6 +2064,15 @@ int expand_downwards(struct vm_area_struct *vma,
71265 if (error)
71266 return error;
71267
71268 + prev = vma->vm_prev;
71269 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
71270 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
71271 +#endif
71272 + if (lockprev && anon_vma_prepare(prev))
71273 + return -ENOMEM;
71274 + if (lockprev)
71275 + vma_lock_anon_vma(prev);
71276 +
71277 vma_lock_anon_vma(vma);
71278
71279 /*
71280 @@ -1808,9 +2082,17 @@ int expand_downwards(struct vm_area_struct *vma,
71281 */
71282
71283 /* Somebody else might have raced and expanded it already */
71284 - if (address < vma->vm_start) {
71285 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
71286 + error = -ENOMEM;
71287 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
71288 unsigned long size, grow;
71289
71290 +#ifdef CONFIG_PAX_SEGMEXEC
71291 + struct vm_area_struct *vma_m;
71292 +
71293 + vma_m = pax_find_mirror_vma(vma);
71294 +#endif
71295 +
71296 size = vma->vm_end - address;
71297 grow = (vma->vm_start - address) >> PAGE_SHIFT;
71298
71299 @@ -1820,11 +2102,22 @@ int expand_downwards(struct vm_area_struct *vma,
71300 if (!error) {
71301 vma->vm_start = address;
71302 vma->vm_pgoff -= grow;
71303 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
71304 +
71305 +#ifdef CONFIG_PAX_SEGMEXEC
71306 + if (vma_m) {
71307 + vma_m->vm_start -= grow << PAGE_SHIFT;
71308 + vma_m->vm_pgoff -= grow;
71309 + }
71310 +#endif
71311 +
71312 perf_event_mmap(vma);
71313 }
71314 }
71315 }
71316 vma_unlock_anon_vma(vma);
71317 + if (lockprev)
71318 + vma_unlock_anon_vma(prev);
71319 khugepaged_enter_vma_merge(vma);
71320 return error;
71321 }
71322 @@ -1894,6 +2187,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
71323 do {
71324 long nrpages = vma_pages(vma);
71325
71326 +#ifdef CONFIG_PAX_SEGMEXEC
71327 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
71328 + vma = remove_vma(vma);
71329 + continue;
71330 + }
71331 +#endif
71332 +
71333 mm->total_vm -= nrpages;
71334 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
71335 vma = remove_vma(vma);
71336 @@ -1939,6 +2239,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
71337 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
71338 vma->vm_prev = NULL;
71339 do {
71340 +
71341 +#ifdef CONFIG_PAX_SEGMEXEC
71342 + if (vma->vm_mirror) {
71343 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
71344 + vma->vm_mirror->vm_mirror = NULL;
71345 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
71346 + vma->vm_mirror = NULL;
71347 + }
71348 +#endif
71349 +
71350 rb_erase(&vma->vm_rb, &mm->mm_rb);
71351 mm->map_count--;
71352 tail_vma = vma;
71353 @@ -1967,14 +2277,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71354 struct vm_area_struct *new;
71355 int err = -ENOMEM;
71356
71357 +#ifdef CONFIG_PAX_SEGMEXEC
71358 + struct vm_area_struct *vma_m, *new_m = NULL;
71359 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
71360 +#endif
71361 +
71362 if (is_vm_hugetlb_page(vma) && (addr &
71363 ~(huge_page_mask(hstate_vma(vma)))))
71364 return -EINVAL;
71365
71366 +#ifdef CONFIG_PAX_SEGMEXEC
71367 + vma_m = pax_find_mirror_vma(vma);
71368 +#endif
71369 +
71370 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71371 if (!new)
71372 goto out_err;
71373
71374 +#ifdef CONFIG_PAX_SEGMEXEC
71375 + if (vma_m) {
71376 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71377 + if (!new_m) {
71378 + kmem_cache_free(vm_area_cachep, new);
71379 + goto out_err;
71380 + }
71381 + }
71382 +#endif
71383 +
71384 /* most fields are the same, copy all, and then fixup */
71385 *new = *vma;
71386
71387 @@ -1987,6 +2316,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71388 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
71389 }
71390
71391 +#ifdef CONFIG_PAX_SEGMEXEC
71392 + if (vma_m) {
71393 + *new_m = *vma_m;
71394 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
71395 + new_m->vm_mirror = new;
71396 + new->vm_mirror = new_m;
71397 +
71398 + if (new_below)
71399 + new_m->vm_end = addr_m;
71400 + else {
71401 + new_m->vm_start = addr_m;
71402 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
71403 + }
71404 + }
71405 +#endif
71406 +
71407 pol = mpol_dup(vma_policy(vma));
71408 if (IS_ERR(pol)) {
71409 err = PTR_ERR(pol);
71410 @@ -2012,6 +2357,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71411 else
71412 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
71413
71414 +#ifdef CONFIG_PAX_SEGMEXEC
71415 + if (!err && vma_m) {
71416 + if (anon_vma_clone(new_m, vma_m))
71417 + goto out_free_mpol;
71418 +
71419 + mpol_get(pol);
71420 + vma_set_policy(new_m, pol);
71421 +
71422 + if (new_m->vm_file) {
71423 + get_file(new_m->vm_file);
71424 + if (vma_m->vm_flags & VM_EXECUTABLE)
71425 + added_exe_file_vma(mm);
71426 + }
71427 +
71428 + if (new_m->vm_ops && new_m->vm_ops->open)
71429 + new_m->vm_ops->open(new_m);
71430 +
71431 + if (new_below)
71432 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
71433 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
71434 + else
71435 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
71436 +
71437 + if (err) {
71438 + if (new_m->vm_ops && new_m->vm_ops->close)
71439 + new_m->vm_ops->close(new_m);
71440 + if (new_m->vm_file) {
71441 + if (vma_m->vm_flags & VM_EXECUTABLE)
71442 + removed_exe_file_vma(mm);
71443 + fput(new_m->vm_file);
71444 + }
71445 + mpol_put(pol);
71446 + }
71447 + }
71448 +#endif
71449 +
71450 /* Success. */
71451 if (!err)
71452 return 0;
71453 @@ -2024,10 +2405,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71454 removed_exe_file_vma(mm);
71455 fput(new->vm_file);
71456 }
71457 - unlink_anon_vmas(new);
71458 out_free_mpol:
71459 mpol_put(pol);
71460 out_free_vma:
71461 +
71462 +#ifdef CONFIG_PAX_SEGMEXEC
71463 + if (new_m) {
71464 + unlink_anon_vmas(new_m);
71465 + kmem_cache_free(vm_area_cachep, new_m);
71466 + }
71467 +#endif
71468 +
71469 + unlink_anon_vmas(new);
71470 kmem_cache_free(vm_area_cachep, new);
71471 out_err:
71472 return err;
71473 @@ -2040,6 +2429,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71474 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71475 unsigned long addr, int new_below)
71476 {
71477 +
71478 +#ifdef CONFIG_PAX_SEGMEXEC
71479 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71480 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
71481 + if (mm->map_count >= sysctl_max_map_count-1)
71482 + return -ENOMEM;
71483 + } else
71484 +#endif
71485 +
71486 if (mm->map_count >= sysctl_max_map_count)
71487 return -ENOMEM;
71488
71489 @@ -2051,11 +2449,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71490 * work. This now handles partial unmappings.
71491 * Jeremy Fitzhardinge <jeremy@goop.org>
71492 */
71493 +#ifdef CONFIG_PAX_SEGMEXEC
71494 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71495 {
71496 + int ret = __do_munmap(mm, start, len);
71497 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
71498 + return ret;
71499 +
71500 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
71501 +}
71502 +
71503 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71504 +#else
71505 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71506 +#endif
71507 +{
71508 unsigned long end;
71509 struct vm_area_struct *vma, *prev, *last;
71510
71511 + /*
71512 + * mm->mmap_sem is required to protect against another thread
71513 + * changing the mappings in case we sleep.
71514 + */
71515 + verify_mm_writelocked(mm);
71516 +
71517 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
71518 return -EINVAL;
71519
71520 @@ -2130,6 +2547,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71521 /* Fix up all other VM information */
71522 remove_vma_list(mm, vma);
71523
71524 + track_exec_limit(mm, start, end, 0UL);
71525 +
71526 return 0;
71527 }
71528 EXPORT_SYMBOL(do_munmap);
71529 @@ -2139,6 +2558,13 @@ int vm_munmap(unsigned long start, size_t len)
71530 int ret;
71531 struct mm_struct *mm = current->mm;
71532
71533 +
71534 +#ifdef CONFIG_PAX_SEGMEXEC
71535 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
71536 + (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
71537 + return -EINVAL;
71538 +#endif
71539 +
71540 down_write(&mm->mmap_sem);
71541 ret = do_munmap(mm, start, len);
71542 up_write(&mm->mmap_sem);
71543 @@ -2152,16 +2578,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
71544 return vm_munmap(addr, len);
71545 }
71546
71547 -static inline void verify_mm_writelocked(struct mm_struct *mm)
71548 -{
71549 -#ifdef CONFIG_DEBUG_VM
71550 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71551 - WARN_ON(1);
71552 - up_read(&mm->mmap_sem);
71553 - }
71554 -#endif
71555 -}
71556 -
71557 /*
71558 * this is really a simplified "do_mmap". it only handles
71559 * anonymous maps. eventually we may be able to do some
71560 @@ -2175,6 +2591,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71561 struct rb_node ** rb_link, * rb_parent;
71562 pgoff_t pgoff = addr >> PAGE_SHIFT;
71563 int error;
71564 + unsigned long charged;
71565
71566 len = PAGE_ALIGN(len);
71567 if (!len)
71568 @@ -2186,16 +2603,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71569
71570 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
71571
71572 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
71573 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
71574 + flags &= ~VM_EXEC;
71575 +
71576 +#ifdef CONFIG_PAX_MPROTECT
71577 + if (mm->pax_flags & MF_PAX_MPROTECT)
71578 + flags &= ~VM_MAYEXEC;
71579 +#endif
71580 +
71581 + }
71582 +#endif
71583 +
71584 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
71585 if (error & ~PAGE_MASK)
71586 return error;
71587
71588 + charged = len >> PAGE_SHIFT;
71589 +
71590 /*
71591 * mlock MCL_FUTURE?
71592 */
71593 if (mm->def_flags & VM_LOCKED) {
71594 unsigned long locked, lock_limit;
71595 - locked = len >> PAGE_SHIFT;
71596 + locked = charged;
71597 locked += mm->locked_vm;
71598 lock_limit = rlimit(RLIMIT_MEMLOCK);
71599 lock_limit >>= PAGE_SHIFT;
71600 @@ -2212,22 +2643,22 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71601 /*
71602 * Clear old maps. this also does some error checking for us
71603 */
71604 - munmap_back:
71605 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71606 if (vma && vma->vm_start < addr + len) {
71607 if (do_munmap(mm, addr, len))
71608 return -ENOMEM;
71609 - goto munmap_back;
71610 - }
71611 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71612 + BUG_ON(vma && vma->vm_start < addr + len);
71613 + }
71614
71615 /* Check against address space limits *after* clearing old maps... */
71616 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
71617 + if (!may_expand_vm(mm, charged))
71618 return -ENOMEM;
71619
71620 if (mm->map_count > sysctl_max_map_count)
71621 return -ENOMEM;
71622
71623 - if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
71624 + if (security_vm_enough_memory_mm(mm, charged))
71625 return -ENOMEM;
71626
71627 /* Can we just expand an old private anonymous mapping? */
71628 @@ -2241,7 +2672,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71629 */
71630 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71631 if (!vma) {
71632 - vm_unacct_memory(len >> PAGE_SHIFT);
71633 + vm_unacct_memory(charged);
71634 return -ENOMEM;
71635 }
71636
71637 @@ -2255,11 +2686,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71638 vma_link(mm, vma, prev, rb_link, rb_parent);
71639 out:
71640 perf_event_mmap(vma);
71641 - mm->total_vm += len >> PAGE_SHIFT;
71642 + mm->total_vm += charged;
71643 if (flags & VM_LOCKED) {
71644 if (!mlock_vma_pages_range(vma, addr, addr + len))
71645 - mm->locked_vm += (len >> PAGE_SHIFT);
71646 + mm->locked_vm += charged;
71647 }
71648 + track_exec_limit(mm, addr, addr + len, flags);
71649 return addr;
71650 }
71651
71652 @@ -2315,8 +2747,10 @@ void exit_mmap(struct mm_struct *mm)
71653 * Walk the list again, actually closing and freeing it,
71654 * with preemption enabled, without holding any MM locks.
71655 */
71656 - while (vma)
71657 + while (vma) {
71658 + vma->vm_mirror = NULL;
71659 vma = remove_vma(vma);
71660 + }
71661
71662 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
71663 }
71664 @@ -2330,6 +2764,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71665 struct vm_area_struct * __vma, * prev;
71666 struct rb_node ** rb_link, * rb_parent;
71667
71668 +#ifdef CONFIG_PAX_SEGMEXEC
71669 + struct vm_area_struct *vma_m = NULL;
71670 +#endif
71671 +
71672 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
71673 + return -EPERM;
71674 +
71675 /*
71676 * The vm_pgoff of a purely anonymous vma should be irrelevant
71677 * until its first write fault, when page's anon_vma and index
71678 @@ -2352,7 +2793,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71679 if ((vma->vm_flags & VM_ACCOUNT) &&
71680 security_vm_enough_memory_mm(mm, vma_pages(vma)))
71681 return -ENOMEM;
71682 +
71683 +#ifdef CONFIG_PAX_SEGMEXEC
71684 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
71685 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71686 + if (!vma_m)
71687 + return -ENOMEM;
71688 + }
71689 +#endif
71690 +
71691 vma_link(mm, vma, prev, rb_link, rb_parent);
71692 +
71693 +#ifdef CONFIG_PAX_SEGMEXEC
71694 + if (vma_m)
71695 + BUG_ON(pax_mirror_vma(vma_m, vma));
71696 +#endif
71697 +
71698 return 0;
71699 }
71700
71701 @@ -2371,6 +2827,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71702 struct mempolicy *pol;
71703 bool faulted_in_anon_vma = true;
71704
71705 + BUG_ON(vma->vm_mirror);
71706 +
71707 /*
71708 * If anonymous vma has not yet been faulted, update new pgoff
71709 * to match new location, to increase its chance of merging.
71710 @@ -2438,6 +2896,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71711 return NULL;
71712 }
71713
71714 +#ifdef CONFIG_PAX_SEGMEXEC
71715 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
71716 +{
71717 + struct vm_area_struct *prev_m;
71718 + struct rb_node **rb_link_m, *rb_parent_m;
71719 + struct mempolicy *pol_m;
71720 +
71721 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
71722 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
71723 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
71724 + *vma_m = *vma;
71725 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
71726 + if (anon_vma_clone(vma_m, vma))
71727 + return -ENOMEM;
71728 + pol_m = vma_policy(vma_m);
71729 + mpol_get(pol_m);
71730 + vma_set_policy(vma_m, pol_m);
71731 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
71732 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
71733 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
71734 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
71735 + if (vma_m->vm_file)
71736 + get_file(vma_m->vm_file);
71737 + if (vma_m->vm_ops && vma_m->vm_ops->open)
71738 + vma_m->vm_ops->open(vma_m);
71739 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
71740 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
71741 + vma_m->vm_mirror = vma;
71742 + vma->vm_mirror = vma_m;
71743 + return 0;
71744 +}
71745 +#endif
71746 +
71747 /*
71748 * Return true if the calling process may expand its vm space by the passed
71749 * number of pages
71750 @@ -2449,6 +2940,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
71751
71752 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
71753
71754 +#ifdef CONFIG_PAX_RANDMMAP
71755 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71756 + cur -= mm->brk_gap;
71757 +#endif
71758 +
71759 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
71760 if (cur + npages > lim)
71761 return 0;
71762 return 1;
71763 @@ -2519,6 +3016,22 @@ int install_special_mapping(struct mm_struct *mm,
71764 vma->vm_start = addr;
71765 vma->vm_end = addr + len;
71766
71767 +#ifdef CONFIG_PAX_MPROTECT
71768 + if (mm->pax_flags & MF_PAX_MPROTECT) {
71769 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
71770 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
71771 + return -EPERM;
71772 + if (!(vm_flags & VM_EXEC))
71773 + vm_flags &= ~VM_MAYEXEC;
71774 +#else
71775 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71776 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71777 +#endif
71778 + else
71779 + vm_flags &= ~VM_MAYWRITE;
71780 + }
71781 +#endif
71782 +
71783 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
71784 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71785
71786 diff --git a/mm/mprotect.c b/mm/mprotect.c
71787 index a409926..8b32e6d 100644
71788 --- a/mm/mprotect.c
71789 +++ b/mm/mprotect.c
71790 @@ -23,10 +23,17 @@
71791 #include <linux/mmu_notifier.h>
71792 #include <linux/migrate.h>
71793 #include <linux/perf_event.h>
71794 +
71795 +#ifdef CONFIG_PAX_MPROTECT
71796 +#include <linux/elf.h>
71797 +#include <linux/binfmts.h>
71798 +#endif
71799 +
71800 #include <asm/uaccess.h>
71801 #include <asm/pgtable.h>
71802 #include <asm/cacheflush.h>
71803 #include <asm/tlbflush.h>
71804 +#include <asm/mmu_context.h>
71805
71806 #ifndef pgprot_modify
71807 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
71808 @@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
71809 flush_tlb_range(vma, start, end);
71810 }
71811
71812 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
71813 +/* called while holding the mmap semaphore for writing, except during stack expansion */
71814 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
71815 +{
71816 + unsigned long oldlimit, newlimit = 0UL;
71817 +
71818 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
71819 + return;
71820 +
71821 + spin_lock(&mm->page_table_lock);
71822 + oldlimit = mm->context.user_cs_limit;
71823 + if ((prot & VM_EXEC) && oldlimit < end)
71824 + /* USER_CS limit moved up */
71825 + newlimit = end;
71826 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
71827 + /* USER_CS limit moved down */
71828 + newlimit = start;
71829 +
71830 + if (newlimit) {
71831 + mm->context.user_cs_limit = newlimit;
71832 +
71833 +#ifdef CONFIG_SMP
71834 + wmb();
71835 + cpus_clear(mm->context.cpu_user_cs_mask);
71836 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
71837 +#endif
71838 +
71839 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
71840 + }
71841 + spin_unlock(&mm->page_table_lock);
71842 + if (newlimit == end) {
71843 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
71844 +
71845 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
71846 + if (is_vm_hugetlb_page(vma))
71847 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
71848 + else
71849 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
71850 + }
71851 +}
71852 +#endif
71853 +
71854 int
71855 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71856 unsigned long start, unsigned long end, unsigned long newflags)
71857 @@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71858 int error;
71859 int dirty_accountable = 0;
71860
71861 +#ifdef CONFIG_PAX_SEGMEXEC
71862 + struct vm_area_struct *vma_m = NULL;
71863 + unsigned long start_m, end_m;
71864 +
71865 + start_m = start + SEGMEXEC_TASK_SIZE;
71866 + end_m = end + SEGMEXEC_TASK_SIZE;
71867 +#endif
71868 +
71869 if (newflags == oldflags) {
71870 *pprev = vma;
71871 return 0;
71872 }
71873
71874 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
71875 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
71876 +
71877 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
71878 + return -ENOMEM;
71879 +
71880 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
71881 + return -ENOMEM;
71882 + }
71883 +
71884 /*
71885 * If we make a private mapping writable we increase our commit;
71886 * but (without finer accounting) cannot reduce our commit if we
71887 @@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71888 }
71889 }
71890
71891 +#ifdef CONFIG_PAX_SEGMEXEC
71892 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
71893 + if (start != vma->vm_start) {
71894 + error = split_vma(mm, vma, start, 1);
71895 + if (error)
71896 + goto fail;
71897 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
71898 + *pprev = (*pprev)->vm_next;
71899 + }
71900 +
71901 + if (end != vma->vm_end) {
71902 + error = split_vma(mm, vma, end, 0);
71903 + if (error)
71904 + goto fail;
71905 + }
71906 +
71907 + if (pax_find_mirror_vma(vma)) {
71908 + error = __do_munmap(mm, start_m, end_m - start_m);
71909 + if (error)
71910 + goto fail;
71911 + } else {
71912 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71913 + if (!vma_m) {
71914 + error = -ENOMEM;
71915 + goto fail;
71916 + }
71917 + vma->vm_flags = newflags;
71918 + error = pax_mirror_vma(vma_m, vma);
71919 + if (error) {
71920 + vma->vm_flags = oldflags;
71921 + goto fail;
71922 + }
71923 + }
71924 + }
71925 +#endif
71926 +
71927 /*
71928 * First try to merge with previous and/or next vma.
71929 */
71930 @@ -204,9 +307,21 @@ success:
71931 * vm_flags and vm_page_prot are protected by the mmap_sem
71932 * held in write mode.
71933 */
71934 +
71935 +#ifdef CONFIG_PAX_SEGMEXEC
71936 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
71937 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
71938 +#endif
71939 +
71940 vma->vm_flags = newflags;
71941 +
71942 +#ifdef CONFIG_PAX_MPROTECT
71943 + if (mm->binfmt && mm->binfmt->handle_mprotect)
71944 + mm->binfmt->handle_mprotect(vma, newflags);
71945 +#endif
71946 +
71947 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
71948 - vm_get_page_prot(newflags));
71949 + vm_get_page_prot(vma->vm_flags));
71950
71951 if (vma_wants_writenotify(vma)) {
71952 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
71953 @@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71954 end = start + len;
71955 if (end <= start)
71956 return -ENOMEM;
71957 +
71958 +#ifdef CONFIG_PAX_SEGMEXEC
71959 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
71960 + if (end > SEGMEXEC_TASK_SIZE)
71961 + return -EINVAL;
71962 + } else
71963 +#endif
71964 +
71965 + if (end > TASK_SIZE)
71966 + return -EINVAL;
71967 +
71968 if (!arch_validate_prot(prot))
71969 return -EINVAL;
71970
71971 @@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71972 /*
71973 * Does the application expect PROT_READ to imply PROT_EXEC:
71974 */
71975 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
71976 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
71977 prot |= PROT_EXEC;
71978
71979 vm_flags = calc_vm_prot_bits(prot);
71980 @@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71981 if (start > vma->vm_start)
71982 prev = vma;
71983
71984 +#ifdef CONFIG_PAX_MPROTECT
71985 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
71986 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
71987 +#endif
71988 +
71989 for (nstart = start ; ; ) {
71990 unsigned long newflags;
71991
71992 @@ -297,6 +428,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71993
71994 /* newflags >> 4 shift VM_MAY% in place of VM_% */
71995 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
71996 + if (prot & (PROT_WRITE | PROT_EXEC))
71997 + gr_log_rwxmprotect(vma->vm_file);
71998 +
71999 + error = -EACCES;
72000 + goto out;
72001 + }
72002 +
72003 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
72004 error = -EACCES;
72005 goto out;
72006 }
72007 @@ -311,6 +450,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72008 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
72009 if (error)
72010 goto out;
72011 +
72012 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
72013 +
72014 nstart = tmp;
72015
72016 if (nstart < prev->vm_end)
72017 diff --git a/mm/mremap.c b/mm/mremap.c
72018 index db8d983..76506cb 100644
72019 --- a/mm/mremap.c
72020 +++ b/mm/mremap.c
72021 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
72022 continue;
72023 pte = ptep_get_and_clear(mm, old_addr, old_pte);
72024 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
72025 +
72026 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72027 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
72028 + pte = pte_exprotect(pte);
72029 +#endif
72030 +
72031 set_pte_at(mm, new_addr, new_pte, pte);
72032 }
72033
72034 @@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
72035 if (is_vm_hugetlb_page(vma))
72036 goto Einval;
72037
72038 +#ifdef CONFIG_PAX_SEGMEXEC
72039 + if (pax_find_mirror_vma(vma))
72040 + goto Einval;
72041 +#endif
72042 +
72043 /* We can't remap across vm area boundaries */
72044 if (old_len > vma->vm_end - addr)
72045 goto Efault;
72046 @@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
72047 unsigned long ret = -EINVAL;
72048 unsigned long charged = 0;
72049 unsigned long map_flags;
72050 + unsigned long pax_task_size = TASK_SIZE;
72051
72052 if (new_addr & ~PAGE_MASK)
72053 goto out;
72054
72055 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
72056 +#ifdef CONFIG_PAX_SEGMEXEC
72057 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72058 + pax_task_size = SEGMEXEC_TASK_SIZE;
72059 +#endif
72060 +
72061 + pax_task_size -= PAGE_SIZE;
72062 +
72063 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
72064 goto out;
72065
72066 /* Check if the location we're moving into overlaps the
72067 * old location at all, and fail if it does.
72068 */
72069 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
72070 - goto out;
72071 -
72072 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
72073 + if (addr + old_len > new_addr && new_addr + new_len > addr)
72074 goto out;
72075
72076 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72077 @@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
72078 struct vm_area_struct *vma;
72079 unsigned long ret = -EINVAL;
72080 unsigned long charged = 0;
72081 + unsigned long pax_task_size = TASK_SIZE;
72082
72083 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
72084 goto out;
72085 @@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
72086 if (!new_len)
72087 goto out;
72088
72089 +#ifdef CONFIG_PAX_SEGMEXEC
72090 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72091 + pax_task_size = SEGMEXEC_TASK_SIZE;
72092 +#endif
72093 +
72094 + pax_task_size -= PAGE_SIZE;
72095 +
72096 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72097 + old_len > pax_task_size || addr > pax_task_size-old_len)
72098 + goto out;
72099 +
72100 if (flags & MREMAP_FIXED) {
72101 if (flags & MREMAP_MAYMOVE)
72102 ret = mremap_to(addr, old_len, new_addr, new_len);
72103 @@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
72104 addr + new_len);
72105 }
72106 ret = addr;
72107 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
72108 goto out;
72109 }
72110 }
72111 @@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
72112 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72113 if (ret)
72114 goto out;
72115 +
72116 + map_flags = vma->vm_flags;
72117 ret = move_vma(vma, addr, old_len, new_len, new_addr);
72118 + if (!(ret & ~PAGE_MASK)) {
72119 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
72120 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
72121 + }
72122 }
72123 out:
72124 if (ret & ~PAGE_MASK)
72125 diff --git a/mm/nommu.c b/mm/nommu.c
72126 index bb8f4f0..40d3e02 100644
72127 --- a/mm/nommu.c
72128 +++ b/mm/nommu.c
72129 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72130 int sysctl_overcommit_ratio = 50; /* default is 50% */
72131 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
72132 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
72133 -int heap_stack_gap = 0;
72134
72135 atomic_long_t mmap_pages_allocated;
72136
72137 @@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
72138 EXPORT_SYMBOL(find_vma);
72139
72140 /*
72141 - * find a VMA
72142 - * - we don't extend stack VMAs under NOMMU conditions
72143 - */
72144 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
72145 -{
72146 - return find_vma(mm, addr);
72147 -}
72148 -
72149 -/*
72150 * expand a stack to a given address
72151 * - not supported under NOMMU conditions
72152 */
72153 @@ -1580,6 +1570,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72154
72155 /* most fields are the same, copy all, and then fixup */
72156 *new = *vma;
72157 + INIT_LIST_HEAD(&new->anon_vma_chain);
72158 *region = *vma->vm_region;
72159 new->vm_region = region;
72160
72161 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
72162 index 918330f..ae99ae1 100644
72163 --- a/mm/page_alloc.c
72164 +++ b/mm/page_alloc.c
72165 @@ -335,7 +335,7 @@ out:
72166 * This usage means that zero-order pages may not be compound.
72167 */
72168
72169 -static void free_compound_page(struct page *page)
72170 +void free_compound_page(struct page *page)
72171 {
72172 __free_pages_ok(page, compound_order(page));
72173 }
72174 @@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72175 int i;
72176 int bad = 0;
72177
72178 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72179 + unsigned long index = 1UL << order;
72180 +#endif
72181 +
72182 trace_mm_page_free(page, order);
72183 kmemcheck_free_shadow(page, order);
72184
72185 @@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72186 debug_check_no_obj_freed(page_address(page),
72187 PAGE_SIZE << order);
72188 }
72189 +
72190 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72191 + for (; index; --index)
72192 + sanitize_highpage(page + index - 1);
72193 +#endif
72194 +
72195 arch_free_page(page, order);
72196 kernel_map_pages(page, 1 << order, 0);
72197
72198 @@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
72199 arch_alloc_page(page, order);
72200 kernel_map_pages(page, 1 << order, 1);
72201
72202 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
72203 if (gfp_flags & __GFP_ZERO)
72204 prep_zero_page(page, order, gfp_flags);
72205 +#endif
72206
72207 if (order && (gfp_flags & __GFP_COMP))
72208 prep_compound_page(page, order);
72209 @@ -3523,7 +3535,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
72210 unsigned long pfn;
72211
72212 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
72213 +#ifdef CONFIG_X86_32
72214 + /* boot failures seen in VMware 8 on 32-bit vanilla since
72215 + this change */
72216 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
72217 +#else
72218 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
72219 +#endif
72220 return 1;
72221 }
72222 return 0;
72223 diff --git a/mm/percpu.c b/mm/percpu.c
72224 index bb4be74..a43ea85 100644
72225 --- a/mm/percpu.c
72226 +++ b/mm/percpu.c
72227 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
72228 static unsigned int pcpu_high_unit_cpu __read_mostly;
72229
72230 /* the address of the first chunk which starts with the kernel static area */
72231 -void *pcpu_base_addr __read_mostly;
72232 +void *pcpu_base_addr __read_only;
72233 EXPORT_SYMBOL_GPL(pcpu_base_addr);
72234
72235 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
72236 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
72237 index c20ff48..137702a 100644
72238 --- a/mm/process_vm_access.c
72239 +++ b/mm/process_vm_access.c
72240 @@ -13,6 +13,7 @@
72241 #include <linux/uio.h>
72242 #include <linux/sched.h>
72243 #include <linux/highmem.h>
72244 +#include <linux/security.h>
72245 #include <linux/ptrace.h>
72246 #include <linux/slab.h>
72247 #include <linux/syscalls.h>
72248 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
72249 size_t iov_l_curr_offset = 0;
72250 ssize_t iov_len;
72251
72252 + return -ENOSYS; // PaX: until properly audited
72253 +
72254 /*
72255 * Work out how many pages of struct pages we're going to need
72256 * when eventually calling get_user_pages
72257 */
72258 for (i = 0; i < riovcnt; i++) {
72259 iov_len = rvec[i].iov_len;
72260 - if (iov_len > 0) {
72261 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
72262 - + iov_len)
72263 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
72264 - / PAGE_SIZE + 1;
72265 - nr_pages = max(nr_pages, nr_pages_iov);
72266 - }
72267 + if (iov_len <= 0)
72268 + continue;
72269 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
72270 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
72271 + nr_pages = max(nr_pages, nr_pages_iov);
72272 }
72273
72274 if (nr_pages == 0)
72275 @@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
72276 goto free_proc_pages;
72277 }
72278
72279 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
72280 + rc = -EPERM;
72281 + goto put_task_struct;
72282 + }
72283 +
72284 mm = mm_access(task, PTRACE_MODE_ATTACH);
72285 if (!mm || IS_ERR(mm)) {
72286 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
72287 diff --git a/mm/rmap.c b/mm/rmap.c
72288 index 5b5ad58..0f77903 100644
72289 --- a/mm/rmap.c
72290 +++ b/mm/rmap.c
72291 @@ -167,6 +167,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72292 struct anon_vma *anon_vma = vma->anon_vma;
72293 struct anon_vma_chain *avc;
72294
72295 +#ifdef CONFIG_PAX_SEGMEXEC
72296 + struct anon_vma_chain *avc_m = NULL;
72297 +#endif
72298 +
72299 might_sleep();
72300 if (unlikely(!anon_vma)) {
72301 struct mm_struct *mm = vma->vm_mm;
72302 @@ -176,6 +180,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72303 if (!avc)
72304 goto out_enomem;
72305
72306 +#ifdef CONFIG_PAX_SEGMEXEC
72307 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
72308 + if (!avc_m)
72309 + goto out_enomem_free_avc;
72310 +#endif
72311 +
72312 anon_vma = find_mergeable_anon_vma(vma);
72313 allocated = NULL;
72314 if (!anon_vma) {
72315 @@ -189,6 +199,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72316 /* page_table_lock to protect against threads */
72317 spin_lock(&mm->page_table_lock);
72318 if (likely(!vma->anon_vma)) {
72319 +
72320 +#ifdef CONFIG_PAX_SEGMEXEC
72321 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
72322 +
72323 + if (vma_m) {
72324 + BUG_ON(vma_m->anon_vma);
72325 + vma_m->anon_vma = anon_vma;
72326 + anon_vma_chain_link(vma_m, avc_m, anon_vma);
72327 + avc_m = NULL;
72328 + }
72329 +#endif
72330 +
72331 vma->anon_vma = anon_vma;
72332 anon_vma_chain_link(vma, avc, anon_vma);
72333 allocated = NULL;
72334 @@ -199,12 +221,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72335
72336 if (unlikely(allocated))
72337 put_anon_vma(allocated);
72338 +
72339 +#ifdef CONFIG_PAX_SEGMEXEC
72340 + if (unlikely(avc_m))
72341 + anon_vma_chain_free(avc_m);
72342 +#endif
72343 +
72344 if (unlikely(avc))
72345 anon_vma_chain_free(avc);
72346 }
72347 return 0;
72348
72349 out_enomem_free_avc:
72350 +
72351 +#ifdef CONFIG_PAX_SEGMEXEC
72352 + if (avc_m)
72353 + anon_vma_chain_free(avc_m);
72354 +#endif
72355 +
72356 anon_vma_chain_free(avc);
72357 out_enomem:
72358 return -ENOMEM;
72359 @@ -240,7 +274,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
72360 * Attach the anon_vmas from src to dst.
72361 * Returns 0 on success, -ENOMEM on failure.
72362 */
72363 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
72364 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
72365 {
72366 struct anon_vma_chain *avc, *pavc;
72367 struct anon_vma *root = NULL;
72368 @@ -318,7 +352,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
72369 * the corresponding VMA in the parent process is attached to.
72370 * Returns 0 on success, non-zero on failure.
72371 */
72372 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
72373 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
72374 {
72375 struct anon_vma_chain *avc;
72376 struct anon_vma *anon_vma;
72377 diff --git a/mm/shmem.c b/mm/shmem.c
72378 index f99ff3e..faea8b6 100644
72379 --- a/mm/shmem.c
72380 +++ b/mm/shmem.c
72381 @@ -31,7 +31,7 @@
72382 #include <linux/export.h>
72383 #include <linux/swap.h>
72384
72385 -static struct vfsmount *shm_mnt;
72386 +struct vfsmount *shm_mnt;
72387
72388 #ifdef CONFIG_SHMEM
72389 /*
72390 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
72391 #define BOGO_DIRENT_SIZE 20
72392
72393 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
72394 -#define SHORT_SYMLINK_LEN 128
72395 +#define SHORT_SYMLINK_LEN 64
72396
72397 struct shmem_xattr {
72398 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
72399 @@ -2235,8 +2235,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
72400 int err = -ENOMEM;
72401
72402 /* Round up to L1_CACHE_BYTES to resist false sharing */
72403 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
72404 - L1_CACHE_BYTES), GFP_KERNEL);
72405 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
72406 if (!sbinfo)
72407 return -ENOMEM;
72408
72409 diff --git a/mm/slab.c b/mm/slab.c
72410 index e901a36..ee8fe97 100644
72411 --- a/mm/slab.c
72412 +++ b/mm/slab.c
72413 @@ -153,7 +153,7 @@
72414
72415 /* Legal flag mask for kmem_cache_create(). */
72416 #if DEBUG
72417 -# define CREATE_MASK (SLAB_RED_ZONE | \
72418 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
72419 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
72420 SLAB_CACHE_DMA | \
72421 SLAB_STORE_USER | \
72422 @@ -161,7 +161,7 @@
72423 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72424 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
72425 #else
72426 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
72427 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
72428 SLAB_CACHE_DMA | \
72429 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
72430 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72431 @@ -290,7 +290,7 @@ struct kmem_list3 {
72432 * Need this for bootstrapping a per node allocator.
72433 */
72434 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
72435 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
72436 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
72437 #define CACHE_CACHE 0
72438 #define SIZE_AC MAX_NUMNODES
72439 #define SIZE_L3 (2 * MAX_NUMNODES)
72440 @@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
72441 if ((x)->max_freeable < i) \
72442 (x)->max_freeable = i; \
72443 } while (0)
72444 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
72445 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
72446 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
72447 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
72448 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
72449 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
72450 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
72451 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
72452 #else
72453 #define STATS_INC_ACTIVE(x) do { } while (0)
72454 #define STATS_DEC_ACTIVE(x) do { } while (0)
72455 @@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
72456 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
72457 */
72458 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
72459 - const struct slab *slab, void *obj)
72460 + const struct slab *slab, const void *obj)
72461 {
72462 u32 offset = (obj - slab->s_mem);
72463 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
72464 @@ -568,7 +568,7 @@ struct cache_names {
72465 static struct cache_names __initdata cache_names[] = {
72466 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
72467 #include <linux/kmalloc_sizes.h>
72468 - {NULL,}
72469 + {NULL}
72470 #undef CACHE
72471 };
72472
72473 @@ -1588,7 +1588,7 @@ void __init kmem_cache_init(void)
72474 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
72475 sizes[INDEX_AC].cs_size,
72476 ARCH_KMALLOC_MINALIGN,
72477 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72478 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72479 NULL);
72480
72481 if (INDEX_AC != INDEX_L3) {
72482 @@ -1596,7 +1596,7 @@ void __init kmem_cache_init(void)
72483 kmem_cache_create(names[INDEX_L3].name,
72484 sizes[INDEX_L3].cs_size,
72485 ARCH_KMALLOC_MINALIGN,
72486 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72487 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72488 NULL);
72489 }
72490
72491 @@ -1614,7 +1614,7 @@ void __init kmem_cache_init(void)
72492 sizes->cs_cachep = kmem_cache_create(names->name,
72493 sizes->cs_size,
72494 ARCH_KMALLOC_MINALIGN,
72495 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72496 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72497 NULL);
72498 }
72499 #ifdef CONFIG_ZONE_DMA
72500 @@ -4390,10 +4390,10 @@ static int s_show(struct seq_file *m, void *p)
72501 }
72502 /* cpu stats */
72503 {
72504 - unsigned long allochit = atomic_read(&cachep->allochit);
72505 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
72506 - unsigned long freehit = atomic_read(&cachep->freehit);
72507 - unsigned long freemiss = atomic_read(&cachep->freemiss);
72508 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
72509 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
72510 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
72511 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
72512
72513 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
72514 allochit, allocmiss, freehit, freemiss);
72515 @@ -4652,13 +4652,62 @@ static int __init slab_proc_init(void)
72516 {
72517 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
72518 #ifdef CONFIG_DEBUG_SLAB_LEAK
72519 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
72520 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
72521 #endif
72522 return 0;
72523 }
72524 module_init(slab_proc_init);
72525 #endif
72526
72527 +void check_object_size(const void *ptr, unsigned long n, bool to)
72528 +{
72529 +
72530 +#ifdef CONFIG_PAX_USERCOPY
72531 + struct page *page;
72532 + struct kmem_cache *cachep = NULL;
72533 + struct slab *slabp;
72534 + unsigned int objnr;
72535 + unsigned long offset;
72536 + const char *type;
72537 +
72538 + if (!n)
72539 + return;
72540 +
72541 + type = "<null>";
72542 + if (ZERO_OR_NULL_PTR(ptr))
72543 + goto report;
72544 +
72545 + if (!virt_addr_valid(ptr))
72546 + return;
72547 +
72548 + page = virt_to_head_page(ptr);
72549 +
72550 + type = "<process stack>";
72551 + if (!PageSlab(page)) {
72552 + if (object_is_on_stack(ptr, n) == -1)
72553 + goto report;
72554 + return;
72555 + }
72556 +
72557 + cachep = page_get_cache(page);
72558 + type = cachep->name;
72559 + if (!(cachep->flags & SLAB_USERCOPY))
72560 + goto report;
72561 +
72562 + slabp = page_get_slab(page);
72563 + objnr = obj_to_index(cachep, slabp, ptr);
72564 + BUG_ON(objnr >= cachep->num);
72565 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
72566 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
72567 + return;
72568 +
72569 +report:
72570 + pax_report_usercopy(ptr, n, to, type);
72571 +#endif
72572 +
72573 +}
72574 +EXPORT_SYMBOL(check_object_size);
72575 +
72576 /**
72577 * ksize - get the actual amount of memory allocated for a given object
72578 * @objp: Pointer to the object
72579 diff --git a/mm/slob.c b/mm/slob.c
72580 index 8105be4..e045f96 100644
72581 --- a/mm/slob.c
72582 +++ b/mm/slob.c
72583 @@ -29,7 +29,7 @@
72584 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
72585 * alloc_pages() directly, allocating compound pages so the page order
72586 * does not have to be separately tracked, and also stores the exact
72587 - * allocation size in page->private so that it can be used to accurately
72588 + * allocation size in slob_page->size so that it can be used to accurately
72589 * provide ksize(). These objects are detected in kfree() because slob_page()
72590 * is false for them.
72591 *
72592 @@ -58,6 +58,7 @@
72593 */
72594
72595 #include <linux/kernel.h>
72596 +#include <linux/sched.h>
72597 #include <linux/slab.h>
72598 #include <linux/mm.h>
72599 #include <linux/swap.h> /* struct reclaim_state */
72600 @@ -102,7 +103,8 @@ struct slob_page {
72601 unsigned long flags; /* mandatory */
72602 atomic_t _count; /* mandatory */
72603 slobidx_t units; /* free units left in page */
72604 - unsigned long pad[2];
72605 + unsigned long pad[1];
72606 + unsigned long size; /* size when >=PAGE_SIZE */
72607 slob_t *free; /* first free slob_t in page */
72608 struct list_head list; /* linked list of free pages */
72609 };
72610 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
72611 */
72612 static inline int is_slob_page(struct slob_page *sp)
72613 {
72614 - return PageSlab((struct page *)sp);
72615 + return PageSlab((struct page *)sp) && !sp->size;
72616 }
72617
72618 static inline void set_slob_page(struct slob_page *sp)
72619 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
72620
72621 static inline struct slob_page *slob_page(const void *addr)
72622 {
72623 - return (struct slob_page *)virt_to_page(addr);
72624 + return (struct slob_page *)virt_to_head_page(addr);
72625 }
72626
72627 /*
72628 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
72629 /*
72630 * Return the size of a slob block.
72631 */
72632 -static slobidx_t slob_units(slob_t *s)
72633 +static slobidx_t slob_units(const slob_t *s)
72634 {
72635 if (s->units > 0)
72636 return s->units;
72637 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
72638 /*
72639 * Return the next free slob block pointer after this one.
72640 */
72641 -static slob_t *slob_next(slob_t *s)
72642 +static slob_t *slob_next(const slob_t *s)
72643 {
72644 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
72645 slobidx_t next;
72646 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
72647 /*
72648 * Returns true if s is the last free block in its page.
72649 */
72650 -static int slob_last(slob_t *s)
72651 +static int slob_last(const slob_t *s)
72652 {
72653 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
72654 }
72655 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
72656 if (!page)
72657 return NULL;
72658
72659 + set_slob_page(page);
72660 return page_address(page);
72661 }
72662
72663 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
72664 if (!b)
72665 return NULL;
72666 sp = slob_page(b);
72667 - set_slob_page(sp);
72668
72669 spin_lock_irqsave(&slob_lock, flags);
72670 sp->units = SLOB_UNITS(PAGE_SIZE);
72671 sp->free = b;
72672 + sp->size = 0;
72673 INIT_LIST_HEAD(&sp->list);
72674 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
72675 set_slob_page_free(sp, slob_list);
72676 @@ -476,10 +479,9 @@ out:
72677 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
72678 */
72679
72680 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72681 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
72682 {
72683 - unsigned int *m;
72684 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72685 + slob_t *m;
72686 void *ret;
72687
72688 gfp &= gfp_allowed_mask;
72689 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72690
72691 if (!m)
72692 return NULL;
72693 - *m = size;
72694 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
72695 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
72696 + m[0].units = size;
72697 + m[1].units = align;
72698 ret = (void *)m + align;
72699
72700 trace_kmalloc_node(_RET_IP_, ret,
72701 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72702 gfp |= __GFP_COMP;
72703 ret = slob_new_pages(gfp, order, node);
72704 if (ret) {
72705 - struct page *page;
72706 - page = virt_to_page(ret);
72707 - page->private = size;
72708 + struct slob_page *sp;
72709 + sp = slob_page(ret);
72710 + sp->size = size;
72711 }
72712
72713 trace_kmalloc_node(_RET_IP_, ret,
72714 size, PAGE_SIZE << order, gfp, node);
72715 }
72716
72717 - kmemleak_alloc(ret, size, 1, gfp);
72718 + return ret;
72719 +}
72720 +
72721 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72722 +{
72723 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72724 + void *ret = __kmalloc_node_align(size, gfp, node, align);
72725 +
72726 + if (!ZERO_OR_NULL_PTR(ret))
72727 + kmemleak_alloc(ret, size, 1, gfp);
72728 return ret;
72729 }
72730 EXPORT_SYMBOL(__kmalloc_node);
72731 @@ -533,13 +547,92 @@ void kfree(const void *block)
72732 sp = slob_page(block);
72733 if (is_slob_page(sp)) {
72734 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72735 - unsigned int *m = (unsigned int *)(block - align);
72736 - slob_free(m, *m + align);
72737 - } else
72738 + slob_t *m = (slob_t *)(block - align);
72739 + slob_free(m, m[0].units + align);
72740 + } else {
72741 + clear_slob_page(sp);
72742 + free_slob_page(sp);
72743 + sp->size = 0;
72744 put_page(&sp->page);
72745 + }
72746 }
72747 EXPORT_SYMBOL(kfree);
72748
72749 +void check_object_size(const void *ptr, unsigned long n, bool to)
72750 +{
72751 +
72752 +#ifdef CONFIG_PAX_USERCOPY
72753 + struct slob_page *sp;
72754 + const slob_t *free;
72755 + const void *base;
72756 + unsigned long flags;
72757 + const char *type;
72758 +
72759 + if (!n)
72760 + return;
72761 +
72762 + type = "<null>";
72763 + if (ZERO_OR_NULL_PTR(ptr))
72764 + goto report;
72765 +
72766 + if (!virt_addr_valid(ptr))
72767 + return;
72768 +
72769 + type = "<process stack>";
72770 + sp = slob_page(ptr);
72771 + if (!PageSlab((struct page *)sp)) {
72772 + if (object_is_on_stack(ptr, n) == -1)
72773 + goto report;
72774 + return;
72775 + }
72776 +
72777 + type = "<slob>";
72778 + if (sp->size) {
72779 + base = page_address(&sp->page);
72780 + if (base <= ptr && n <= sp->size - (ptr - base))
72781 + return;
72782 + goto report;
72783 + }
72784 +
72785 + /* some tricky double walking to find the chunk */
72786 + spin_lock_irqsave(&slob_lock, flags);
72787 + base = (void *)((unsigned long)ptr & PAGE_MASK);
72788 + free = sp->free;
72789 +
72790 + while (!slob_last(free) && (void *)free <= ptr) {
72791 + base = free + slob_units(free);
72792 + free = slob_next(free);
72793 + }
72794 +
72795 + while (base < (void *)free) {
72796 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
72797 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
72798 + int offset;
72799 +
72800 + if (ptr < base + align)
72801 + break;
72802 +
72803 + offset = ptr - base - align;
72804 + if (offset >= m) {
72805 + base += size;
72806 + continue;
72807 + }
72808 +
72809 + if (n > m - offset)
72810 + break;
72811 +
72812 + spin_unlock_irqrestore(&slob_lock, flags);
72813 + return;
72814 + }
72815 +
72816 + spin_unlock_irqrestore(&slob_lock, flags);
72817 +report:
72818 + pax_report_usercopy(ptr, n, to, type);
72819 +#endif
72820 +
72821 +}
72822 +EXPORT_SYMBOL(check_object_size);
72823 +
72824 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
72825 size_t ksize(const void *block)
72826 {
72827 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
72828 sp = slob_page(block);
72829 if (is_slob_page(sp)) {
72830 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72831 - unsigned int *m = (unsigned int *)(block - align);
72832 - return SLOB_UNITS(*m) * SLOB_UNIT;
72833 + slob_t *m = (slob_t *)(block - align);
72834 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
72835 } else
72836 - return sp->page.private;
72837 + return sp->size;
72838 }
72839 EXPORT_SYMBOL(ksize);
72840
72841 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
72842 {
72843 struct kmem_cache *c;
72844
72845 +#ifdef CONFIG_PAX_USERCOPY
72846 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
72847 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
72848 +#else
72849 c = slob_alloc(sizeof(struct kmem_cache),
72850 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
72851 +#endif
72852
72853 if (c) {
72854 c->name = name;
72855 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
72856
72857 lockdep_trace_alloc(flags);
72858
72859 +#ifdef CONFIG_PAX_USERCOPY
72860 + b = __kmalloc_node_align(c->size, flags, node, c->align);
72861 +#else
72862 if (c->size < PAGE_SIZE) {
72863 b = slob_alloc(c->size, flags, c->align, node);
72864 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72865 SLOB_UNITS(c->size) * SLOB_UNIT,
72866 flags, node);
72867 } else {
72868 + struct slob_page *sp;
72869 +
72870 b = slob_new_pages(flags, get_order(c->size), node);
72871 + sp = slob_page(b);
72872 + sp->size = c->size;
72873 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72874 PAGE_SIZE << get_order(c->size),
72875 flags, node);
72876 }
72877 +#endif
72878
72879 if (c->ctor)
72880 c->ctor(b);
72881 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
72882
72883 static void __kmem_cache_free(void *b, int size)
72884 {
72885 - if (size < PAGE_SIZE)
72886 + struct slob_page *sp = slob_page(b);
72887 +
72888 + if (is_slob_page(sp))
72889 slob_free(b, size);
72890 - else
72891 + else {
72892 + clear_slob_page(sp);
72893 + free_slob_page(sp);
72894 + sp->size = 0;
72895 slob_free_pages(b, get_order(size));
72896 + }
72897 }
72898
72899 static void kmem_rcu_free(struct rcu_head *head)
72900 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
72901
72902 void kmem_cache_free(struct kmem_cache *c, void *b)
72903 {
72904 + int size = c->size;
72905 +
72906 +#ifdef CONFIG_PAX_USERCOPY
72907 + if (size + c->align < PAGE_SIZE) {
72908 + size += c->align;
72909 + b -= c->align;
72910 + }
72911 +#endif
72912 +
72913 kmemleak_free_recursive(b, c->flags);
72914 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
72915 struct slob_rcu *slob_rcu;
72916 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
72917 - slob_rcu->size = c->size;
72918 + slob_rcu = b + (size - sizeof(struct slob_rcu));
72919 + slob_rcu->size = size;
72920 call_rcu(&slob_rcu->head, kmem_rcu_free);
72921 } else {
72922 - __kmem_cache_free(b, c->size);
72923 + __kmem_cache_free(b, size);
72924 }
72925
72926 +#ifdef CONFIG_PAX_USERCOPY
72927 + trace_kfree(_RET_IP_, b);
72928 +#else
72929 trace_kmem_cache_free(_RET_IP_, b);
72930 +#endif
72931 +
72932 }
72933 EXPORT_SYMBOL(kmem_cache_free);
72934
72935 diff --git a/mm/slub.c b/mm/slub.c
72936 index 71de9b5..dd263c5 100644
72937 --- a/mm/slub.c
72938 +++ b/mm/slub.c
72939 @@ -209,7 +209,7 @@ struct track {
72940
72941 enum track_item { TRACK_ALLOC, TRACK_FREE };
72942
72943 -#ifdef CONFIG_SYSFS
72944 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72945 static int sysfs_slab_add(struct kmem_cache *);
72946 static int sysfs_slab_alias(struct kmem_cache *, const char *);
72947 static void sysfs_slab_remove(struct kmem_cache *);
72948 @@ -538,7 +538,7 @@ static void print_track(const char *s, struct track *t)
72949 if (!t->addr)
72950 return;
72951
72952 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
72953 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
72954 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
72955 #ifdef CONFIG_STACKTRACE
72956 {
72957 @@ -2603,6 +2603,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
72958
72959 page = virt_to_head_page(x);
72960
72961 + BUG_ON(!PageSlab(page));
72962 +
72963 slab_free(s, page, x, _RET_IP_);
72964
72965 trace_kmem_cache_free(_RET_IP_, x);
72966 @@ -2636,7 +2638,7 @@ static int slub_min_objects;
72967 * Merge control. If this is set then no merging of slab caches will occur.
72968 * (Could be removed. This was introduced to pacify the merge skeptics.)
72969 */
72970 -static int slub_nomerge;
72971 +static int slub_nomerge = 1;
72972
72973 /*
72974 * Calculate the order of allocation given an slab object size.
72975 @@ -3089,7 +3091,7 @@ static int kmem_cache_open(struct kmem_cache *s,
72976 else
72977 s->cpu_partial = 30;
72978
72979 - s->refcount = 1;
72980 + atomic_set(&s->refcount, 1);
72981 #ifdef CONFIG_NUMA
72982 s->remote_node_defrag_ratio = 1000;
72983 #endif
72984 @@ -3193,8 +3195,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
72985 void kmem_cache_destroy(struct kmem_cache *s)
72986 {
72987 down_write(&slub_lock);
72988 - s->refcount--;
72989 - if (!s->refcount) {
72990 + if (atomic_dec_and_test(&s->refcount)) {
72991 list_del(&s->list);
72992 up_write(&slub_lock);
72993 if (kmem_cache_close(s)) {
72994 @@ -3405,6 +3406,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
72995 EXPORT_SYMBOL(__kmalloc_node);
72996 #endif
72997
72998 +void check_object_size(const void *ptr, unsigned long n, bool to)
72999 +{
73000 +
73001 +#ifdef CONFIG_PAX_USERCOPY
73002 + struct page *page;
73003 + struct kmem_cache *s = NULL;
73004 + unsigned long offset;
73005 + const char *type;
73006 +
73007 + if (!n)
73008 + return;
73009 +
73010 + type = "<null>";
73011 + if (ZERO_OR_NULL_PTR(ptr))
73012 + goto report;
73013 +
73014 + if (!virt_addr_valid(ptr))
73015 + return;
73016 +
73017 + page = virt_to_head_page(ptr);
73018 +
73019 + type = "<process stack>";
73020 + if (!PageSlab(page)) {
73021 + if (object_is_on_stack(ptr, n) == -1)
73022 + goto report;
73023 + return;
73024 + }
73025 +
73026 + s = page->slab;
73027 + type = s->name;
73028 + if (!(s->flags & SLAB_USERCOPY))
73029 + goto report;
73030 +
73031 + offset = (ptr - page_address(page)) % s->size;
73032 + if (offset <= s->objsize && n <= s->objsize - offset)
73033 + return;
73034 +
73035 +report:
73036 + pax_report_usercopy(ptr, n, to, type);
73037 +#endif
73038 +
73039 +}
73040 +EXPORT_SYMBOL(check_object_size);
73041 +
73042 size_t ksize(const void *object)
73043 {
73044 struct page *page;
73045 @@ -3679,7 +3724,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
73046 int node;
73047
73048 list_add(&s->list, &slab_caches);
73049 - s->refcount = -1;
73050 + atomic_set(&s->refcount, -1);
73051
73052 for_each_node_state(node, N_NORMAL_MEMORY) {
73053 struct kmem_cache_node *n = get_node(s, node);
73054 @@ -3799,17 +3844,17 @@ void __init kmem_cache_init(void)
73055
73056 /* Caches that are not of the two-to-the-power-of size */
73057 if (KMALLOC_MIN_SIZE <= 32) {
73058 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
73059 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
73060 caches++;
73061 }
73062
73063 if (KMALLOC_MIN_SIZE <= 64) {
73064 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
73065 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
73066 caches++;
73067 }
73068
73069 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
73070 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
73071 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
73072 caches++;
73073 }
73074
73075 @@ -3877,7 +3922,7 @@ static int slab_unmergeable(struct kmem_cache *s)
73076 /*
73077 * We may have set a slab to be unmergeable during bootstrap.
73078 */
73079 - if (s->refcount < 0)
73080 + if (atomic_read(&s->refcount) < 0)
73081 return 1;
73082
73083 return 0;
73084 @@ -3936,7 +3981,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73085 down_write(&slub_lock);
73086 s = find_mergeable(size, align, flags, name, ctor);
73087 if (s) {
73088 - s->refcount++;
73089 + atomic_inc(&s->refcount);
73090 /*
73091 * Adjust the object sizes so that we clear
73092 * the complete object on kzalloc.
73093 @@ -3945,7 +3990,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73094 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
73095
73096 if (sysfs_slab_alias(s, name)) {
73097 - s->refcount--;
73098 + atomic_dec(&s->refcount);
73099 goto err;
73100 }
73101 up_write(&slub_lock);
73102 @@ -4074,7 +4119,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
73103 }
73104 #endif
73105
73106 -#ifdef CONFIG_SYSFS
73107 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73108 static int count_inuse(struct page *page)
73109 {
73110 return page->inuse;
73111 @@ -4461,12 +4506,12 @@ static void resiliency_test(void)
73112 validate_slab_cache(kmalloc_caches[9]);
73113 }
73114 #else
73115 -#ifdef CONFIG_SYSFS
73116 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73117 static void resiliency_test(void) {};
73118 #endif
73119 #endif
73120
73121 -#ifdef CONFIG_SYSFS
73122 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73123 enum slab_stat_type {
73124 SL_ALL, /* All slabs */
73125 SL_PARTIAL, /* Only partially allocated slabs */
73126 @@ -4709,7 +4754,7 @@ SLAB_ATTR_RO(ctor);
73127
73128 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
73129 {
73130 - return sprintf(buf, "%d\n", s->refcount - 1);
73131 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
73132 }
73133 SLAB_ATTR_RO(aliases);
73134
73135 @@ -5280,6 +5325,7 @@ static char *create_unique_id(struct kmem_cache *s)
73136 return name;
73137 }
73138
73139 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73140 static int sysfs_slab_add(struct kmem_cache *s)
73141 {
73142 int err;
73143 @@ -5342,6 +5388,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
73144 kobject_del(&s->kobj);
73145 kobject_put(&s->kobj);
73146 }
73147 +#endif
73148
73149 /*
73150 * Need to buffer aliases during bootup until sysfs becomes
73151 @@ -5355,6 +5402,7 @@ struct saved_alias {
73152
73153 static struct saved_alias *alias_list;
73154
73155 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73156 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73157 {
73158 struct saved_alias *al;
73159 @@ -5377,6 +5425,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73160 alias_list = al;
73161 return 0;
73162 }
73163 +#endif
73164
73165 static int __init slab_sysfs_init(void)
73166 {
73167 diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
73168 index 1b7e22a..3fcd4f3 100644
73169 --- a/mm/sparse-vmemmap.c
73170 +++ b/mm/sparse-vmemmap.c
73171 @@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
73172 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
73173 if (!p)
73174 return NULL;
73175 - pud_populate(&init_mm, pud, p);
73176 + pud_populate_kernel(&init_mm, pud, p);
73177 }
73178 return pud;
73179 }
73180 @@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
73181 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
73182 if (!p)
73183 return NULL;
73184 - pgd_populate(&init_mm, pgd, p);
73185 + pgd_populate_kernel(&init_mm, pgd, p);
73186 }
73187 return pgd;
73188 }
73189 diff --git a/mm/swap.c b/mm/swap.c
73190 index 5c13f13..f1cfc13 100644
73191 --- a/mm/swap.c
73192 +++ b/mm/swap.c
73193 @@ -30,6 +30,7 @@
73194 #include <linux/backing-dev.h>
73195 #include <linux/memcontrol.h>
73196 #include <linux/gfp.h>
73197 +#include <linux/hugetlb.h>
73198
73199 #include "internal.h"
73200
73201 @@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
73202
73203 __page_cache_release(page);
73204 dtor = get_compound_page_dtor(page);
73205 + if (!PageHuge(page))
73206 + BUG_ON(dtor != free_compound_page);
73207 (*dtor)(page);
73208 }
73209
73210 diff --git a/mm/swapfile.c b/mm/swapfile.c
73211 index fafc26d..1b7493e 100644
73212 --- a/mm/swapfile.c
73213 +++ b/mm/swapfile.c
73214 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
73215
73216 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
73217 /* Activity counter to indicate that a swapon or swapoff has occurred */
73218 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
73219 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
73220
73221 static inline unsigned char swap_count(unsigned char ent)
73222 {
73223 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
73224 }
73225 filp_close(swap_file, NULL);
73226 err = 0;
73227 - atomic_inc(&proc_poll_event);
73228 + atomic_inc_unchecked(&proc_poll_event);
73229 wake_up_interruptible(&proc_poll_wait);
73230
73231 out_dput:
73232 @@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
73233
73234 poll_wait(file, &proc_poll_wait, wait);
73235
73236 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
73237 - seq->poll_event = atomic_read(&proc_poll_event);
73238 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
73239 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73240 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
73241 }
73242
73243 @@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file)
73244 return ret;
73245
73246 seq = file->private_data;
73247 - seq->poll_event = atomic_read(&proc_poll_event);
73248 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73249 return 0;
73250 }
73251
73252 @@ -2127,7 +2127,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
73253 (p->flags & SWP_DISCARDABLE) ? "D" : "");
73254
73255 mutex_unlock(&swapon_mutex);
73256 - atomic_inc(&proc_poll_event);
73257 + atomic_inc_unchecked(&proc_poll_event);
73258 wake_up_interruptible(&proc_poll_wait);
73259
73260 if (S_ISREG(inode->i_mode))
73261 diff --git a/mm/util.c b/mm/util.c
73262 index ae962b3..0bba886 100644
73263 --- a/mm/util.c
73264 +++ b/mm/util.c
73265 @@ -284,6 +284,12 @@ done:
73266 void arch_pick_mmap_layout(struct mm_struct *mm)
73267 {
73268 mm->mmap_base = TASK_UNMAPPED_BASE;
73269 +
73270 +#ifdef CONFIG_PAX_RANDMMAP
73271 + if (mm->pax_flags & MF_PAX_RANDMMAP)
73272 + mm->mmap_base += mm->delta_mmap;
73273 +#endif
73274 +
73275 mm->get_unmapped_area = arch_get_unmapped_area;
73276 mm->unmap_area = arch_unmap_area;
73277 }
73278 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
73279 index 1196c77..2e608e8 100644
73280 --- a/mm/vmalloc.c
73281 +++ b/mm/vmalloc.c
73282 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
73283
73284 pte = pte_offset_kernel(pmd, addr);
73285 do {
73286 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73287 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73288 +
73289 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73290 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
73291 + BUG_ON(!pte_exec(*pte));
73292 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
73293 + continue;
73294 + }
73295 +#endif
73296 +
73297 + {
73298 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73299 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73300 + }
73301 } while (pte++, addr += PAGE_SIZE, addr != end);
73302 }
73303
73304 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73305 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
73306 {
73307 pte_t *pte;
73308 + int ret = -ENOMEM;
73309
73310 /*
73311 * nr is a running index into the array which helps higher level
73312 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73313 pte = pte_alloc_kernel(pmd, addr);
73314 if (!pte)
73315 return -ENOMEM;
73316 +
73317 + pax_open_kernel();
73318 do {
73319 struct page *page = pages[*nr];
73320
73321 - if (WARN_ON(!pte_none(*pte)))
73322 - return -EBUSY;
73323 - if (WARN_ON(!page))
73324 - return -ENOMEM;
73325 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73326 + if (pgprot_val(prot) & _PAGE_NX)
73327 +#endif
73328 +
73329 + if (WARN_ON(!pte_none(*pte))) {
73330 + ret = -EBUSY;
73331 + goto out;
73332 + }
73333 + if (WARN_ON(!page)) {
73334 + ret = -ENOMEM;
73335 + goto out;
73336 + }
73337 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
73338 (*nr)++;
73339 } while (pte++, addr += PAGE_SIZE, addr != end);
73340 - return 0;
73341 + ret = 0;
73342 +out:
73343 + pax_close_kernel();
73344 + return ret;
73345 }
73346
73347 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
73348 @@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
73349 pmd_t *pmd;
73350 unsigned long next;
73351
73352 - pmd = pmd_alloc(&init_mm, pud, addr);
73353 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
73354 if (!pmd)
73355 return -ENOMEM;
73356 do {
73357 @@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
73358 pud_t *pud;
73359 unsigned long next;
73360
73361 - pud = pud_alloc(&init_mm, pgd, addr);
73362 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
73363 if (!pud)
73364 return -ENOMEM;
73365 do {
73366 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
73367 * and fall back on vmalloc() if that fails. Others
73368 * just put it in the vmalloc space.
73369 */
73370 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
73371 +#ifdef CONFIG_MODULES
73372 +#ifdef MODULES_VADDR
73373 unsigned long addr = (unsigned long)x;
73374 if (addr >= MODULES_VADDR && addr < MODULES_END)
73375 return 1;
73376 #endif
73377 +
73378 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73379 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
73380 + return 1;
73381 +#endif
73382 +
73383 +#endif
73384 +
73385 return is_vmalloc_addr(x);
73386 }
73387
73388 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
73389
73390 if (!pgd_none(*pgd)) {
73391 pud_t *pud = pud_offset(pgd, addr);
73392 +#ifdef CONFIG_X86
73393 + if (!pud_large(*pud))
73394 +#endif
73395 if (!pud_none(*pud)) {
73396 pmd_t *pmd = pmd_offset(pud, addr);
73397 +#ifdef CONFIG_X86
73398 + if (!pmd_large(*pmd))
73399 +#endif
73400 if (!pmd_none(*pmd)) {
73401 pte_t *ptep, pte;
73402
73403 @@ -332,6 +372,10 @@ static void purge_vmap_area_lazy(void);
73404 static struct vmap_area *alloc_vmap_area(unsigned long size,
73405 unsigned long align,
73406 unsigned long vstart, unsigned long vend,
73407 + int node, gfp_t gfp_mask) __size_overflow(1);
73408 +static struct vmap_area *alloc_vmap_area(unsigned long size,
73409 + unsigned long align,
73410 + unsigned long vstart, unsigned long vend,
73411 int node, gfp_t gfp_mask)
73412 {
73413 struct vmap_area *va;
73414 @@ -1320,6 +1364,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
73415 struct vm_struct *area;
73416
73417 BUG_ON(in_interrupt());
73418 +
73419 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73420 + if (flags & VM_KERNEXEC) {
73421 + if (start != VMALLOC_START || end != VMALLOC_END)
73422 + return NULL;
73423 + start = (unsigned long)MODULES_EXEC_VADDR;
73424 + end = (unsigned long)MODULES_EXEC_END;
73425 + }
73426 +#endif
73427 +
73428 if (flags & VM_IOREMAP) {
73429 int bit = fls(size);
73430
73431 @@ -1552,6 +1606,11 @@ void *vmap(struct page **pages, unsigned int count,
73432 if (count > totalram_pages)
73433 return NULL;
73434
73435 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73436 + if (!(pgprot_val(prot) & _PAGE_NX))
73437 + flags |= VM_KERNEXEC;
73438 +#endif
73439 +
73440 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
73441 __builtin_return_address(0));
73442 if (!area)
73443 @@ -1653,6 +1712,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
73444 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
73445 goto fail;
73446
73447 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73448 + if (!(pgprot_val(prot) & _PAGE_NX))
73449 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
73450 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
73451 + else
73452 +#endif
73453 +
73454 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
73455 start, end, node, gfp_mask, caller);
73456 if (!area)
73457 @@ -1826,10 +1892,9 @@ EXPORT_SYMBOL(vzalloc_node);
73458 * For tight control over page level allocator and protection flags
73459 * use __vmalloc() instead.
73460 */
73461 -
73462 void *vmalloc_exec(unsigned long size)
73463 {
73464 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
73465 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
73466 -1, __builtin_return_address(0));
73467 }
73468
73469 @@ -2124,6 +2189,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
73470 unsigned long uaddr = vma->vm_start;
73471 unsigned long usize = vma->vm_end - vma->vm_start;
73472
73473 + BUG_ON(vma->vm_mirror);
73474 +
73475 if ((PAGE_SIZE-1) & (unsigned long)addr)
73476 return -EINVAL;
73477
73478 @@ -2376,8 +2443,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
73479 return NULL;
73480 }
73481
73482 - vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
73483 - vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
73484 + vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
73485 + vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
73486 if (!vas || !vms)
73487 goto err_free2;
73488
73489 diff --git a/mm/vmstat.c b/mm/vmstat.c
73490 index 7db1b9b..e9f6b07 100644
73491 --- a/mm/vmstat.c
73492 +++ b/mm/vmstat.c
73493 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
73494 *
73495 * vm_stat contains the global counters
73496 */
73497 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73498 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73499 EXPORT_SYMBOL(vm_stat);
73500
73501 #ifdef CONFIG_SMP
73502 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
73503 v = p->vm_stat_diff[i];
73504 p->vm_stat_diff[i] = 0;
73505 local_irq_restore(flags);
73506 - atomic_long_add(v, &zone->vm_stat[i]);
73507 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
73508 global_diff[i] += v;
73509 #ifdef CONFIG_NUMA
73510 /* 3 seconds idle till flush */
73511 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
73512
73513 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
73514 if (global_diff[i])
73515 - atomic_long_add(global_diff[i], &vm_stat[i]);
73516 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
73517 }
73518
73519 #endif
73520 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
73521 start_cpu_timer(cpu);
73522 #endif
73523 #ifdef CONFIG_PROC_FS
73524 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
73525 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
73526 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
73527 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
73528 + {
73529 + mode_t gr_mode = S_IRUGO;
73530 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
73531 + gr_mode = S_IRUSR;
73532 +#endif
73533 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
73534 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
73535 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73536 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
73537 +#else
73538 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
73539 +#endif
73540 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
73541 + }
73542 #endif
73543 return 0;
73544 }
73545 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
73546 index efea35b..9c8dd0b 100644
73547 --- a/net/8021q/vlan.c
73548 +++ b/net/8021q/vlan.c
73549 @@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
73550 err = -EPERM;
73551 if (!capable(CAP_NET_ADMIN))
73552 break;
73553 - if ((args.u.name_type >= 0) &&
73554 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
73555 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
73556 struct vlan_net *vn;
73557
73558 vn = net_generic(net, vlan_net_id);
73559 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
73560 index fccae26..e7ece2f 100644
73561 --- a/net/9p/trans_fd.c
73562 +++ b/net/9p/trans_fd.c
73563 @@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
73564 oldfs = get_fs();
73565 set_fs(get_ds());
73566 /* The cast to a user pointer is valid due to the set_fs() */
73567 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
73568 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
73569 set_fs(oldfs);
73570
73571 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
73572 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
73573 index 876fbe8..8bbea9f 100644
73574 --- a/net/atm/atm_misc.c
73575 +++ b/net/atm/atm_misc.c
73576 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
73577 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
73578 return 1;
73579 atm_return(vcc, truesize);
73580 - atomic_inc(&vcc->stats->rx_drop);
73581 + atomic_inc_unchecked(&vcc->stats->rx_drop);
73582 return 0;
73583 }
73584 EXPORT_SYMBOL(atm_charge);
73585 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
73586 }
73587 }
73588 atm_return(vcc, guess);
73589 - atomic_inc(&vcc->stats->rx_drop);
73590 + atomic_inc_unchecked(&vcc->stats->rx_drop);
73591 return NULL;
73592 }
73593 EXPORT_SYMBOL(atm_alloc_charge);
73594 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
73595
73596 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73597 {
73598 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73599 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73600 __SONET_ITEMS
73601 #undef __HANDLE_ITEM
73602 }
73603 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
73604
73605 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73606 {
73607 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73608 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
73609 __SONET_ITEMS
73610 #undef __HANDLE_ITEM
73611 }
73612 diff --git a/net/atm/lec.h b/net/atm/lec.h
73613 index dfc0719..47c5322 100644
73614 --- a/net/atm/lec.h
73615 +++ b/net/atm/lec.h
73616 @@ -48,7 +48,7 @@ struct lane2_ops {
73617 const u8 *tlvs, u32 sizeoftlvs);
73618 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
73619 const u8 *tlvs, u32 sizeoftlvs);
73620 -};
73621 +} __no_const;
73622
73623 /*
73624 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
73625 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
73626 index 0919a88..a23d54e 100644
73627 --- a/net/atm/mpc.h
73628 +++ b/net/atm/mpc.h
73629 @@ -33,7 +33,7 @@ struct mpoa_client {
73630 struct mpc_parameters parameters; /* parameters for this client */
73631
73632 const struct net_device_ops *old_ops;
73633 - struct net_device_ops new_ops;
73634 + net_device_ops_no_const new_ops;
73635 };
73636
73637
73638 diff --git a/net/atm/proc.c b/net/atm/proc.c
73639 index 0d020de..011c7bb 100644
73640 --- a/net/atm/proc.c
73641 +++ b/net/atm/proc.c
73642 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
73643 const struct k_atm_aal_stats *stats)
73644 {
73645 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
73646 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
73647 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
73648 - atomic_read(&stats->rx_drop));
73649 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
73650 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
73651 + atomic_read_unchecked(&stats->rx_drop));
73652 }
73653
73654 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
73655 diff --git a/net/atm/resources.c b/net/atm/resources.c
73656 index 23f45ce..c748f1a 100644
73657 --- a/net/atm/resources.c
73658 +++ b/net/atm/resources.c
73659 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
73660 static void copy_aal_stats(struct k_atm_aal_stats *from,
73661 struct atm_aal_stats *to)
73662 {
73663 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73664 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73665 __AAL_STAT_ITEMS
73666 #undef __HANDLE_ITEM
73667 }
73668 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
73669 static void subtract_aal_stats(struct k_atm_aal_stats *from,
73670 struct atm_aal_stats *to)
73671 {
73672 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73673 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
73674 __AAL_STAT_ITEMS
73675 #undef __HANDLE_ITEM
73676 }
73677 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
73678 index a6d5d63..1cc6c2b 100644
73679 --- a/net/batman-adv/bat_iv_ogm.c
73680 +++ b/net/batman-adv/bat_iv_ogm.c
73681 @@ -539,7 +539,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
73682
73683 /* change sequence number to network order */
73684 batman_ogm_packet->seqno =
73685 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
73686 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
73687
73688 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
73689 batman_ogm_packet->tt_crc = htons((uint16_t)
73690 @@ -559,7 +559,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
73691 else
73692 batman_ogm_packet->gw_flags = NO_FLAGS;
73693
73694 - atomic_inc(&hard_iface->seqno);
73695 + atomic_inc_unchecked(&hard_iface->seqno);
73696
73697 slide_own_bcast_window(hard_iface);
73698 bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
73699 @@ -917,7 +917,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
73700 return;
73701
73702 /* could be changed by schedule_own_packet() */
73703 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
73704 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
73705
73706 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
73707
73708 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
73709 index 3778977..f6a9450 100644
73710 --- a/net/batman-adv/hard-interface.c
73711 +++ b/net/batman-adv/hard-interface.c
73712 @@ -328,8 +328,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
73713 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
73714 dev_add_pack(&hard_iface->batman_adv_ptype);
73715
73716 - atomic_set(&hard_iface->seqno, 1);
73717 - atomic_set(&hard_iface->frag_seqno, 1);
73718 + atomic_set_unchecked(&hard_iface->seqno, 1);
73719 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
73720 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
73721 hard_iface->net_dev->name);
73722
73723 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
73724 index a5590f4..8d31969 100644
73725 --- a/net/batman-adv/soft-interface.c
73726 +++ b/net/batman-adv/soft-interface.c
73727 @@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
73728
73729 /* set broadcast sequence number */
73730 bcast_packet->seqno =
73731 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
73732 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
73733
73734 add_bcast_packet_to_list(bat_priv, skb, 1);
73735
73736 @@ -841,7 +841,7 @@ struct net_device *softif_create(const char *name)
73737 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
73738
73739 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
73740 - atomic_set(&bat_priv->bcast_seqno, 1);
73741 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
73742 atomic_set(&bat_priv->ttvn, 0);
73743 atomic_set(&bat_priv->tt_local_changes, 0);
73744 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
73745 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
73746 index 302efb5..1590365 100644
73747 --- a/net/batman-adv/types.h
73748 +++ b/net/batman-adv/types.h
73749 @@ -38,8 +38,8 @@ struct hard_iface {
73750 int16_t if_num;
73751 char if_status;
73752 struct net_device *net_dev;
73753 - atomic_t seqno;
73754 - atomic_t frag_seqno;
73755 + atomic_unchecked_t seqno;
73756 + atomic_unchecked_t frag_seqno;
73757 unsigned char *packet_buff;
73758 int packet_len;
73759 struct kobject *hardif_obj;
73760 @@ -155,7 +155,7 @@ struct bat_priv {
73761 atomic_t orig_interval; /* uint */
73762 atomic_t hop_penalty; /* uint */
73763 atomic_t log_level; /* uint */
73764 - atomic_t bcast_seqno;
73765 + atomic_unchecked_t bcast_seqno;
73766 atomic_t bcast_queue_left;
73767 atomic_t batman_queue_left;
73768 atomic_t ttvn; /* translation table version number */
73769 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
73770 index 676f6a6..3b4e668 100644
73771 --- a/net/batman-adv/unicast.c
73772 +++ b/net/batman-adv/unicast.c
73773 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
73774 frag1->flags = UNI_FRAG_HEAD | large_tail;
73775 frag2->flags = large_tail;
73776
73777 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
73778 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
73779 frag1->seqno = htons(seqno - 1);
73780 frag2->seqno = htons(seqno);
73781
73782 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
73783 index 5238b6b..c9798ce 100644
73784 --- a/net/bluetooth/hci_conn.c
73785 +++ b/net/bluetooth/hci_conn.c
73786 @@ -233,7 +233,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
73787 memset(&cp, 0, sizeof(cp));
73788
73789 cp.handle = cpu_to_le16(conn->handle);
73790 - memcpy(cp.ltk, ltk, sizeof(ltk));
73791 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
73792
73793 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
73794 }
73795 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
73796 index 6f9c25b..d19fd66 100644
73797 --- a/net/bluetooth/l2cap_core.c
73798 +++ b/net/bluetooth/l2cap_core.c
73799 @@ -2466,8 +2466,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
73800 break;
73801
73802 case L2CAP_CONF_RFC:
73803 - if (olen == sizeof(rfc))
73804 - memcpy(&rfc, (void *)val, olen);
73805 + if (olen != sizeof(rfc))
73806 + break;
73807 +
73808 + memcpy(&rfc, (void *)val, olen);
73809
73810 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
73811 rfc.mode != chan->mode)
73812 @@ -2585,8 +2587,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
73813
73814 switch (type) {
73815 case L2CAP_CONF_RFC:
73816 - if (olen == sizeof(rfc))
73817 - memcpy(&rfc, (void *)val, olen);
73818 + if (olen != sizeof(rfc))
73819 + break;
73820 +
73821 + memcpy(&rfc, (void *)val, olen);
73822 goto done;
73823 }
73824 }
73825 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
73826 index 5fe2ff3..10968b5 100644
73827 --- a/net/bridge/netfilter/ebtables.c
73828 +++ b/net/bridge/netfilter/ebtables.c
73829 @@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
73830 tmp.valid_hooks = t->table->valid_hooks;
73831 }
73832 mutex_unlock(&ebt_mutex);
73833 - if (copy_to_user(user, &tmp, *len) != 0){
73834 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
73835 BUGPRINT("c2u Didn't work\n");
73836 ret = -EFAULT;
73837 break;
73838 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
73839 index 5cf5222..6f704ad 100644
73840 --- a/net/caif/cfctrl.c
73841 +++ b/net/caif/cfctrl.c
73842 @@ -9,6 +9,7 @@
73843 #include <linux/stddef.h>
73844 #include <linux/spinlock.h>
73845 #include <linux/slab.h>
73846 +#include <linux/sched.h>
73847 #include <net/caif/caif_layer.h>
73848 #include <net/caif/cfpkt.h>
73849 #include <net/caif/cfctrl.h>
73850 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
73851 memset(&dev_info, 0, sizeof(dev_info));
73852 dev_info.id = 0xff;
73853 cfsrvl_init(&this->serv, 0, &dev_info, false);
73854 - atomic_set(&this->req_seq_no, 1);
73855 - atomic_set(&this->rsp_seq_no, 1);
73856 + atomic_set_unchecked(&this->req_seq_no, 1);
73857 + atomic_set_unchecked(&this->rsp_seq_no, 1);
73858 this->serv.layer.receive = cfctrl_recv;
73859 sprintf(this->serv.layer.name, "ctrl");
73860 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
73861 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
73862 struct cfctrl_request_info *req)
73863 {
73864 spin_lock_bh(&ctrl->info_list_lock);
73865 - atomic_inc(&ctrl->req_seq_no);
73866 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
73867 + atomic_inc_unchecked(&ctrl->req_seq_no);
73868 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
73869 list_add_tail(&req->list, &ctrl->list);
73870 spin_unlock_bh(&ctrl->info_list_lock);
73871 }
73872 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
73873 if (p != first)
73874 pr_warn("Requests are not received in order\n");
73875
73876 - atomic_set(&ctrl->rsp_seq_no,
73877 + atomic_set_unchecked(&ctrl->rsp_seq_no,
73878 p->sequence_no);
73879 list_del(&p->list);
73880 goto out;
73881 diff --git a/net/can/gw.c b/net/can/gw.c
73882 index 3d79b12..8de85fa 100644
73883 --- a/net/can/gw.c
73884 +++ b/net/can/gw.c
73885 @@ -96,7 +96,7 @@ struct cf_mod {
73886 struct {
73887 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
73888 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
73889 - } csumfunc;
73890 + } __no_const csumfunc;
73891 };
73892
73893
73894 diff --git a/net/compat.c b/net/compat.c
73895 index e055708..3f80795 100644
73896 --- a/net/compat.c
73897 +++ b/net/compat.c
73898 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
73899 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
73900 __get_user(kmsg->msg_flags, &umsg->msg_flags))
73901 return -EFAULT;
73902 - kmsg->msg_name = compat_ptr(tmp1);
73903 - kmsg->msg_iov = compat_ptr(tmp2);
73904 - kmsg->msg_control = compat_ptr(tmp3);
73905 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
73906 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
73907 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
73908 return 0;
73909 }
73910
73911 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73912
73913 if (kern_msg->msg_namelen) {
73914 if (mode == VERIFY_READ) {
73915 - int err = move_addr_to_kernel(kern_msg->msg_name,
73916 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
73917 kern_msg->msg_namelen,
73918 kern_address);
73919 if (err < 0)
73920 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73921 kern_msg->msg_name = NULL;
73922
73923 tot_len = iov_from_user_compat_to_kern(kern_iov,
73924 - (struct compat_iovec __user *)kern_msg->msg_iov,
73925 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
73926 kern_msg->msg_iovlen);
73927 if (tot_len >= 0)
73928 kern_msg->msg_iov = kern_iov;
73929 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73930
73931 #define CMSG_COMPAT_FIRSTHDR(msg) \
73932 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
73933 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
73934 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
73935 (struct compat_cmsghdr __user *)NULL)
73936
73937 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
73938 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
73939 (ucmlen) <= (unsigned long) \
73940 ((mhdr)->msg_controllen - \
73941 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
73942 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
73943
73944 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
73945 struct compat_cmsghdr __user *cmsg, int cmsg_len)
73946 {
73947 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
73948 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
73949 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
73950 msg->msg_controllen)
73951 return NULL;
73952 return (struct compat_cmsghdr __user *)ptr;
73953 @@ -219,7 +219,7 @@ Efault:
73954
73955 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
73956 {
73957 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
73958 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
73959 struct compat_cmsghdr cmhdr;
73960 int cmlen;
73961
73962 @@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
73963
73964 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
73965 {
73966 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
73967 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
73968 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
73969 int fdnum = scm->fp->count;
73970 struct file **fp = scm->fp->fp;
73971 @@ -372,7 +372,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
73972 return -EFAULT;
73973 old_fs = get_fs();
73974 set_fs(KERNEL_DS);
73975 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
73976 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
73977 set_fs(old_fs);
73978
73979 return err;
73980 @@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
73981 len = sizeof(ktime);
73982 old_fs = get_fs();
73983 set_fs(KERNEL_DS);
73984 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
73985 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
73986 set_fs(old_fs);
73987
73988 if (!err) {
73989 @@ -576,7 +576,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
73990 case MCAST_JOIN_GROUP:
73991 case MCAST_LEAVE_GROUP:
73992 {
73993 - struct compat_group_req __user *gr32 = (void *)optval;
73994 + struct compat_group_req __user *gr32 = (void __user *)optval;
73995 struct group_req __user *kgr =
73996 compat_alloc_user_space(sizeof(struct group_req));
73997 u32 interface;
73998 @@ -597,7 +597,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
73999 case MCAST_BLOCK_SOURCE:
74000 case MCAST_UNBLOCK_SOURCE:
74001 {
74002 - struct compat_group_source_req __user *gsr32 = (void *)optval;
74003 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
74004 struct group_source_req __user *kgsr = compat_alloc_user_space(
74005 sizeof(struct group_source_req));
74006 u32 interface;
74007 @@ -618,7 +618,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74008 }
74009 case MCAST_MSFILTER:
74010 {
74011 - struct compat_group_filter __user *gf32 = (void *)optval;
74012 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74013 struct group_filter __user *kgf;
74014 u32 interface, fmode, numsrc;
74015
74016 @@ -656,7 +656,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
74017 char __user *optval, int __user *optlen,
74018 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
74019 {
74020 - struct compat_group_filter __user *gf32 = (void *)optval;
74021 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74022 struct group_filter __user *kgf;
74023 int __user *koptlen;
74024 u32 interface, fmode, numsrc;
74025 diff --git a/net/core/datagram.c b/net/core/datagram.c
74026 index e4fbfd6..6a6ac94 100644
74027 --- a/net/core/datagram.c
74028 +++ b/net/core/datagram.c
74029 @@ -290,7 +290,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
74030 }
74031
74032 kfree_skb(skb);
74033 - atomic_inc(&sk->sk_drops);
74034 + atomic_inc_unchecked(&sk->sk_drops);
74035 sk_mem_reclaim_partial(sk);
74036
74037 return err;
74038 diff --git a/net/core/dev.c b/net/core/dev.c
74039 index 99e1d75..adf968a 100644
74040 --- a/net/core/dev.c
74041 +++ b/net/core/dev.c
74042 @@ -1136,9 +1136,13 @@ void dev_load(struct net *net, const char *name)
74043 if (no_module && capable(CAP_NET_ADMIN))
74044 no_module = request_module("netdev-%s", name);
74045 if (no_module && capable(CAP_SYS_MODULE)) {
74046 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
74047 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
74048 +#else
74049 if (!request_module("%s", name))
74050 pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
74051 name);
74052 +#endif
74053 }
74054 }
74055 EXPORT_SYMBOL(dev_load);
74056 @@ -1602,7 +1606,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74057 {
74058 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
74059 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
74060 - atomic_long_inc(&dev->rx_dropped);
74061 + atomic_long_inc_unchecked(&dev->rx_dropped);
74062 kfree_skb(skb);
74063 return NET_RX_DROP;
74064 }
74065 @@ -1612,7 +1616,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74066 nf_reset(skb);
74067
74068 if (unlikely(!is_skb_forwardable(dev, skb))) {
74069 - atomic_long_inc(&dev->rx_dropped);
74070 + atomic_long_inc_unchecked(&dev->rx_dropped);
74071 kfree_skb(skb);
74072 return NET_RX_DROP;
74073 }
74074 @@ -2042,7 +2046,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
74075
74076 struct dev_gso_cb {
74077 void (*destructor)(struct sk_buff *skb);
74078 -};
74079 +} __no_const;
74080
74081 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
74082
74083 @@ -2898,7 +2902,7 @@ enqueue:
74084
74085 local_irq_restore(flags);
74086
74087 - atomic_long_inc(&skb->dev->rx_dropped);
74088 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74089 kfree_skb(skb);
74090 return NET_RX_DROP;
74091 }
74092 @@ -2970,7 +2974,7 @@ int netif_rx_ni(struct sk_buff *skb)
74093 }
74094 EXPORT_SYMBOL(netif_rx_ni);
74095
74096 -static void net_tx_action(struct softirq_action *h)
74097 +static void net_tx_action(void)
74098 {
74099 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74100
74101 @@ -3258,7 +3262,7 @@ ncls:
74102 if (pt_prev) {
74103 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
74104 } else {
74105 - atomic_long_inc(&skb->dev->rx_dropped);
74106 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74107 kfree_skb(skb);
74108 /* Jamal, now you will not able to escape explaining
74109 * me how you were going to use this. :-)
74110 @@ -3818,7 +3822,7 @@ void netif_napi_del(struct napi_struct *napi)
74111 }
74112 EXPORT_SYMBOL(netif_napi_del);
74113
74114 -static void net_rx_action(struct softirq_action *h)
74115 +static void net_rx_action(void)
74116 {
74117 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74118 unsigned long time_limit = jiffies + 2;
74119 @@ -4288,8 +4292,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
74120 else
74121 seq_printf(seq, "%04x", ntohs(pt->type));
74122
74123 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74124 + seq_printf(seq, " %-8s %p\n",
74125 + pt->dev ? pt->dev->name : "", NULL);
74126 +#else
74127 seq_printf(seq, " %-8s %pF\n",
74128 pt->dev ? pt->dev->name : "", pt->func);
74129 +#endif
74130 }
74131
74132 return 0;
74133 @@ -5839,7 +5848,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
74134 } else {
74135 netdev_stats_to_stats64(storage, &dev->stats);
74136 }
74137 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
74138 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
74139 return storage;
74140 }
74141 EXPORT_SYMBOL(dev_get_stats);
74142 diff --git a/net/core/flow.c b/net/core/flow.c
74143 index e318c7e..168b1d0 100644
74144 --- a/net/core/flow.c
74145 +++ b/net/core/flow.c
74146 @@ -61,7 +61,7 @@ struct flow_cache {
74147 struct timer_list rnd_timer;
74148 };
74149
74150 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
74151 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
74152 EXPORT_SYMBOL(flow_cache_genid);
74153 static struct flow_cache flow_cache_global;
74154 static struct kmem_cache *flow_cachep __read_mostly;
74155 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
74156
74157 static int flow_entry_valid(struct flow_cache_entry *fle)
74158 {
74159 - if (atomic_read(&flow_cache_genid) != fle->genid)
74160 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
74161 return 0;
74162 if (fle->object && !fle->object->ops->check(fle->object))
74163 return 0;
74164 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
74165 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
74166 fcp->hash_count++;
74167 }
74168 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
74169 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
74170 flo = fle->object;
74171 if (!flo)
74172 goto ret_object;
74173 @@ -280,7 +280,7 @@ nocache:
74174 }
74175 flo = resolver(net, key, family, dir, flo, ctx);
74176 if (fle) {
74177 - fle->genid = atomic_read(&flow_cache_genid);
74178 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
74179 if (!IS_ERR(flo))
74180 fle->object = flo;
74181 else
74182 diff --git a/net/core/iovec.c b/net/core/iovec.c
74183 index 7e7aeb0..2a998cb 100644
74184 --- a/net/core/iovec.c
74185 +++ b/net/core/iovec.c
74186 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
74187 if (m->msg_namelen) {
74188 if (mode == VERIFY_READ) {
74189 void __user *namep;
74190 - namep = (void __user __force *) m->msg_name;
74191 + namep = (void __force_user *) m->msg_name;
74192 err = move_addr_to_kernel(namep, m->msg_namelen,
74193 address);
74194 if (err < 0)
74195 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
74196 }
74197
74198 size = m->msg_iovlen * sizeof(struct iovec);
74199 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
74200 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
74201 return -EFAULT;
74202
74203 m->msg_iov = iov;
74204 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
74205 index 90430b7..0032ec0 100644
74206 --- a/net/core/rtnetlink.c
74207 +++ b/net/core/rtnetlink.c
74208 @@ -56,7 +56,7 @@ struct rtnl_link {
74209 rtnl_doit_func doit;
74210 rtnl_dumpit_func dumpit;
74211 rtnl_calcit_func calcit;
74212 -};
74213 +} __no_const;
74214
74215 static DEFINE_MUTEX(rtnl_mutex);
74216
74217 diff --git a/net/core/scm.c b/net/core/scm.c
74218 index 611c5ef..88f6d6d 100644
74219 --- a/net/core/scm.c
74220 +++ b/net/core/scm.c
74221 @@ -219,7 +219,7 @@ EXPORT_SYMBOL(__scm_send);
74222 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74223 {
74224 struct cmsghdr __user *cm
74225 - = (__force struct cmsghdr __user *)msg->msg_control;
74226 + = (struct cmsghdr __force_user *)msg->msg_control;
74227 struct cmsghdr cmhdr;
74228 int cmlen = CMSG_LEN(len);
74229 int err;
74230 @@ -242,7 +242,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74231 err = -EFAULT;
74232 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
74233 goto out;
74234 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
74235 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
74236 goto out;
74237 cmlen = CMSG_SPACE(len);
74238 if (msg->msg_controllen < cmlen)
74239 @@ -258,7 +258,7 @@ EXPORT_SYMBOL(put_cmsg);
74240 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74241 {
74242 struct cmsghdr __user *cm
74243 - = (__force struct cmsghdr __user*)msg->msg_control;
74244 + = (struct cmsghdr __force_user *)msg->msg_control;
74245
74246 int fdmax = 0;
74247 int fdnum = scm->fp->count;
74248 @@ -278,7 +278,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74249 if (fdnum < fdmax)
74250 fdmax = fdnum;
74251
74252 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
74253 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
74254 i++, cmfptr++)
74255 {
74256 int new_fd;
74257 diff --git a/net/core/sock.c b/net/core/sock.c
74258 index b2e14c0..6651b32 100644
74259 --- a/net/core/sock.c
74260 +++ b/net/core/sock.c
74261 @@ -340,7 +340,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74262 struct sk_buff_head *list = &sk->sk_receive_queue;
74263
74264 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
74265 - atomic_inc(&sk->sk_drops);
74266 + atomic_inc_unchecked(&sk->sk_drops);
74267 trace_sock_rcvqueue_full(sk, skb);
74268 return -ENOMEM;
74269 }
74270 @@ -350,7 +350,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74271 return err;
74272
74273 if (!sk_rmem_schedule(sk, skb->truesize)) {
74274 - atomic_inc(&sk->sk_drops);
74275 + atomic_inc_unchecked(&sk->sk_drops);
74276 return -ENOBUFS;
74277 }
74278
74279 @@ -370,7 +370,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74280 skb_dst_force(skb);
74281
74282 spin_lock_irqsave(&list->lock, flags);
74283 - skb->dropcount = atomic_read(&sk->sk_drops);
74284 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74285 __skb_queue_tail(list, skb);
74286 spin_unlock_irqrestore(&list->lock, flags);
74287
74288 @@ -390,7 +390,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74289 skb->dev = NULL;
74290
74291 if (sk_rcvqueues_full(sk, skb)) {
74292 - atomic_inc(&sk->sk_drops);
74293 + atomic_inc_unchecked(&sk->sk_drops);
74294 goto discard_and_relse;
74295 }
74296 if (nested)
74297 @@ -408,7 +408,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74298 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
74299 } else if (sk_add_backlog(sk, skb)) {
74300 bh_unlock_sock(sk);
74301 - atomic_inc(&sk->sk_drops);
74302 + atomic_inc_unchecked(&sk->sk_drops);
74303 goto discard_and_relse;
74304 }
74305
74306 @@ -984,7 +984,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74307 if (len > sizeof(peercred))
74308 len = sizeof(peercred);
74309 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
74310 - if (copy_to_user(optval, &peercred, len))
74311 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
74312 return -EFAULT;
74313 goto lenout;
74314 }
74315 @@ -997,7 +997,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74316 return -ENOTCONN;
74317 if (lv < len)
74318 return -EINVAL;
74319 - if (copy_to_user(optval, address, len))
74320 + if (len > sizeof(address) || copy_to_user(optval, address, len))
74321 return -EFAULT;
74322 goto lenout;
74323 }
74324 @@ -1043,7 +1043,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74325
74326 if (len > lv)
74327 len = lv;
74328 - if (copy_to_user(optval, &v, len))
74329 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
74330 return -EFAULT;
74331 lenout:
74332 if (put_user(len, optlen))
74333 @@ -2128,7 +2128,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
74334 */
74335 smp_wmb();
74336 atomic_set(&sk->sk_refcnt, 1);
74337 - atomic_set(&sk->sk_drops, 0);
74338 + atomic_set_unchecked(&sk->sk_drops, 0);
74339 }
74340 EXPORT_SYMBOL(sock_init_data);
74341
74342 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
74343 index b9868e1..849f809 100644
74344 --- a/net/core/sock_diag.c
74345 +++ b/net/core/sock_diag.c
74346 @@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
74347
74348 int sock_diag_check_cookie(void *sk, __u32 *cookie)
74349 {
74350 +#ifndef CONFIG_GRKERNSEC_HIDESYM
74351 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
74352 cookie[1] != INET_DIAG_NOCOOKIE) &&
74353 ((u32)(unsigned long)sk != cookie[0] ||
74354 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
74355 return -ESTALE;
74356 else
74357 +#endif
74358 return 0;
74359 }
74360 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
74361
74362 void sock_diag_save_cookie(void *sk, __u32 *cookie)
74363 {
74364 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74365 + cookie[0] = 0;
74366 + cookie[1] = 0;
74367 +#else
74368 cookie[0] = (u32)(unsigned long)sk;
74369 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
74370 +#endif
74371 }
74372 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
74373
74374 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
74375 index 02e75d1..9a57a7c 100644
74376 --- a/net/decnet/sysctl_net_decnet.c
74377 +++ b/net/decnet/sysctl_net_decnet.c
74378 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
74379
74380 if (len > *lenp) len = *lenp;
74381
74382 - if (copy_to_user(buffer, addr, len))
74383 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
74384 return -EFAULT;
74385
74386 *lenp = len;
74387 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
74388
74389 if (len > *lenp) len = *lenp;
74390
74391 - if (copy_to_user(buffer, devname, len))
74392 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
74393 return -EFAULT;
74394
74395 *lenp = len;
74396 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
74397 index 39a2d29..f39c0fe 100644
74398 --- a/net/econet/Kconfig
74399 +++ b/net/econet/Kconfig
74400 @@ -4,7 +4,7 @@
74401
74402 config ECONET
74403 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
74404 - depends on EXPERIMENTAL && INET
74405 + depends on EXPERIMENTAL && INET && BROKEN
74406 ---help---
74407 Econet is a fairly old and slow networking protocol mainly used by
74408 Acorn computers to access file and print servers. It uses native
74409 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
74410 index cbe3a68..a879b75 100644
74411 --- a/net/ipv4/fib_frontend.c
74412 +++ b/net/ipv4/fib_frontend.c
74413 @@ -969,12 +969,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
74414 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74415 fib_sync_up(dev);
74416 #endif
74417 - atomic_inc(&net->ipv4.dev_addr_genid);
74418 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74419 rt_cache_flush(dev_net(dev), -1);
74420 break;
74421 case NETDEV_DOWN:
74422 fib_del_ifaddr(ifa, NULL);
74423 - atomic_inc(&net->ipv4.dev_addr_genid);
74424 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74425 if (ifa->ifa_dev->ifa_list == NULL) {
74426 /* Last address was deleted from this interface.
74427 * Disable IP.
74428 @@ -1010,7 +1010,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
74429 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74430 fib_sync_up(dev);
74431 #endif
74432 - atomic_inc(&net->ipv4.dev_addr_genid);
74433 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74434 rt_cache_flush(dev_net(dev), -1);
74435 break;
74436 case NETDEV_DOWN:
74437 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
74438 index 8861f91..ab1e3c1 100644
74439 --- a/net/ipv4/fib_semantics.c
74440 +++ b/net/ipv4/fib_semantics.c
74441 @@ -698,7 +698,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
74442 nh->nh_saddr = inet_select_addr(nh->nh_dev,
74443 nh->nh_gw,
74444 nh->nh_parent->fib_scope);
74445 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
74446 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
74447
74448 return nh->nh_saddr;
74449 }
74450 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
74451 index 984ec65..97ac518 100644
74452 --- a/net/ipv4/inet_hashtables.c
74453 +++ b/net/ipv4/inet_hashtables.c
74454 @@ -18,12 +18,15 @@
74455 #include <linux/sched.h>
74456 #include <linux/slab.h>
74457 #include <linux/wait.h>
74458 +#include <linux/security.h>
74459
74460 #include <net/inet_connection_sock.h>
74461 #include <net/inet_hashtables.h>
74462 #include <net/secure_seq.h>
74463 #include <net/ip.h>
74464
74465 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
74466 +
74467 /*
74468 * Allocate and initialize a new local port bind bucket.
74469 * The bindhash mutex for snum's hash chain must be held here.
74470 @@ -530,6 +533,8 @@ ok:
74471 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
74472 spin_unlock(&head->lock);
74473
74474 + gr_update_task_in_ip_table(current, inet_sk(sk));
74475 +
74476 if (tw) {
74477 inet_twsk_deschedule(tw, death_row);
74478 while (twrefcnt) {
74479 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
74480 index d4d61b6..b81aec8 100644
74481 --- a/net/ipv4/inetpeer.c
74482 +++ b/net/ipv4/inetpeer.c
74483 @@ -487,8 +487,8 @@ relookup:
74484 if (p) {
74485 p->daddr = *daddr;
74486 atomic_set(&p->refcnt, 1);
74487 - atomic_set(&p->rid, 0);
74488 - atomic_set(&p->ip_id_count,
74489 + atomic_set_unchecked(&p->rid, 0);
74490 + atomic_set_unchecked(&p->ip_id_count,
74491 (daddr->family == AF_INET) ?
74492 secure_ip_id(daddr->addr.a4) :
74493 secure_ipv6_id(daddr->addr.a6));
74494 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
74495 index 3727e23..517f5df 100644
74496 --- a/net/ipv4/ip_fragment.c
74497 +++ b/net/ipv4/ip_fragment.c
74498 @@ -318,7 +318,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
74499 return 0;
74500
74501 start = qp->rid;
74502 - end = atomic_inc_return(&peer->rid);
74503 + end = atomic_inc_return_unchecked(&peer->rid);
74504 qp->rid = end;
74505
74506 rc = qp->q.fragments && (end - start) > max;
74507 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
74508 index 2fd0fba..83fac99 100644
74509 --- a/net/ipv4/ip_sockglue.c
74510 +++ b/net/ipv4/ip_sockglue.c
74511 @@ -1137,7 +1137,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74512 len = min_t(unsigned int, len, opt->optlen);
74513 if (put_user(len, optlen))
74514 return -EFAULT;
74515 - if (copy_to_user(optval, opt->__data, len))
74516 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
74517 + copy_to_user(optval, opt->__data, len))
74518 return -EFAULT;
74519 return 0;
74520 }
74521 @@ -1268,7 +1269,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74522 if (sk->sk_type != SOCK_STREAM)
74523 return -ENOPROTOOPT;
74524
74525 - msg.msg_control = optval;
74526 + msg.msg_control = (void __force_kernel *)optval;
74527 msg.msg_controllen = len;
74528 msg.msg_flags = flags;
74529
74530 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
74531 index 92ac7e7..13f93d9 100644
74532 --- a/net/ipv4/ipconfig.c
74533 +++ b/net/ipv4/ipconfig.c
74534 @@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
74535
74536 mm_segment_t oldfs = get_fs();
74537 set_fs(get_ds());
74538 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74539 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74540 set_fs(oldfs);
74541 return res;
74542 }
74543 @@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
74544
74545 mm_segment_t oldfs = get_fs();
74546 set_fs(get_ds());
74547 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74548 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74549 set_fs(oldfs);
74550 return res;
74551 }
74552 @@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
74553
74554 mm_segment_t oldfs = get_fs();
74555 set_fs(get_ds());
74556 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
74557 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
74558 set_fs(oldfs);
74559 return res;
74560 }
74561 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
74562 index 50009c7..5996a9f 100644
74563 --- a/net/ipv4/ping.c
74564 +++ b/net/ipv4/ping.c
74565 @@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
74566 sk_rmem_alloc_get(sp),
74567 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74568 atomic_read(&sp->sk_refcnt), sp,
74569 - atomic_read(&sp->sk_drops), len);
74570 + atomic_read_unchecked(&sp->sk_drops), len);
74571 }
74572
74573 static int ping_seq_show(struct seq_file *seq, void *v)
74574 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
74575 index bbd604c..4d5469c 100644
74576 --- a/net/ipv4/raw.c
74577 +++ b/net/ipv4/raw.c
74578 @@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
74579 int raw_rcv(struct sock *sk, struct sk_buff *skb)
74580 {
74581 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
74582 - atomic_inc(&sk->sk_drops);
74583 + atomic_inc_unchecked(&sk->sk_drops);
74584 kfree_skb(skb);
74585 return NET_RX_DROP;
74586 }
74587 @@ -740,16 +740,20 @@ static int raw_init(struct sock *sk)
74588
74589 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
74590 {
74591 + struct icmp_filter filter;
74592 +
74593 if (optlen > sizeof(struct icmp_filter))
74594 optlen = sizeof(struct icmp_filter);
74595 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
74596 + if (copy_from_user(&filter, optval, optlen))
74597 return -EFAULT;
74598 + raw_sk(sk)->filter = filter;
74599 return 0;
74600 }
74601
74602 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
74603 {
74604 int len, ret = -EFAULT;
74605 + struct icmp_filter filter;
74606
74607 if (get_user(len, optlen))
74608 goto out;
74609 @@ -759,8 +763,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
74610 if (len > sizeof(struct icmp_filter))
74611 len = sizeof(struct icmp_filter);
74612 ret = -EFAULT;
74613 - if (put_user(len, optlen) ||
74614 - copy_to_user(optval, &raw_sk(sk)->filter, len))
74615 + filter = raw_sk(sk)->filter;
74616 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
74617 goto out;
74618 ret = 0;
74619 out: return ret;
74620 @@ -988,7 +992,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
74621 sk_wmem_alloc_get(sp),
74622 sk_rmem_alloc_get(sp),
74623 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74624 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
74625 + atomic_read(&sp->sk_refcnt),
74626 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74627 + NULL,
74628 +#else
74629 + sp,
74630 +#endif
74631 + atomic_read_unchecked(&sp->sk_drops));
74632 }
74633
74634 static int raw_seq_show(struct seq_file *seq, void *v)
74635 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
74636 index 167ea10..4b15883 100644
74637 --- a/net/ipv4/route.c
74638 +++ b/net/ipv4/route.c
74639 @@ -312,7 +312,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
74640
74641 static inline int rt_genid(struct net *net)
74642 {
74643 - return atomic_read(&net->ipv4.rt_genid);
74644 + return atomic_read_unchecked(&net->ipv4.rt_genid);
74645 }
74646
74647 #ifdef CONFIG_PROC_FS
74648 @@ -936,7 +936,7 @@ static void rt_cache_invalidate(struct net *net)
74649 unsigned char shuffle;
74650
74651 get_random_bytes(&shuffle, sizeof(shuffle));
74652 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
74653 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
74654 inetpeer_invalidate_tree(AF_INET);
74655 }
74656
74657 @@ -3009,7 +3009,7 @@ static int rt_fill_info(struct net *net,
74658 error = rt->dst.error;
74659 if (peer) {
74660 inet_peer_refcheck(rt->peer);
74661 - id = atomic_read(&peer->ip_id_count) & 0xffff;
74662 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
74663 if (peer->tcp_ts_stamp) {
74664 ts = peer->tcp_ts;
74665 tsage = get_seconds() - peer->tcp_ts_stamp;
74666 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
74667 index 0cb86ce..8e7fda8 100644
74668 --- a/net/ipv4/tcp_ipv4.c
74669 +++ b/net/ipv4/tcp_ipv4.c
74670 @@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
74671 EXPORT_SYMBOL(sysctl_tcp_low_latency);
74672
74673
74674 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74675 +extern int grsec_enable_blackhole;
74676 +#endif
74677 +
74678 #ifdef CONFIG_TCP_MD5SIG
74679 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
74680 __be32 daddr, __be32 saddr, const struct tcphdr *th);
74681 @@ -1641,6 +1645,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
74682 return 0;
74683
74684 reset:
74685 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74686 + if (!grsec_enable_blackhole)
74687 +#endif
74688 tcp_v4_send_reset(rsk, skb);
74689 discard:
74690 kfree_skb(skb);
74691 @@ -1703,12 +1710,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
74692 TCP_SKB_CB(skb)->sacked = 0;
74693
74694 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
74695 - if (!sk)
74696 + if (!sk) {
74697 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74698 + ret = 1;
74699 +#endif
74700 goto no_tcp_socket;
74701 -
74702 + }
74703 process:
74704 - if (sk->sk_state == TCP_TIME_WAIT)
74705 + if (sk->sk_state == TCP_TIME_WAIT) {
74706 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74707 + ret = 2;
74708 +#endif
74709 goto do_time_wait;
74710 + }
74711
74712 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
74713 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
74714 @@ -1758,6 +1772,10 @@ no_tcp_socket:
74715 bad_packet:
74716 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
74717 } else {
74718 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74719 + if (!grsec_enable_blackhole || (ret == 1 &&
74720 + (skb->dev->flags & IFF_LOOPBACK)))
74721 +#endif
74722 tcp_v4_send_reset(NULL, skb);
74723 }
74724
74725 @@ -2419,7 +2437,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
74726 0, /* non standard timer */
74727 0, /* open_requests have no inode */
74728 atomic_read(&sk->sk_refcnt),
74729 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74730 + NULL,
74731 +#else
74732 req,
74733 +#endif
74734 len);
74735 }
74736
74737 @@ -2469,7 +2491,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
74738 sock_i_uid(sk),
74739 icsk->icsk_probes_out,
74740 sock_i_ino(sk),
74741 - atomic_read(&sk->sk_refcnt), sk,
74742 + atomic_read(&sk->sk_refcnt),
74743 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74744 + NULL,
74745 +#else
74746 + sk,
74747 +#endif
74748 jiffies_to_clock_t(icsk->icsk_rto),
74749 jiffies_to_clock_t(icsk->icsk_ack.ato),
74750 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
74751 @@ -2497,7 +2524,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
74752 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
74753 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
74754 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
74755 - atomic_read(&tw->tw_refcnt), tw, len);
74756 + atomic_read(&tw->tw_refcnt),
74757 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74758 + NULL,
74759 +#else
74760 + tw,
74761 +#endif
74762 + len);
74763 }
74764
74765 #define TMPSZ 150
74766 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
74767 index 3cabafb..640525b 100644
74768 --- a/net/ipv4/tcp_minisocks.c
74769 +++ b/net/ipv4/tcp_minisocks.c
74770 @@ -27,6 +27,10 @@
74771 #include <net/inet_common.h>
74772 #include <net/xfrm.h>
74773
74774 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74775 +extern int grsec_enable_blackhole;
74776 +#endif
74777 +
74778 int sysctl_tcp_syncookies __read_mostly = 1;
74779 EXPORT_SYMBOL(sysctl_tcp_syncookies);
74780
74781 @@ -753,6 +757,10 @@ listen_overflow:
74782
74783 embryonic_reset:
74784 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
74785 +
74786 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74787 + if (!grsec_enable_blackhole)
74788 +#endif
74789 if (!(flg & TCP_FLAG_RST))
74790 req->rsk_ops->send_reset(sk, skb);
74791
74792 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
74793 index a981cdc..48f4c3a 100644
74794 --- a/net/ipv4/tcp_probe.c
74795 +++ b/net/ipv4/tcp_probe.c
74796 @@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
74797 if (cnt + width >= len)
74798 break;
74799
74800 - if (copy_to_user(buf + cnt, tbuf, width))
74801 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
74802 return -EFAULT;
74803 cnt += width;
74804 }
74805 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
74806 index 34d4a02..3b57f86 100644
74807 --- a/net/ipv4/tcp_timer.c
74808 +++ b/net/ipv4/tcp_timer.c
74809 @@ -22,6 +22,10 @@
74810 #include <linux/gfp.h>
74811 #include <net/tcp.h>
74812
74813 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74814 +extern int grsec_lastack_retries;
74815 +#endif
74816 +
74817 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
74818 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
74819 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
74820 @@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
74821 }
74822 }
74823
74824 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74825 + if ((sk->sk_state == TCP_LAST_ACK) &&
74826 + (grsec_lastack_retries > 0) &&
74827 + (grsec_lastack_retries < retry_until))
74828 + retry_until = grsec_lastack_retries;
74829 +#endif
74830 +
74831 if (retransmits_timed_out(sk, retry_until,
74832 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
74833 /* Has it gone just too far? */
74834 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
74835 index fe14105..0618260 100644
74836 --- a/net/ipv4/udp.c
74837 +++ b/net/ipv4/udp.c
74838 @@ -87,6 +87,7 @@
74839 #include <linux/types.h>
74840 #include <linux/fcntl.h>
74841 #include <linux/module.h>
74842 +#include <linux/security.h>
74843 #include <linux/socket.h>
74844 #include <linux/sockios.h>
74845 #include <linux/igmp.h>
74846 @@ -109,6 +110,10 @@
74847 #include <trace/events/udp.h>
74848 #include "udp_impl.h"
74849
74850 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74851 +extern int grsec_enable_blackhole;
74852 +#endif
74853 +
74854 struct udp_table udp_table __read_mostly;
74855 EXPORT_SYMBOL(udp_table);
74856
74857 @@ -567,6 +572,9 @@ found:
74858 return s;
74859 }
74860
74861 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
74862 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
74863 +
74864 /*
74865 * This routine is called by the ICMP module when it gets some
74866 * sort of error condition. If err < 0 then the socket should
74867 @@ -858,9 +866,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
74868 dport = usin->sin_port;
74869 if (dport == 0)
74870 return -EINVAL;
74871 +
74872 + err = gr_search_udp_sendmsg(sk, usin);
74873 + if (err)
74874 + return err;
74875 } else {
74876 if (sk->sk_state != TCP_ESTABLISHED)
74877 return -EDESTADDRREQ;
74878 +
74879 + err = gr_search_udp_sendmsg(sk, NULL);
74880 + if (err)
74881 + return err;
74882 +
74883 daddr = inet->inet_daddr;
74884 dport = inet->inet_dport;
74885 /* Open fast path for connected socket.
74886 @@ -1102,7 +1119,7 @@ static unsigned int first_packet_length(struct sock *sk)
74887 udp_lib_checksum_complete(skb)) {
74888 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
74889 IS_UDPLITE(sk));
74890 - atomic_inc(&sk->sk_drops);
74891 + atomic_inc_unchecked(&sk->sk_drops);
74892 __skb_unlink(skb, rcvq);
74893 __skb_queue_tail(&list_kill, skb);
74894 }
74895 @@ -1188,6 +1205,10 @@ try_again:
74896 if (!skb)
74897 goto out;
74898
74899 + err = gr_search_udp_recvmsg(sk, skb);
74900 + if (err)
74901 + goto out_free;
74902 +
74903 ulen = skb->len - sizeof(struct udphdr);
74904 copied = len;
74905 if (copied > ulen)
74906 @@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74907
74908 drop:
74909 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
74910 - atomic_inc(&sk->sk_drops);
74911 + atomic_inc_unchecked(&sk->sk_drops);
74912 kfree_skb(skb);
74913 return -1;
74914 }
74915 @@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
74916 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
74917
74918 if (!skb1) {
74919 - atomic_inc(&sk->sk_drops);
74920 + atomic_inc_unchecked(&sk->sk_drops);
74921 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
74922 IS_UDPLITE(sk));
74923 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
74924 @@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
74925 goto csum_error;
74926
74927 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
74928 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74929 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
74930 +#endif
74931 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
74932
74933 /*
74934 @@ -2094,8 +2118,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
74935 sk_wmem_alloc_get(sp),
74936 sk_rmem_alloc_get(sp),
74937 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74938 - atomic_read(&sp->sk_refcnt), sp,
74939 - atomic_read(&sp->sk_drops), len);
74940 + atomic_read(&sp->sk_refcnt),
74941 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74942 + NULL,
74943 +#else
74944 + sp,
74945 +#endif
74946 + atomic_read_unchecked(&sp->sk_drops), len);
74947 }
74948
74949 int udp4_seq_show(struct seq_file *seq, void *v)
74950 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
74951 index 7d5cb97..c56564f 100644
74952 --- a/net/ipv6/addrconf.c
74953 +++ b/net/ipv6/addrconf.c
74954 @@ -2142,7 +2142,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
74955 p.iph.ihl = 5;
74956 p.iph.protocol = IPPROTO_IPV6;
74957 p.iph.ttl = 64;
74958 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
74959 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
74960
74961 if (ops->ndo_do_ioctl) {
74962 mm_segment_t oldfs = get_fs();
74963 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
74964 index 02dd203..e03fcc9 100644
74965 --- a/net/ipv6/inet6_connection_sock.c
74966 +++ b/net/ipv6/inet6_connection_sock.c
74967 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
74968 #ifdef CONFIG_XFRM
74969 {
74970 struct rt6_info *rt = (struct rt6_info *)dst;
74971 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
74972 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
74973 }
74974 #endif
74975 }
74976 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
74977 #ifdef CONFIG_XFRM
74978 if (dst) {
74979 struct rt6_info *rt = (struct rt6_info *)dst;
74980 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
74981 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
74982 __sk_dst_reset(sk);
74983 dst = NULL;
74984 }
74985 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
74986 index 63dd1f8..e7f53ca 100644
74987 --- a/net/ipv6/ipv6_sockglue.c
74988 +++ b/net/ipv6/ipv6_sockglue.c
74989 @@ -990,7 +990,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
74990 if (sk->sk_type != SOCK_STREAM)
74991 return -ENOPROTOOPT;
74992
74993 - msg.msg_control = optval;
74994 + msg.msg_control = (void __force_kernel *)optval;
74995 msg.msg_controllen = len;
74996 msg.msg_flags = flags;
74997
74998 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
74999 index 5bddea7..82d9d67 100644
75000 --- a/net/ipv6/raw.c
75001 +++ b/net/ipv6/raw.c
75002 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
75003 {
75004 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
75005 skb_checksum_complete(skb)) {
75006 - atomic_inc(&sk->sk_drops);
75007 + atomic_inc_unchecked(&sk->sk_drops);
75008 kfree_skb(skb);
75009 return NET_RX_DROP;
75010 }
75011 @@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75012 struct raw6_sock *rp = raw6_sk(sk);
75013
75014 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
75015 - atomic_inc(&sk->sk_drops);
75016 + atomic_inc_unchecked(&sk->sk_drops);
75017 kfree_skb(skb);
75018 return NET_RX_DROP;
75019 }
75020 @@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75021
75022 if (inet->hdrincl) {
75023 if (skb_checksum_complete(skb)) {
75024 - atomic_inc(&sk->sk_drops);
75025 + atomic_inc_unchecked(&sk->sk_drops);
75026 kfree_skb(skb);
75027 return NET_RX_DROP;
75028 }
75029 @@ -602,7 +602,7 @@ out:
75030 return err;
75031 }
75032
75033 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
75034 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
75035 struct flowi6 *fl6, struct dst_entry **dstp,
75036 unsigned int flags)
75037 {
75038 @@ -914,12 +914,15 @@ do_confirm:
75039 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
75040 char __user *optval, int optlen)
75041 {
75042 + struct icmp6_filter filter;
75043 +
75044 switch (optname) {
75045 case ICMPV6_FILTER:
75046 if (optlen > sizeof(struct icmp6_filter))
75047 optlen = sizeof(struct icmp6_filter);
75048 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
75049 + if (copy_from_user(&filter, optval, optlen))
75050 return -EFAULT;
75051 + raw6_sk(sk)->filter = filter;
75052 return 0;
75053 default:
75054 return -ENOPROTOOPT;
75055 @@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75056 char __user *optval, int __user *optlen)
75057 {
75058 int len;
75059 + struct icmp6_filter filter;
75060
75061 switch (optname) {
75062 case ICMPV6_FILTER:
75063 @@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75064 len = sizeof(struct icmp6_filter);
75065 if (put_user(len, optlen))
75066 return -EFAULT;
75067 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
75068 + filter = raw6_sk(sk)->filter;
75069 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
75070 return -EFAULT;
75071 return 0;
75072 default:
75073 @@ -1250,7 +1255,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75074 0, 0L, 0,
75075 sock_i_uid(sp), 0,
75076 sock_i_ino(sp),
75077 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75078 + atomic_read(&sp->sk_refcnt),
75079 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75080 + NULL,
75081 +#else
75082 + sp,
75083 +#endif
75084 + atomic_read_unchecked(&sp->sk_drops));
75085 }
75086
75087 static int raw6_seq_show(struct seq_file *seq, void *v)
75088 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
75089 index 98256cf..7f16dbd 100644
75090 --- a/net/ipv6/tcp_ipv6.c
75091 +++ b/net/ipv6/tcp_ipv6.c
75092 @@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
75093 }
75094 #endif
75095
75096 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75097 +extern int grsec_enable_blackhole;
75098 +#endif
75099 +
75100 static void tcp_v6_hash(struct sock *sk)
75101 {
75102 if (sk->sk_state != TCP_CLOSE) {
75103 @@ -1542,6 +1546,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
75104 return 0;
75105
75106 reset:
75107 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75108 + if (!grsec_enable_blackhole)
75109 +#endif
75110 tcp_v6_send_reset(sk, skb);
75111 discard:
75112 if (opt_skb)
75113 @@ -1623,12 +1630,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
75114 TCP_SKB_CB(skb)->sacked = 0;
75115
75116 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75117 - if (!sk)
75118 + if (!sk) {
75119 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75120 + ret = 1;
75121 +#endif
75122 goto no_tcp_socket;
75123 + }
75124
75125 process:
75126 - if (sk->sk_state == TCP_TIME_WAIT)
75127 + if (sk->sk_state == TCP_TIME_WAIT) {
75128 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75129 + ret = 2;
75130 +#endif
75131 goto do_time_wait;
75132 + }
75133
75134 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
75135 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75136 @@ -1676,6 +1691,10 @@ no_tcp_socket:
75137 bad_packet:
75138 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75139 } else {
75140 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75141 + if (!grsec_enable_blackhole || (ret == 1 &&
75142 + (skb->dev->flags & IFF_LOOPBACK)))
75143 +#endif
75144 tcp_v6_send_reset(NULL, skb);
75145 }
75146
75147 @@ -1930,7 +1949,13 @@ static void get_openreq6(struct seq_file *seq,
75148 uid,
75149 0, /* non standard timer */
75150 0, /* open_requests have no inode */
75151 - 0, req);
75152 + 0,
75153 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75154 + NULL
75155 +#else
75156 + req
75157 +#endif
75158 + );
75159 }
75160
75161 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75162 @@ -1980,7 +2005,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75163 sock_i_uid(sp),
75164 icsk->icsk_probes_out,
75165 sock_i_ino(sp),
75166 - atomic_read(&sp->sk_refcnt), sp,
75167 + atomic_read(&sp->sk_refcnt),
75168 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75169 + NULL,
75170 +#else
75171 + sp,
75172 +#endif
75173 jiffies_to_clock_t(icsk->icsk_rto),
75174 jiffies_to_clock_t(icsk->icsk_ack.ato),
75175 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
75176 @@ -2015,7 +2045,13 @@ static void get_timewait6_sock(struct seq_file *seq,
75177 dest->s6_addr32[2], dest->s6_addr32[3], destp,
75178 tw->tw_substate, 0, 0,
75179 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75180 - atomic_read(&tw->tw_refcnt), tw);
75181 + atomic_read(&tw->tw_refcnt),
75182 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75183 + NULL
75184 +#else
75185 + tw
75186 +#endif
75187 + );
75188 }
75189
75190 static int tcp6_seq_show(struct seq_file *seq, void *v)
75191 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
75192 index 37b0699..d323408 100644
75193 --- a/net/ipv6/udp.c
75194 +++ b/net/ipv6/udp.c
75195 @@ -50,6 +50,10 @@
75196 #include <linux/seq_file.h>
75197 #include "udp_impl.h"
75198
75199 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75200 +extern int grsec_enable_blackhole;
75201 +#endif
75202 +
75203 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
75204 {
75205 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
75206 @@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
75207
75208 return 0;
75209 drop:
75210 - atomic_inc(&sk->sk_drops);
75211 + atomic_inc_unchecked(&sk->sk_drops);
75212 drop_no_sk_drops_inc:
75213 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75214 kfree_skb(skb);
75215 @@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75216 continue;
75217 }
75218 drop:
75219 - atomic_inc(&sk->sk_drops);
75220 + atomic_inc_unchecked(&sk->sk_drops);
75221 UDP6_INC_STATS_BH(sock_net(sk),
75222 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
75223 UDP6_INC_STATS_BH(sock_net(sk),
75224 @@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75225 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
75226 proto == IPPROTO_UDPLITE);
75227
75228 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75229 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75230 +#endif
75231 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
75232
75233 kfree_skb(skb);
75234 @@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75235 if (!sock_owned_by_user(sk))
75236 udpv6_queue_rcv_skb(sk, skb);
75237 else if (sk_add_backlog(sk, skb)) {
75238 - atomic_inc(&sk->sk_drops);
75239 + atomic_inc_unchecked(&sk->sk_drops);
75240 bh_unlock_sock(sk);
75241 sock_put(sk);
75242 goto discard;
75243 @@ -1411,8 +1418,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
75244 0, 0L, 0,
75245 sock_i_uid(sp), 0,
75246 sock_i_ino(sp),
75247 - atomic_read(&sp->sk_refcnt), sp,
75248 - atomic_read(&sp->sk_drops));
75249 + atomic_read(&sp->sk_refcnt),
75250 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75251 + NULL,
75252 +#else
75253 + sp,
75254 +#endif
75255 + atomic_read_unchecked(&sp->sk_drops));
75256 }
75257
75258 int udp6_seq_show(struct seq_file *seq, void *v)
75259 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
75260 index 6b9d5a0..4dffaf1 100644
75261 --- a/net/irda/ircomm/ircomm_tty.c
75262 +++ b/net/irda/ircomm/ircomm_tty.c
75263 @@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75264 add_wait_queue(&self->open_wait, &wait);
75265
75266 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
75267 - __FILE__,__LINE__, tty->driver->name, self->open_count );
75268 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75269
75270 /* As far as I can see, we protect open_count - Jean II */
75271 spin_lock_irqsave(&self->spinlock, flags);
75272 if (!tty_hung_up_p(filp)) {
75273 extra_count = 1;
75274 - self->open_count--;
75275 + local_dec(&self->open_count);
75276 }
75277 spin_unlock_irqrestore(&self->spinlock, flags);
75278 - self->blocked_open++;
75279 + local_inc(&self->blocked_open);
75280
75281 while (1) {
75282 if (tty->termios->c_cflag & CBAUD) {
75283 @@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75284 }
75285
75286 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
75287 - __FILE__,__LINE__, tty->driver->name, self->open_count );
75288 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75289
75290 schedule();
75291 }
75292 @@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75293 if (extra_count) {
75294 /* ++ is not atomic, so this should be protected - Jean II */
75295 spin_lock_irqsave(&self->spinlock, flags);
75296 - self->open_count++;
75297 + local_inc(&self->open_count);
75298 spin_unlock_irqrestore(&self->spinlock, flags);
75299 }
75300 - self->blocked_open--;
75301 + local_dec(&self->blocked_open);
75302
75303 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
75304 - __FILE__,__LINE__, tty->driver->name, self->open_count);
75305 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
75306
75307 if (!retval)
75308 self->flags |= ASYNC_NORMAL_ACTIVE;
75309 @@ -412,14 +412,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
75310 }
75311 /* ++ is not atomic, so this should be protected - Jean II */
75312 spin_lock_irqsave(&self->spinlock, flags);
75313 - self->open_count++;
75314 + local_inc(&self->open_count);
75315
75316 tty->driver_data = self;
75317 self->tty = tty;
75318 spin_unlock_irqrestore(&self->spinlock, flags);
75319
75320 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
75321 - self->line, self->open_count);
75322 + self->line, local_read(&self->open_count));
75323
75324 /* Not really used by us, but lets do it anyway */
75325 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
75326 @@ -505,7 +505,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75327 return;
75328 }
75329
75330 - if ((tty->count == 1) && (self->open_count != 1)) {
75331 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
75332 /*
75333 * Uh, oh. tty->count is 1, which means that the tty
75334 * structure will be freed. state->count should always
75335 @@ -515,16 +515,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75336 */
75337 IRDA_DEBUG(0, "%s(), bad serial port count; "
75338 "tty->count is 1, state->count is %d\n", __func__ ,
75339 - self->open_count);
75340 - self->open_count = 1;
75341 + local_read(&self->open_count));
75342 + local_set(&self->open_count, 1);
75343 }
75344
75345 - if (--self->open_count < 0) {
75346 + if (local_dec_return(&self->open_count) < 0) {
75347 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
75348 - __func__, self->line, self->open_count);
75349 - self->open_count = 0;
75350 + __func__, self->line, local_read(&self->open_count));
75351 + local_set(&self->open_count, 0);
75352 }
75353 - if (self->open_count) {
75354 + if (local_read(&self->open_count)) {
75355 spin_unlock_irqrestore(&self->spinlock, flags);
75356
75357 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
75358 @@ -556,7 +556,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75359 tty->closing = 0;
75360 self->tty = NULL;
75361
75362 - if (self->blocked_open) {
75363 + if (local_read(&self->blocked_open)) {
75364 if (self->close_delay)
75365 schedule_timeout_interruptible(self->close_delay);
75366 wake_up_interruptible(&self->open_wait);
75367 @@ -1008,7 +1008,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
75368 spin_lock_irqsave(&self->spinlock, flags);
75369 self->flags &= ~ASYNC_NORMAL_ACTIVE;
75370 self->tty = NULL;
75371 - self->open_count = 0;
75372 + local_set(&self->open_count, 0);
75373 spin_unlock_irqrestore(&self->spinlock, flags);
75374
75375 wake_up_interruptible(&self->open_wait);
75376 @@ -1355,7 +1355,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
75377 seq_putc(m, '\n');
75378
75379 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
75380 - seq_printf(m, "Open count: %d\n", self->open_count);
75381 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
75382 seq_printf(m, "Max data size: %d\n", self->max_data_size);
75383 seq_printf(m, "Max header size: %d\n", self->max_header_size);
75384
75385 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
75386 index 07d7d55..541de95 100644
75387 --- a/net/iucv/af_iucv.c
75388 +++ b/net/iucv/af_iucv.c
75389 @@ -783,10 +783,10 @@ static int iucv_sock_autobind(struct sock *sk)
75390
75391 write_lock_bh(&iucv_sk_list.lock);
75392
75393 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
75394 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75395 while (__iucv_get_sock_by_name(name)) {
75396 sprintf(name, "%08x",
75397 - atomic_inc_return(&iucv_sk_list.autobind_name));
75398 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75399 }
75400
75401 write_unlock_bh(&iucv_sk_list.lock);
75402 diff --git a/net/key/af_key.c b/net/key/af_key.c
75403 index 7e5d927..cdbb54e 100644
75404 --- a/net/key/af_key.c
75405 +++ b/net/key/af_key.c
75406 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
75407 static u32 get_acqseq(void)
75408 {
75409 u32 res;
75410 - static atomic_t acqseq;
75411 + static atomic_unchecked_t acqseq;
75412
75413 do {
75414 - res = atomic_inc_return(&acqseq);
75415 + res = atomic_inc_return_unchecked(&acqseq);
75416 } while (!res);
75417 return res;
75418 }
75419 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
75420 index db8fae5..ff070cd 100644
75421 --- a/net/mac80211/ieee80211_i.h
75422 +++ b/net/mac80211/ieee80211_i.h
75423 @@ -28,6 +28,7 @@
75424 #include <net/ieee80211_radiotap.h>
75425 #include <net/cfg80211.h>
75426 #include <net/mac80211.h>
75427 +#include <asm/local.h>
75428 #include "key.h"
75429 #include "sta_info.h"
75430
75431 @@ -842,7 +843,7 @@ struct ieee80211_local {
75432 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
75433 spinlock_t queue_stop_reason_lock;
75434
75435 - int open_count;
75436 + local_t open_count;
75437 int monitors, cooked_mntrs;
75438 /* number of interfaces with corresponding FIF_ flags */
75439 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
75440 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
75441 index c20051b..2accbc4 100644
75442 --- a/net/mac80211/iface.c
75443 +++ b/net/mac80211/iface.c
75444 @@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75445 break;
75446 }
75447
75448 - if (local->open_count == 0) {
75449 + if (local_read(&local->open_count) == 0) {
75450 res = drv_start(local);
75451 if (res)
75452 goto err_del_bss;
75453 @@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75454 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
75455
75456 if (!is_valid_ether_addr(dev->dev_addr)) {
75457 - if (!local->open_count)
75458 + if (!local_read(&local->open_count))
75459 drv_stop(local);
75460 return -EADDRNOTAVAIL;
75461 }
75462 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75463 mutex_unlock(&local->mtx);
75464
75465 if (coming_up)
75466 - local->open_count++;
75467 + local_inc(&local->open_count);
75468
75469 if (hw_reconf_flags)
75470 ieee80211_hw_config(local, hw_reconf_flags);
75471 @@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75472 err_del_interface:
75473 drv_remove_interface(local, sdata);
75474 err_stop:
75475 - if (!local->open_count)
75476 + if (!local_read(&local->open_count))
75477 drv_stop(local);
75478 err_del_bss:
75479 sdata->bss = NULL;
75480 @@ -491,7 +491,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75481 }
75482
75483 if (going_down)
75484 - local->open_count--;
75485 + local_dec(&local->open_count);
75486
75487 switch (sdata->vif.type) {
75488 case NL80211_IFTYPE_AP_VLAN:
75489 @@ -550,7 +550,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75490
75491 ieee80211_recalc_ps(local, -1);
75492
75493 - if (local->open_count == 0) {
75494 + if (local_read(&local->open_count) == 0) {
75495 if (local->ops->napi_poll)
75496 napi_disable(&local->napi);
75497 ieee80211_clear_tx_pending(local);
75498 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
75499 index 1633648..d45ebfa 100644
75500 --- a/net/mac80211/main.c
75501 +++ b/net/mac80211/main.c
75502 @@ -164,7 +164,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
75503 local->hw.conf.power_level = power;
75504 }
75505
75506 - if (changed && local->open_count) {
75507 + if (changed && local_read(&local->open_count)) {
75508 ret = drv_config(local, changed);
75509 /*
75510 * Goal:
75511 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
75512 index ef8eba1..5c63952 100644
75513 --- a/net/mac80211/pm.c
75514 +++ b/net/mac80211/pm.c
75515 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75516 struct ieee80211_sub_if_data *sdata;
75517 struct sta_info *sta;
75518
75519 - if (!local->open_count)
75520 + if (!local_read(&local->open_count))
75521 goto suspend;
75522
75523 ieee80211_scan_cancel(local);
75524 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75525 cancel_work_sync(&local->dynamic_ps_enable_work);
75526 del_timer_sync(&local->dynamic_ps_timer);
75527
75528 - local->wowlan = wowlan && local->open_count;
75529 + local->wowlan = wowlan && local_read(&local->open_count);
75530 if (local->wowlan) {
75531 int err = drv_suspend(local, wowlan);
75532 if (err < 0) {
75533 @@ -128,7 +128,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75534 }
75535
75536 /* stop hardware - this must stop RX */
75537 - if (local->open_count)
75538 + if (local_read(&local->open_count))
75539 ieee80211_stop_device(local);
75540
75541 suspend:
75542 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
75543 index 3313c11..bec9f17 100644
75544 --- a/net/mac80211/rate.c
75545 +++ b/net/mac80211/rate.c
75546 @@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
75547
75548 ASSERT_RTNL();
75549
75550 - if (local->open_count)
75551 + if (local_read(&local->open_count))
75552 return -EBUSY;
75553
75554 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
75555 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
75556 index c97a065..ff61928 100644
75557 --- a/net/mac80211/rc80211_pid_debugfs.c
75558 +++ b/net/mac80211/rc80211_pid_debugfs.c
75559 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
75560
75561 spin_unlock_irqrestore(&events->lock, status);
75562
75563 - if (copy_to_user(buf, pb, p))
75564 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
75565 return -EFAULT;
75566
75567 return p;
75568 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
75569 index 3862c96..3258ddc 100644
75570 --- a/net/mac80211/util.c
75571 +++ b/net/mac80211/util.c
75572 @@ -1179,7 +1179,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
75573 }
75574 #endif
75575 /* everything else happens only if HW was up & running */
75576 - if (!local->open_count)
75577 + if (!local_read(&local->open_count))
75578 goto wake_up;
75579
75580 /*
75581 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
75582 index 0c6f67e..d02cdfc 100644
75583 --- a/net/netfilter/Kconfig
75584 +++ b/net/netfilter/Kconfig
75585 @@ -836,6 +836,16 @@ config NETFILTER_XT_MATCH_ESP
75586
75587 To compile it as a module, choose M here. If unsure, say N.
75588
75589 +config NETFILTER_XT_MATCH_GRADM
75590 + tristate '"gradm" match support'
75591 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
75592 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
75593 + ---help---
75594 + The gradm match allows matching on whether the grsecurity RBAC
75595 + system is enabled. It is useful when iptables rules are applied
75596 + early during boot to block connections to the machine (except from
75597 + a trusted host) while the RBAC system is still disabled.
75598 +
75599 config NETFILTER_XT_MATCH_HASHLIMIT
75600 tristate '"hashlimit" match support'
75601 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
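The help text above describes the intended deployment: firewall rules installed early in boot that block all traffic except a trusted administration host until the RBAC policy has been enabled with gradm. A minimal sketch of such rules follows; it assumes the userspace libxt_gradm iptables extension shipped with the gradm userland tools (not part of this kernel patch), and the "--enabled" option name is illustrative only, not taken from this patch.

    # Sketch only: drop everything except the trusted host 10.0.0.2 while the
    # RBAC system is not yet enabled ("--enabled" stands in for whatever option
    # the real libxt_gradm extension exposes).
    iptables -A INPUT  ! -s 10.0.0.2 -m gradm ! --enabled -j DROP
    iptables -A OUTPUT ! -d 10.0.0.2 -m gradm ! --enabled -j DROP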
75602 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
75603 index ca36765..0882e7c 100644
75604 --- a/net/netfilter/Makefile
75605 +++ b/net/netfilter/Makefile
75606 @@ -86,6 +86,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
75607 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
75608 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
75609 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
75610 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
75611 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
75612 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
75613 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
75614 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
75615 index 29fa5ba..8debc79 100644
75616 --- a/net/netfilter/ipvs/ip_vs_conn.c
75617 +++ b/net/netfilter/ipvs/ip_vs_conn.c
75618 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
75619 /* Increase the refcnt counter of the dest */
75620 atomic_inc(&dest->refcnt);
75621
75622 - conn_flags = atomic_read(&dest->conn_flags);
75623 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
75624 if (cp->protocol != IPPROTO_UDP)
75625 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
75626 /* Bind with the destination and its corresponding transmitter */
75627 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
75628 atomic_set(&cp->refcnt, 1);
75629
75630 atomic_set(&cp->n_control, 0);
75631 - atomic_set(&cp->in_pkts, 0);
75632 + atomic_set_unchecked(&cp->in_pkts, 0);
75633
75634 atomic_inc(&ipvs->conn_count);
75635 if (flags & IP_VS_CONN_F_NO_CPORT)
75636 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
75637
75638 /* Don't drop the entry if its number of incoming packets is not
75639 located in [0, 8] */
75640 - i = atomic_read(&cp->in_pkts);
75641 + i = atomic_read_unchecked(&cp->in_pkts);
75642 if (i > 8 || i < 0) return 0;
75643
75644 if (!todrop_rate[i]) return 0;
75645 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
75646 index 00bdb1d..6725a48 100644
75647 --- a/net/netfilter/ipvs/ip_vs_core.c
75648 +++ b/net/netfilter/ipvs/ip_vs_core.c
75649 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
75650 ret = cp->packet_xmit(skb, cp, pd->pp);
75651 /* do not touch skb anymore */
75652
75653 - atomic_inc(&cp->in_pkts);
75654 + atomic_inc_unchecked(&cp->in_pkts);
75655 ip_vs_conn_put(cp);
75656 return ret;
75657 }
75658 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
75659 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
75660 pkts = sysctl_sync_threshold(ipvs);
75661 else
75662 - pkts = atomic_add_return(1, &cp->in_pkts);
75663 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75664
75665 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
75666 cp->protocol == IPPROTO_SCTP) {
75667 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
75668 index f558998..9cdff60 100644
75669 --- a/net/netfilter/ipvs/ip_vs_ctl.c
75670 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
75671 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
75672 ip_vs_rs_hash(ipvs, dest);
75673 write_unlock_bh(&ipvs->rs_lock);
75674 }
75675 - atomic_set(&dest->conn_flags, conn_flags);
75676 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
75677
75678 /* bind the service */
75679 if (!dest->svc) {
75680 @@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75681 " %-7s %-6d %-10d %-10d\n",
75682 &dest->addr.in6,
75683 ntohs(dest->port),
75684 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75685 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75686 atomic_read(&dest->weight),
75687 atomic_read(&dest->activeconns),
75688 atomic_read(&dest->inactconns));
75689 @@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75690 "%-7s %-6d %-10d %-10d\n",
75691 ntohl(dest->addr.ip),
75692 ntohs(dest->port),
75693 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75694 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75695 atomic_read(&dest->weight),
75696 atomic_read(&dest->activeconns),
75697 atomic_read(&dest->inactconns));
75698 @@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
75699
75700 entry.addr = dest->addr.ip;
75701 entry.port = dest->port;
75702 - entry.conn_flags = atomic_read(&dest->conn_flags);
75703 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
75704 entry.weight = atomic_read(&dest->weight);
75705 entry.u_threshold = dest->u_threshold;
75706 entry.l_threshold = dest->l_threshold;
75707 @@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
75708 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
75709
75710 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
75711 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75712 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75713 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
75714 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
75715 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
75716 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
75717 index 8a0d6d6..90ec197 100644
75718 --- a/net/netfilter/ipvs/ip_vs_sync.c
75719 +++ b/net/netfilter/ipvs/ip_vs_sync.c
75720 @@ -649,7 +649,7 @@ control:
75721 * i.e only increment in_pkts for Templates.
75722 */
75723 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
75724 - int pkts = atomic_add_return(1, &cp->in_pkts);
75725 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75726
75727 if (pkts % sysctl_sync_period(ipvs) != 1)
75728 return;
75729 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
75730
75731 if (opt)
75732 memcpy(&cp->in_seq, opt, sizeof(*opt));
75733 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75734 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75735 cp->state = state;
75736 cp->old_state = cp->state;
75737 /*
75738 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
75739 index 7fd66de..e6fb361 100644
75740 --- a/net/netfilter/ipvs/ip_vs_xmit.c
75741 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
75742 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
75743 else
75744 rc = NF_ACCEPT;
75745 /* do not touch skb anymore */
75746 - atomic_inc(&cp->in_pkts);
75747 + atomic_inc_unchecked(&cp->in_pkts);
75748 goto out;
75749 }
75750
75751 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
75752 else
75753 rc = NF_ACCEPT;
75754 /* do not touch skb anymore */
75755 - atomic_inc(&cp->in_pkts);
75756 + atomic_inc_unchecked(&cp->in_pkts);
75757 goto out;
75758 }
75759
75760 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
75761 index 66b2c54..c7884e3 100644
75762 --- a/net/netfilter/nfnetlink_log.c
75763 +++ b/net/netfilter/nfnetlink_log.c
75764 @@ -70,7 +70,7 @@ struct nfulnl_instance {
75765 };
75766
75767 static DEFINE_SPINLOCK(instances_lock);
75768 -static atomic_t global_seq;
75769 +static atomic_unchecked_t global_seq;
75770
75771 #define INSTANCE_BUCKETS 16
75772 static struct hlist_head instance_table[INSTANCE_BUCKETS];
75773 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
75774 /* global sequence number */
75775 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
75776 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
75777 - htonl(atomic_inc_return(&global_seq)));
75778 + htonl(atomic_inc_return_unchecked(&global_seq)));
75779
75780 if (data_len) {
75781 struct nlattr *nla;
75782 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
75783 new file mode 100644
75784 index 0000000..6905327
75785 --- /dev/null
75786 +++ b/net/netfilter/xt_gradm.c
75787 @@ -0,0 +1,51 @@
75788 +/*
75789 + * gradm match for netfilter
75790 + * Copyright © Zbigniew Krzystolik, 2010
75791 + *
75792 + * This program is free software; you can redistribute it and/or modify
75793 + * it under the terms of the GNU General Public License; either version
75794 + * 2 or 3 as published by the Free Software Foundation.
75795 + */
75796 +#include <linux/module.h>
75797 +#include <linux/moduleparam.h>
75798 +#include <linux/skbuff.h>
75799 +#include <linux/netfilter/x_tables.h>
75800 +#include <linux/grsecurity.h>
75801 +#include <linux/netfilter/xt_gradm.h>
75802 +
75803 +static bool
75804 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
75805 +{
75806 + const struct xt_gradm_mtinfo *info = par->matchinfo;
75807 + bool retval = false;
75808 + if (gr_acl_is_enabled())
75809 + retval = true;
75810 + return retval ^ info->invflags;
75811 +}
75812 +
75813 +static struct xt_match gradm_mt_reg __read_mostly = {
75814 + .name = "gradm",
75815 + .revision = 0,
75816 + .family = NFPROTO_UNSPEC,
75817 + .match = gradm_mt,
75818 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
75819 + .me = THIS_MODULE,
75820 +};
75821 +
75822 +static int __init gradm_mt_init(void)
75823 +{
75824 + return xt_register_match(&gradm_mt_reg);
75825 +}
75826 +
75827 +static void __exit gradm_mt_exit(void)
75828 +{
75829 + xt_unregister_match(&gradm_mt_reg);
75830 +}
75831 +
75832 +module_init(gradm_mt_init);
75833 +module_exit(gradm_mt_exit);
75834 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
75835 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
75836 +MODULE_LICENSE("GPL");
75837 +MODULE_ALIAS("ipt_gradm");
75838 +MODULE_ALIAS("ip6t_gradm");
75839 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
75840 index 4fe4fb4..87a89e5 100644
75841 --- a/net/netfilter/xt_statistic.c
75842 +++ b/net/netfilter/xt_statistic.c
75843 @@ -19,7 +19,7 @@
75844 #include <linux/module.h>
75845
75846 struct xt_statistic_priv {
75847 - atomic_t count;
75848 + atomic_unchecked_t count;
75849 } ____cacheline_aligned_in_smp;
75850
75851 MODULE_LICENSE("GPL");
75852 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
75853 break;
75854 case XT_STATISTIC_MODE_NTH:
75855 do {
75856 - oval = atomic_read(&info->master->count);
75857 + oval = atomic_read_unchecked(&info->master->count);
75858 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
75859 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
75860 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
75861 if (nval == 0)
75862 ret = !ret;
75863 break;
75864 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
75865 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
75866 if (info->master == NULL)
75867 return -ENOMEM;
75868 - atomic_set(&info->master->count, info->u.nth.count);
75869 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
75870
75871 return 0;
75872 }
75873 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
75874 index faa48f7..65f7f54 100644
75875 --- a/net/netlink/af_netlink.c
75876 +++ b/net/netlink/af_netlink.c
75877 @@ -741,7 +741,7 @@ static void netlink_overrun(struct sock *sk)
75878 sk->sk_error_report(sk);
75879 }
75880 }
75881 - atomic_inc(&sk->sk_drops);
75882 + atomic_inc_unchecked(&sk->sk_drops);
75883 }
75884
75885 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
75886 @@ -2013,7 +2013,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
75887 sk_wmem_alloc_get(s),
75888 nlk->cb,
75889 atomic_read(&s->sk_refcnt),
75890 - atomic_read(&s->sk_drops),
75891 + atomic_read_unchecked(&s->sk_drops),
75892 sock_i_ino(s)
75893 );
75894
75895 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
75896 index 06592d8..64860f6 100644
75897 --- a/net/netrom/af_netrom.c
75898 +++ b/net/netrom/af_netrom.c
75899 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
75900 struct sock *sk = sock->sk;
75901 struct nr_sock *nr = nr_sk(sk);
75902
75903 + memset(sax, 0, sizeof(*sax));
75904 lock_sock(sk);
75905 if (peer != 0) {
75906 if (sk->sk_state != TCP_ESTABLISHED) {
75907 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
75908 *uaddr_len = sizeof(struct full_sockaddr_ax25);
75909 } else {
75910 sax->fsa_ax25.sax25_family = AF_NETROM;
75911 - sax->fsa_ax25.sax25_ndigis = 0;
75912 sax->fsa_ax25.sax25_call = nr->source_addr;
75913 *uaddr_len = sizeof(struct sockaddr_ax25);
75914 }
75915 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
75916 index 4f2c0df..f0ff342 100644
75917 --- a/net/packet/af_packet.c
75918 +++ b/net/packet/af_packet.c
75919 @@ -1687,7 +1687,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
75920
75921 spin_lock(&sk->sk_receive_queue.lock);
75922 po->stats.tp_packets++;
75923 - skb->dropcount = atomic_read(&sk->sk_drops);
75924 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
75925 __skb_queue_tail(&sk->sk_receive_queue, skb);
75926 spin_unlock(&sk->sk_receive_queue.lock);
75927 sk->sk_data_ready(sk, skb->len);
75928 @@ -1696,7 +1696,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
75929 drop_n_acct:
75930 spin_lock(&sk->sk_receive_queue.lock);
75931 po->stats.tp_drops++;
75932 - atomic_inc(&sk->sk_drops);
75933 + atomic_inc_unchecked(&sk->sk_drops);
75934 spin_unlock(&sk->sk_receive_queue.lock);
75935
75936 drop_n_restore:
75937 @@ -3294,7 +3294,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
75938 case PACKET_HDRLEN:
75939 if (len > sizeof(int))
75940 len = sizeof(int);
75941 - if (copy_from_user(&val, optval, len))
75942 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
75943 return -EFAULT;
75944 switch (val) {
75945 case TPACKET_V1:
75946 @@ -3344,7 +3344,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
75947
75948 if (put_user(len, optlen))
75949 return -EFAULT;
75950 - if (copy_to_user(optval, data, len))
75951 + if (len > sizeof(st) || copy_to_user(optval, data, len))
75952 return -EFAULT;
75953 return 0;
75954 }
75955 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
75956 index d65f699..05aa6ce 100644
75957 --- a/net/phonet/af_phonet.c
75958 +++ b/net/phonet/af_phonet.c
75959 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
75960 {
75961 struct phonet_protocol *pp;
75962
75963 - if (protocol >= PHONET_NPROTO)
75964 + if (protocol < 0 || protocol >= PHONET_NPROTO)
75965 return NULL;
75966
75967 rcu_read_lock();
75968 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
75969 {
75970 int err = 0;
75971
75972 - if (protocol >= PHONET_NPROTO)
75973 + if (protocol < 0 || protocol >= PHONET_NPROTO)
75974 return -EINVAL;
75975
75976 err = proto_register(pp->prot, 1);
75977 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
75978 index 9726fe6..fc4e3a4 100644
75979 --- a/net/phonet/pep.c
75980 +++ b/net/phonet/pep.c
75981 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
75982
75983 case PNS_PEP_CTRL_REQ:
75984 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
75985 - atomic_inc(&sk->sk_drops);
75986 + atomic_inc_unchecked(&sk->sk_drops);
75987 break;
75988 }
75989 __skb_pull(skb, 4);
75990 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
75991 }
75992
75993 if (pn->rx_credits == 0) {
75994 - atomic_inc(&sk->sk_drops);
75995 + atomic_inc_unchecked(&sk->sk_drops);
75996 err = -ENOBUFS;
75997 break;
75998 }
75999 @@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
76000 }
76001
76002 if (pn->rx_credits == 0) {
76003 - atomic_inc(&sk->sk_drops);
76004 + atomic_inc_unchecked(&sk->sk_drops);
76005 err = NET_RX_DROP;
76006 break;
76007 }
76008 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
76009 index 4c7eff3..59c727f 100644
76010 --- a/net/phonet/socket.c
76011 +++ b/net/phonet/socket.c
76012 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
76013 pn->resource, sk->sk_state,
76014 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
76015 sock_i_uid(sk), sock_i_ino(sk),
76016 - atomic_read(&sk->sk_refcnt), sk,
76017 - atomic_read(&sk->sk_drops), &len);
76018 + atomic_read(&sk->sk_refcnt),
76019 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76020 + NULL,
76021 +#else
76022 + sk,
76023 +#endif
76024 + atomic_read_unchecked(&sk->sk_drops), &len);
76025 }
76026 seq_printf(seq, "%*s\n", 127 - len, "");
76027 return 0;
76028 diff --git a/net/rds/cong.c b/net/rds/cong.c
76029 index e5b65ac..f3b6fb7 100644
76030 --- a/net/rds/cong.c
76031 +++ b/net/rds/cong.c
76032 @@ -78,7 +78,7 @@
76033 * finds that the saved generation number is smaller than the global generation
76034 * number, it wakes up the process.
76035 */
76036 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
76037 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
76038
76039 /*
76040 * Congestion monitoring
76041 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
76042 rdsdebug("waking map %p for %pI4\n",
76043 map, &map->m_addr);
76044 rds_stats_inc(s_cong_update_received);
76045 - atomic_inc(&rds_cong_generation);
76046 + atomic_inc_unchecked(&rds_cong_generation);
76047 if (waitqueue_active(&map->m_waitq))
76048 wake_up(&map->m_waitq);
76049 if (waitqueue_active(&rds_poll_waitq))
76050 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
76051
76052 int rds_cong_updated_since(unsigned long *recent)
76053 {
76054 - unsigned long gen = atomic_read(&rds_cong_generation);
76055 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
76056
76057 if (likely(*recent == gen))
76058 return 0;
76059 diff --git a/net/rds/ib.h b/net/rds/ib.h
76060 index edfaaaf..8c89879 100644
76061 --- a/net/rds/ib.h
76062 +++ b/net/rds/ib.h
76063 @@ -128,7 +128,7 @@ struct rds_ib_connection {
76064 /* sending acks */
76065 unsigned long i_ack_flags;
76066 #ifdef KERNEL_HAS_ATOMIC64
76067 - atomic64_t i_ack_next; /* next ACK to send */
76068 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
76069 #else
76070 spinlock_t i_ack_lock; /* protect i_ack_next */
76071 u64 i_ack_next; /* next ACK to send */
76072 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
76073 index a1e1162..265e129 100644
76074 --- a/net/rds/ib_cm.c
76075 +++ b/net/rds/ib_cm.c
76076 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
76077 /* Clear the ACK state */
76078 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
76079 #ifdef KERNEL_HAS_ATOMIC64
76080 - atomic64_set(&ic->i_ack_next, 0);
76081 + atomic64_set_unchecked(&ic->i_ack_next, 0);
76082 #else
76083 ic->i_ack_next = 0;
76084 #endif
76085 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
76086 index 8d19491..05a3e65 100644
76087 --- a/net/rds/ib_recv.c
76088 +++ b/net/rds/ib_recv.c
76089 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
76090 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
76091 int ack_required)
76092 {
76093 - atomic64_set(&ic->i_ack_next, seq);
76094 + atomic64_set_unchecked(&ic->i_ack_next, seq);
76095 if (ack_required) {
76096 smp_mb__before_clear_bit();
76097 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76098 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
76099 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76100 smp_mb__after_clear_bit();
76101
76102 - return atomic64_read(&ic->i_ack_next);
76103 + return atomic64_read_unchecked(&ic->i_ack_next);
76104 }
76105 #endif
76106
76107 diff --git a/net/rds/iw.h b/net/rds/iw.h
76108 index 04ce3b1..48119a6 100644
76109 --- a/net/rds/iw.h
76110 +++ b/net/rds/iw.h
76111 @@ -134,7 +134,7 @@ struct rds_iw_connection {
76112 /* sending acks */
76113 unsigned long i_ack_flags;
76114 #ifdef KERNEL_HAS_ATOMIC64
76115 - atomic64_t i_ack_next; /* next ACK to send */
76116 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
76117 #else
76118 spinlock_t i_ack_lock; /* protect i_ack_next */
76119 u64 i_ack_next; /* next ACK to send */
76120 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
76121 index a91e1db..cf3053f 100644
76122 --- a/net/rds/iw_cm.c
76123 +++ b/net/rds/iw_cm.c
76124 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
76125 /* Clear the ACK state */
76126 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
76127 #ifdef KERNEL_HAS_ATOMIC64
76128 - atomic64_set(&ic->i_ack_next, 0);
76129 + atomic64_set_unchecked(&ic->i_ack_next, 0);
76130 #else
76131 ic->i_ack_next = 0;
76132 #endif
76133 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
76134 index 4503335..db566b4 100644
76135 --- a/net/rds/iw_recv.c
76136 +++ b/net/rds/iw_recv.c
76137 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
76138 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
76139 int ack_required)
76140 {
76141 - atomic64_set(&ic->i_ack_next, seq);
76142 + atomic64_set_unchecked(&ic->i_ack_next, seq);
76143 if (ack_required) {
76144 smp_mb__before_clear_bit();
76145 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76146 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
76147 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76148 smp_mb__after_clear_bit();
76149
76150 - return atomic64_read(&ic->i_ack_next);
76151 + return atomic64_read_unchecked(&ic->i_ack_next);
76152 }
76153 #endif
76154
76155 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
76156 index edac9ef..16bcb98 100644
76157 --- a/net/rds/tcp.c
76158 +++ b/net/rds/tcp.c
76159 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
76160 int val = 1;
76161
76162 set_fs(KERNEL_DS);
76163 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
76164 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
76165 sizeof(val));
76166 set_fs(oldfs);
76167 }
76168 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
76169 index 1b4fd68..2234175 100644
76170 --- a/net/rds/tcp_send.c
76171 +++ b/net/rds/tcp_send.c
76172 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
76173
76174 oldfs = get_fs();
76175 set_fs(KERNEL_DS);
76176 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
76177 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
76178 sizeof(val));
76179 set_fs(oldfs);
76180 }
76181 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
76182 index 74c064c..fdec26f 100644
76183 --- a/net/rxrpc/af_rxrpc.c
76184 +++ b/net/rxrpc/af_rxrpc.c
76185 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
76186 __be32 rxrpc_epoch;
76187
76188 /* current debugging ID */
76189 -atomic_t rxrpc_debug_id;
76190 +atomic_unchecked_t rxrpc_debug_id;
76191
76192 /* count of skbs currently in use */
76193 atomic_t rxrpc_n_skbs;
76194 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
76195 index c3126e8..21facc7 100644
76196 --- a/net/rxrpc/ar-ack.c
76197 +++ b/net/rxrpc/ar-ack.c
76198 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
76199
76200 _enter("{%d,%d,%d,%d},",
76201 call->acks_hard, call->acks_unacked,
76202 - atomic_read(&call->sequence),
76203 + atomic_read_unchecked(&call->sequence),
76204 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
76205
76206 stop = 0;
76207 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
76208
76209 /* each Tx packet has a new serial number */
76210 sp->hdr.serial =
76211 - htonl(atomic_inc_return(&call->conn->serial));
76212 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
76213
76214 hdr = (struct rxrpc_header *) txb->head;
76215 hdr->serial = sp->hdr.serial;
76216 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
76217 */
76218 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
76219 {
76220 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
76221 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
76222 }
76223
76224 /*
76225 @@ -629,7 +629,7 @@ process_further:
76226
76227 latest = ntohl(sp->hdr.serial);
76228 hard = ntohl(ack.firstPacket);
76229 - tx = atomic_read(&call->sequence);
76230 + tx = atomic_read_unchecked(&call->sequence);
76231
76232 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
76233 latest,
76234 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
76235 goto maybe_reschedule;
76236
76237 send_ACK_with_skew:
76238 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
76239 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
76240 ntohl(ack.serial));
76241 send_ACK:
76242 mtu = call->conn->trans->peer->if_mtu;
76243 @@ -1173,7 +1173,7 @@ send_ACK:
76244 ackinfo.rxMTU = htonl(5692);
76245 ackinfo.jumbo_max = htonl(4);
76246
76247 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
76248 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
76249 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
76250 ntohl(hdr.serial),
76251 ntohs(ack.maxSkew),
76252 @@ -1191,7 +1191,7 @@ send_ACK:
76253 send_message:
76254 _debug("send message");
76255
76256 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
76257 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
76258 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
76259 send_message_2:
76260
76261 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
76262 index bf656c2..48f9d27 100644
76263 --- a/net/rxrpc/ar-call.c
76264 +++ b/net/rxrpc/ar-call.c
76265 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
76266 spin_lock_init(&call->lock);
76267 rwlock_init(&call->state_lock);
76268 atomic_set(&call->usage, 1);
76269 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
76270 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76271 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
76272
76273 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
76274 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
76275 index 4106ca9..a338d7a 100644
76276 --- a/net/rxrpc/ar-connection.c
76277 +++ b/net/rxrpc/ar-connection.c
76278 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
76279 rwlock_init(&conn->lock);
76280 spin_lock_init(&conn->state_lock);
76281 atomic_set(&conn->usage, 1);
76282 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
76283 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76284 conn->avail_calls = RXRPC_MAXCALLS;
76285 conn->size_align = 4;
76286 conn->header_size = sizeof(struct rxrpc_header);
76287 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
76288 index e7ed43a..6afa140 100644
76289 --- a/net/rxrpc/ar-connevent.c
76290 +++ b/net/rxrpc/ar-connevent.c
76291 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
76292
76293 len = iov[0].iov_len + iov[1].iov_len;
76294
76295 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
76296 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76297 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
76298
76299 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
76300 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
76301 index 1a2b0633..e8d1382 100644
76302 --- a/net/rxrpc/ar-input.c
76303 +++ b/net/rxrpc/ar-input.c
76304 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
76305 /* track the latest serial number on this connection for ACK packet
76306 * information */
76307 serial = ntohl(sp->hdr.serial);
76308 - hi_serial = atomic_read(&call->conn->hi_serial);
76309 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
76310 while (serial > hi_serial)
76311 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
76312 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
76313 serial);
76314
76315 /* request ACK generation for any ACK or DATA packet that requests
76316 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
76317 index 8e22bd3..f66d1c0 100644
76318 --- a/net/rxrpc/ar-internal.h
76319 +++ b/net/rxrpc/ar-internal.h
76320 @@ -272,8 +272,8 @@ struct rxrpc_connection {
76321 int error; /* error code for local abort */
76322 int debug_id; /* debug ID for printks */
76323 unsigned call_counter; /* call ID counter */
76324 - atomic_t serial; /* packet serial number counter */
76325 - atomic_t hi_serial; /* highest serial number received */
76326 + atomic_unchecked_t serial; /* packet serial number counter */
76327 + atomic_unchecked_t hi_serial; /* highest serial number received */
76328 u8 avail_calls; /* number of calls available */
76329 u8 size_align; /* data size alignment (for security) */
76330 u8 header_size; /* rxrpc + security header size */
76331 @@ -346,7 +346,7 @@ struct rxrpc_call {
76332 spinlock_t lock;
76333 rwlock_t state_lock; /* lock for state transition */
76334 atomic_t usage;
76335 - atomic_t sequence; /* Tx data packet sequence counter */
76336 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
76337 u32 abort_code; /* local/remote abort code */
76338 enum { /* current state of call */
76339 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
76340 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
76341 */
76342 extern atomic_t rxrpc_n_skbs;
76343 extern __be32 rxrpc_epoch;
76344 -extern atomic_t rxrpc_debug_id;
76345 +extern atomic_unchecked_t rxrpc_debug_id;
76346 extern struct workqueue_struct *rxrpc_workqueue;
76347
76348 /*
76349 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
76350 index 87f7135..74d3703 100644
76351 --- a/net/rxrpc/ar-local.c
76352 +++ b/net/rxrpc/ar-local.c
76353 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
76354 spin_lock_init(&local->lock);
76355 rwlock_init(&local->services_lock);
76356 atomic_set(&local->usage, 1);
76357 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
76358 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76359 memcpy(&local->srx, srx, sizeof(*srx));
76360 }
76361
76362 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
76363 index 16ae887..d24f12b 100644
76364 --- a/net/rxrpc/ar-output.c
76365 +++ b/net/rxrpc/ar-output.c
76366 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
76367 sp->hdr.cid = call->cid;
76368 sp->hdr.callNumber = call->call_id;
76369 sp->hdr.seq =
76370 - htonl(atomic_inc_return(&call->sequence));
76371 + htonl(atomic_inc_return_unchecked(&call->sequence));
76372 sp->hdr.serial =
76373 - htonl(atomic_inc_return(&conn->serial));
76374 + htonl(atomic_inc_return_unchecked(&conn->serial));
76375 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
76376 sp->hdr.userStatus = 0;
76377 sp->hdr.securityIndex = conn->security_ix;
76378 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
76379 index 2754f09..b20e38f 100644
76380 --- a/net/rxrpc/ar-peer.c
76381 +++ b/net/rxrpc/ar-peer.c
76382 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
76383 INIT_LIST_HEAD(&peer->error_targets);
76384 spin_lock_init(&peer->lock);
76385 atomic_set(&peer->usage, 1);
76386 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
76387 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76388 memcpy(&peer->srx, srx, sizeof(*srx));
76389
76390 rxrpc_assess_MTU_size(peer);
76391 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
76392 index 38047f7..9f48511 100644
76393 --- a/net/rxrpc/ar-proc.c
76394 +++ b/net/rxrpc/ar-proc.c
76395 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
76396 atomic_read(&conn->usage),
76397 rxrpc_conn_states[conn->state],
76398 key_serial(conn->key),
76399 - atomic_read(&conn->serial),
76400 - atomic_read(&conn->hi_serial));
76401 + atomic_read_unchecked(&conn->serial),
76402 + atomic_read_unchecked(&conn->hi_serial));
76403
76404 return 0;
76405 }
76406 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
76407 index 92df566..87ec1bf 100644
76408 --- a/net/rxrpc/ar-transport.c
76409 +++ b/net/rxrpc/ar-transport.c
76410 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
76411 spin_lock_init(&trans->client_lock);
76412 rwlock_init(&trans->conn_lock);
76413 atomic_set(&trans->usage, 1);
76414 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
76415 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76416
76417 if (peer->srx.transport.family == AF_INET) {
76418 switch (peer->srx.transport_type) {
76419 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
76420 index 7635107..4670276 100644
76421 --- a/net/rxrpc/rxkad.c
76422 +++ b/net/rxrpc/rxkad.c
76423 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
76424
76425 len = iov[0].iov_len + iov[1].iov_len;
76426
76427 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
76428 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76429 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
76430
76431 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
76432 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
76433
76434 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
76435
76436 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
76437 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76438 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
76439
76440 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
76441 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
76442 index 1e2eee8..ce3967e 100644
76443 --- a/net/sctp/proc.c
76444 +++ b/net/sctp/proc.c
76445 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
76446 seq_printf(seq,
76447 "%8pK %8pK %-3d %-3d %-2d %-4d "
76448 "%4d %8d %8d %7d %5lu %-5d %5d ",
76449 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
76450 + assoc, sk,
76451 + sctp_sk(sk)->type, sk->sk_state,
76452 assoc->state, hash,
76453 assoc->assoc_id,
76454 assoc->sndbuf_used,
76455 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
76456 index 92ba71d..9a97902 100644
76457 --- a/net/sctp/socket.c
76458 +++ b/net/sctp/socket.c
76459 @@ -4569,7 +4569,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
76460 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
76461 if (space_left < addrlen)
76462 return -ENOMEM;
76463 - if (copy_to_user(to, &temp, addrlen))
76464 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
76465 return -EFAULT;
76466 to += addrlen;
76467 cnt++;
76468 diff --git a/net/socket.c b/net/socket.c
76469 index 851edcd..b786851 100644
76470 --- a/net/socket.c
76471 +++ b/net/socket.c
76472 @@ -88,6 +88,7 @@
76473 #include <linux/nsproxy.h>
76474 #include <linux/magic.h>
76475 #include <linux/slab.h>
76476 +#include <linux/in.h>
76477
76478 #include <asm/uaccess.h>
76479 #include <asm/unistd.h>
76480 @@ -105,6 +106,8 @@
76481 #include <linux/sockios.h>
76482 #include <linux/atalk.h>
76483
76484 +#include <linux/grsock.h>
76485 +
76486 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
76487 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
76488 unsigned long nr_segs, loff_t pos);
76489 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
76490 &sockfs_dentry_operations, SOCKFS_MAGIC);
76491 }
76492
76493 -static struct vfsmount *sock_mnt __read_mostly;
76494 +struct vfsmount *sock_mnt __read_mostly;
76495
76496 static struct file_system_type sock_fs_type = {
76497 .name = "sockfs",
76498 @@ -1207,6 +1210,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
76499 return -EAFNOSUPPORT;
76500 if (type < 0 || type >= SOCK_MAX)
76501 return -EINVAL;
76502 + if (protocol < 0)
76503 + return -EINVAL;
76504
76505 /* Compatibility.
76506
76507 @@ -1339,6 +1344,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
76508 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
76509 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
76510
76511 + if(!gr_search_socket(family, type, protocol)) {
76512 + retval = -EACCES;
76513 + goto out;
76514 + }
76515 +
76516 + if (gr_handle_sock_all(family, type, protocol)) {
76517 + retval = -EACCES;
76518 + goto out;
76519 + }
76520 +
76521 retval = sock_create(family, type, protocol, &sock);
76522 if (retval < 0)
76523 goto out;
76524 @@ -1451,6 +1466,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
76525 if (sock) {
76526 err = move_addr_to_kernel(umyaddr, addrlen, &address);
76527 if (err >= 0) {
76528 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
76529 + err = -EACCES;
76530 + goto error;
76531 + }
76532 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
76533 + if (err)
76534 + goto error;
76535 +
76536 err = security_socket_bind(sock,
76537 (struct sockaddr *)&address,
76538 addrlen);
76539 @@ -1459,6 +1482,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
76540 (struct sockaddr *)
76541 &address, addrlen);
76542 }
76543 +error:
76544 fput_light(sock->file, fput_needed);
76545 }
76546 return err;
76547 @@ -1482,10 +1506,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
76548 if ((unsigned)backlog > somaxconn)
76549 backlog = somaxconn;
76550
76551 + if (gr_handle_sock_server_other(sock->sk)) {
76552 + err = -EPERM;
76553 + goto error;
76554 + }
76555 +
76556 + err = gr_search_listen(sock);
76557 + if (err)
76558 + goto error;
76559 +
76560 err = security_socket_listen(sock, backlog);
76561 if (!err)
76562 err = sock->ops->listen(sock, backlog);
76563
76564 +error:
76565 fput_light(sock->file, fput_needed);
76566 }
76567 return err;
76568 @@ -1529,6 +1563,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
76569 newsock->type = sock->type;
76570 newsock->ops = sock->ops;
76571
76572 + if (gr_handle_sock_server_other(sock->sk)) {
76573 + err = -EPERM;
76574 + sock_release(newsock);
76575 + goto out_put;
76576 + }
76577 +
76578 + err = gr_search_accept(sock);
76579 + if (err) {
76580 + sock_release(newsock);
76581 + goto out_put;
76582 + }
76583 +
76584 /*
76585 * We don't need try_module_get here, as the listening socket (sock)
76586 * has the protocol module (sock->ops->owner) held.
76587 @@ -1567,6 +1613,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
76588 fd_install(newfd, newfile);
76589 err = newfd;
76590
76591 + gr_attach_curr_ip(newsock->sk);
76592 +
76593 out_put:
76594 fput_light(sock->file, fput_needed);
76595 out:
76596 @@ -1599,6 +1647,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
76597 int, addrlen)
76598 {
76599 struct socket *sock;
76600 + struct sockaddr *sck;
76601 struct sockaddr_storage address;
76602 int err, fput_needed;
76603
76604 @@ -1609,6 +1658,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
76605 if (err < 0)
76606 goto out_put;
76607
76608 + sck = (struct sockaddr *)&address;
76609 +
76610 + if (gr_handle_sock_client(sck)) {
76611 + err = -EACCES;
76612 + goto out_put;
76613 + }
76614 +
76615 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
76616 + if (err)
76617 + goto out_put;
76618 +
76619 err =
76620 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
76621 if (err)
76622 @@ -1966,7 +2026,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
76623 * checking falls down on this.
76624 */
76625 if (copy_from_user(ctl_buf,
76626 - (void __user __force *)msg_sys->msg_control,
76627 + (void __force_user *)msg_sys->msg_control,
76628 ctl_len))
76629 goto out_freectl;
76630 msg_sys->msg_control = ctl_buf;
76631 @@ -2136,7 +2196,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
76632 * kernel msghdr to use the kernel address space)
76633 */
76634
76635 - uaddr = (__force void __user *)msg_sys->msg_name;
76636 + uaddr = (void __force_user *)msg_sys->msg_name;
76637 uaddr_len = COMPAT_NAMELEN(msg);
76638 if (MSG_CMSG_COMPAT & flags) {
76639 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
76640 @@ -2758,7 +2818,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76641 }
76642
76643 ifr = compat_alloc_user_space(buf_size);
76644 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
76645 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
76646
76647 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
76648 return -EFAULT;
76649 @@ -2782,12 +2842,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76650 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
76651
76652 if (copy_in_user(rxnfc, compat_rxnfc,
76653 - (void *)(&rxnfc->fs.m_ext + 1) -
76654 - (void *)rxnfc) ||
76655 + (void __user *)(&rxnfc->fs.m_ext + 1) -
76656 + (void __user *)rxnfc) ||
76657 copy_in_user(&rxnfc->fs.ring_cookie,
76658 &compat_rxnfc->fs.ring_cookie,
76659 - (void *)(&rxnfc->fs.location + 1) -
76660 - (void *)&rxnfc->fs.ring_cookie) ||
76661 + (void __user *)(&rxnfc->fs.location + 1) -
76662 + (void __user *)&rxnfc->fs.ring_cookie) ||
76663 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
76664 sizeof(rxnfc->rule_cnt)))
76665 return -EFAULT;
76666 @@ -2799,12 +2859,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76667
76668 if (convert_out) {
76669 if (copy_in_user(compat_rxnfc, rxnfc,
76670 - (const void *)(&rxnfc->fs.m_ext + 1) -
76671 - (const void *)rxnfc) ||
76672 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
76673 + (const void __user *)rxnfc) ||
76674 copy_in_user(&compat_rxnfc->fs.ring_cookie,
76675 &rxnfc->fs.ring_cookie,
76676 - (const void *)(&rxnfc->fs.location + 1) -
76677 - (const void *)&rxnfc->fs.ring_cookie) ||
76678 + (const void __user *)(&rxnfc->fs.location + 1) -
76679 + (const void __user *)&rxnfc->fs.ring_cookie) ||
76680 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
76681 sizeof(rxnfc->rule_cnt)))
76682 return -EFAULT;
76683 @@ -2874,7 +2934,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
76684 old_fs = get_fs();
76685 set_fs(KERNEL_DS);
76686 err = dev_ioctl(net, cmd,
76687 - (struct ifreq __user __force *) &kifr);
76688 + (struct ifreq __force_user *) &kifr);
76689 set_fs(old_fs);
76690
76691 return err;
76692 @@ -2983,7 +3043,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
76693
76694 old_fs = get_fs();
76695 set_fs(KERNEL_DS);
76696 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
76697 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
76698 set_fs(old_fs);
76699
76700 if (cmd == SIOCGIFMAP && !err) {
76701 @@ -3088,7 +3148,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
76702 ret |= __get_user(rtdev, &(ur4->rt_dev));
76703 if (rtdev) {
76704 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
76705 - r4.rt_dev = (char __user __force *)devname;
76706 + r4.rt_dev = (char __force_user *)devname;
76707 devname[15] = 0;
76708 } else
76709 r4.rt_dev = NULL;
76710 @@ -3314,8 +3374,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
76711 int __user *uoptlen;
76712 int err;
76713
76714 - uoptval = (char __user __force *) optval;
76715 - uoptlen = (int __user __force *) optlen;
76716 + uoptval = (char __force_user *) optval;
76717 + uoptlen = (int __force_user *) optlen;
76718
76719 set_fs(KERNEL_DS);
76720 if (level == SOL_SOCKET)
76721 @@ -3335,7 +3395,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
76722 char __user *uoptval;
76723 int err;
76724
76725 - uoptval = (char __user __force *) optval;
76726 + uoptval = (char __force_user *) optval;
76727
76728 set_fs(KERNEL_DS);
76729 if (level == SOL_SOCKET)
76730 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
76731 index 994cfea..5343b6b 100644
76732 --- a/net/sunrpc/sched.c
76733 +++ b/net/sunrpc/sched.c
76734 @@ -240,9 +240,9 @@ static int rpc_wait_bit_killable(void *word)
76735 #ifdef RPC_DEBUG
76736 static void rpc_task_set_debuginfo(struct rpc_task *task)
76737 {
76738 - static atomic_t rpc_pid;
76739 + static atomic_unchecked_t rpc_pid;
76740
76741 - task->tk_pid = atomic_inc_return(&rpc_pid);
76742 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
76743 }
76744 #else
76745 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
76746 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
76747 index 8343737..677025e 100644
76748 --- a/net/sunrpc/xprtrdma/svc_rdma.c
76749 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
76750 @@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
76751 static unsigned int min_max_inline = 4096;
76752 static unsigned int max_max_inline = 65536;
76753
76754 -atomic_t rdma_stat_recv;
76755 -atomic_t rdma_stat_read;
76756 -atomic_t rdma_stat_write;
76757 -atomic_t rdma_stat_sq_starve;
76758 -atomic_t rdma_stat_rq_starve;
76759 -atomic_t rdma_stat_rq_poll;
76760 -atomic_t rdma_stat_rq_prod;
76761 -atomic_t rdma_stat_sq_poll;
76762 -atomic_t rdma_stat_sq_prod;
76763 +atomic_unchecked_t rdma_stat_recv;
76764 +atomic_unchecked_t rdma_stat_read;
76765 +atomic_unchecked_t rdma_stat_write;
76766 +atomic_unchecked_t rdma_stat_sq_starve;
76767 +atomic_unchecked_t rdma_stat_rq_starve;
76768 +atomic_unchecked_t rdma_stat_rq_poll;
76769 +atomic_unchecked_t rdma_stat_rq_prod;
76770 +atomic_unchecked_t rdma_stat_sq_poll;
76771 +atomic_unchecked_t rdma_stat_sq_prod;
76772
76773 /* Temporary NFS request map and context caches */
76774 struct kmem_cache *svc_rdma_map_cachep;
76775 @@ -110,7 +110,7 @@ static int read_reset_stat(ctl_table *table, int write,
76776 len -= *ppos;
76777 if (len > *lenp)
76778 len = *lenp;
76779 - if (len && copy_to_user(buffer, str_buf, len))
76780 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
76781 return -EFAULT;
76782 *lenp = len;
76783 *ppos += len;
76784 @@ -151,63 +151,63 @@ static ctl_table svcrdma_parm_table[] = {
76785 {
76786 .procname = "rdma_stat_read",
76787 .data = &rdma_stat_read,
76788 - .maxlen = sizeof(atomic_t),
76789 + .maxlen = sizeof(atomic_unchecked_t),
76790 .mode = 0644,
76791 .proc_handler = read_reset_stat,
76792 },
76793 {
76794 .procname = "rdma_stat_recv",
76795 .data = &rdma_stat_recv,
76796 - .maxlen = sizeof(atomic_t),
76797 + .maxlen = sizeof(atomic_unchecked_t),
76798 .mode = 0644,
76799 .proc_handler = read_reset_stat,
76800 },
76801 {
76802 .procname = "rdma_stat_write",
76803 .data = &rdma_stat_write,
76804 - .maxlen = sizeof(atomic_t),
76805 + .maxlen = sizeof(atomic_unchecked_t),
76806 .mode = 0644,
76807 .proc_handler = read_reset_stat,
76808 },
76809 {
76810 .procname = "rdma_stat_sq_starve",
76811 .data = &rdma_stat_sq_starve,
76812 - .maxlen = sizeof(atomic_t),
76813 + .maxlen = sizeof(atomic_unchecked_t),
76814 .mode = 0644,
76815 .proc_handler = read_reset_stat,
76816 },
76817 {
76818 .procname = "rdma_stat_rq_starve",
76819 .data = &rdma_stat_rq_starve,
76820 - .maxlen = sizeof(atomic_t),
76821 + .maxlen = sizeof(atomic_unchecked_t),
76822 .mode = 0644,
76823 .proc_handler = read_reset_stat,
76824 },
76825 {
76826 .procname = "rdma_stat_rq_poll",
76827 .data = &rdma_stat_rq_poll,
76828 - .maxlen = sizeof(atomic_t),
76829 + .maxlen = sizeof(atomic_unchecked_t),
76830 .mode = 0644,
76831 .proc_handler = read_reset_stat,
76832 },
76833 {
76834 .procname = "rdma_stat_rq_prod",
76835 .data = &rdma_stat_rq_prod,
76836 - .maxlen = sizeof(atomic_t),
76837 + .maxlen = sizeof(atomic_unchecked_t),
76838 .mode = 0644,
76839 .proc_handler = read_reset_stat,
76840 },
76841 {
76842 .procname = "rdma_stat_sq_poll",
76843 .data = &rdma_stat_sq_poll,
76844 - .maxlen = sizeof(atomic_t),
76845 + .maxlen = sizeof(atomic_unchecked_t),
76846 .mode = 0644,
76847 .proc_handler = read_reset_stat,
76848 },
76849 {
76850 .procname = "rdma_stat_sq_prod",
76851 .data = &rdma_stat_sq_prod,
76852 - .maxlen = sizeof(atomic_t),
76853 + .maxlen = sizeof(atomic_unchecked_t),
76854 .mode = 0644,
76855 .proc_handler = read_reset_stat,
76856 },
76857 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
76858 index 41cb63b..c4a1489 100644
76859 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
76860 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
76861 @@ -501,7 +501,7 @@ next_sge:
76862 svc_rdma_put_context(ctxt, 0);
76863 goto out;
76864 }
76865 - atomic_inc(&rdma_stat_read);
76866 + atomic_inc_unchecked(&rdma_stat_read);
76867
76868 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
76869 chl_map->ch[ch_no].count -= read_wr.num_sge;
76870 @@ -611,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
76871 dto_q);
76872 list_del_init(&ctxt->dto_q);
76873 } else {
76874 - atomic_inc(&rdma_stat_rq_starve);
76875 + atomic_inc_unchecked(&rdma_stat_rq_starve);
76876 clear_bit(XPT_DATA, &xprt->xpt_flags);
76877 ctxt = NULL;
76878 }
76879 @@ -631,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
76880 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
76881 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
76882 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
76883 - atomic_inc(&rdma_stat_recv);
76884 + atomic_inc_unchecked(&rdma_stat_recv);
76885
76886 /* Build up the XDR from the receive buffers. */
76887 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
76888 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
76889 index 42eb7ba..c887c45 100644
76890 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
76891 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
76892 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
76893 write_wr.wr.rdma.remote_addr = to;
76894
76895 /* Post It */
76896 - atomic_inc(&rdma_stat_write);
76897 + atomic_inc_unchecked(&rdma_stat_write);
76898 if (svc_rdma_send(xprt, &write_wr))
76899 goto err;
76900 return 0;
76901 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
76902 index 73b428b..5f3f8f3 100644
76903 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
76904 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
76905 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
76906 return;
76907
76908 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
76909 - atomic_inc(&rdma_stat_rq_poll);
76910 + atomic_inc_unchecked(&rdma_stat_rq_poll);
76911
76912 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
76913 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
76914 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
76915 }
76916
76917 if (ctxt)
76918 - atomic_inc(&rdma_stat_rq_prod);
76919 + atomic_inc_unchecked(&rdma_stat_rq_prod);
76920
76921 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
76922 /*
76923 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
76924 return;
76925
76926 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
76927 - atomic_inc(&rdma_stat_sq_poll);
76928 + atomic_inc_unchecked(&rdma_stat_sq_poll);
76929 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
76930 if (wc.status != IB_WC_SUCCESS)
76931 /* Close the transport */
76932 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
76933 }
76934
76935 if (ctxt)
76936 - atomic_inc(&rdma_stat_sq_prod);
76937 + atomic_inc_unchecked(&rdma_stat_sq_prod);
76938 }
76939
76940 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
76941 @@ -1266,7 +1266,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
76942 spin_lock_bh(&xprt->sc_lock);
76943 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
76944 spin_unlock_bh(&xprt->sc_lock);
76945 - atomic_inc(&rdma_stat_sq_starve);
76946 + atomic_inc_unchecked(&rdma_stat_sq_starve);
76947
76948 /* See if we can opportunistically reap SQ WR to make room */
76949 sq_cq_reap(xprt);
76950 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
76951 index c3e65ae..f512a2b 100644
76952 --- a/net/sysctl_net.c
76953 +++ b/net/sysctl_net.c
76954 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
76955 struct ctl_table *table)
76956 {
76957 /* Allow network administrator to have same access as root. */
76958 - if (capable(CAP_NET_ADMIN)) {
76959 + if (capable_nolog(CAP_NET_ADMIN)) {
76960 int mode = (table->mode >> 6) & 7;
76961 return (mode << 6) | (mode << 3) | mode;
76962 }
76963 diff --git a/net/tipc/link.c b/net/tipc/link.c
76964 index b4b9b30..5b62131 100644
76965 --- a/net/tipc/link.c
76966 +++ b/net/tipc/link.c
76967 @@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender,
76968 struct tipc_msg fragm_hdr;
76969 struct sk_buff *buf, *buf_chain, *prev;
76970 u32 fragm_crs, fragm_rest, hsz, sect_rest;
76971 - const unchar *sect_crs;
76972 + const unchar __user *sect_crs;
76973 int curr_sect;
76974 u32 fragm_no;
76975
76976 @@ -1247,7 +1247,7 @@ again:
76977
76978 if (!sect_rest) {
76979 sect_rest = msg_sect[++curr_sect].iov_len;
76980 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
76981 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
76982 }
76983
76984 if (sect_rest < fragm_rest)
76985 @@ -1266,7 +1266,7 @@ error:
76986 }
76987 } else
76988 skb_copy_to_linear_data_offset(buf, fragm_crs,
76989 - sect_crs, sz);
76990 + (const void __force_kernel *)sect_crs, sz);
76991 sect_crs += sz;
76992 sect_rest -= sz;
76993 fragm_crs += sz;
76994 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
76995 index e3afe16..333ea83 100644
76996 --- a/net/tipc/msg.c
76997 +++ b/net/tipc/msg.c
76998 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
76999 msg_sect[cnt].iov_len);
77000 else
77001 skb_copy_to_linear_data_offset(*buf, pos,
77002 - msg_sect[cnt].iov_base,
77003 + (const void __force_kernel *)msg_sect[cnt].iov_base,
77004 msg_sect[cnt].iov_len);
77005 pos += msg_sect[cnt].iov_len;
77006 }
77007 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
77008 index b2964e9..fdf2e27 100644
77009 --- a/net/tipc/subscr.c
77010 +++ b/net/tipc/subscr.c
77011 @@ -101,7 +101,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
77012 {
77013 struct iovec msg_sect;
77014
77015 - msg_sect.iov_base = (void *)&sub->evt;
77016 + msg_sect.iov_base = (void __force_user *)&sub->evt;
77017 msg_sect.iov_len = sizeof(struct tipc_event);
77018
77019 sub->evt.event = htohl(event, sub->swap);
77020 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
77021 index d510353..26c8a32 100644
77022 --- a/net/unix/af_unix.c
77023 +++ b/net/unix/af_unix.c
77024 @@ -779,6 +779,12 @@ static struct sock *unix_find_other(struct net *net,
77025 err = -ECONNREFUSED;
77026 if (!S_ISSOCK(inode->i_mode))
77027 goto put_fail;
77028 +
77029 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
77030 + err = -EACCES;
77031 + goto put_fail;
77032 + }
77033 +
77034 u = unix_find_socket_byinode(inode);
77035 if (!u)
77036 goto put_fail;
77037 @@ -799,6 +805,13 @@ static struct sock *unix_find_other(struct net *net,
77038 if (u) {
77039 struct dentry *dentry;
77040 dentry = unix_sk(u)->path.dentry;
77041 +
77042 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
77043 + err = -EPERM;
77044 + sock_put(u);
77045 + goto fail;
77046 + }
77047 +
77048 if (dentry)
77049 touch_atime(&unix_sk(u)->path);
77050 } else
77051 @@ -881,11 +894,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
77052 err = security_path_mknod(&path, dentry, mode, 0);
77053 if (err)
77054 goto out_mknod_drop_write;
77055 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
77056 + err = -EACCES;
77057 + goto out_mknod_drop_write;
77058 + }
77059 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
77060 out_mknod_drop_write:
77061 mnt_drop_write(path.mnt);
77062 if (err)
77063 goto out_mknod_dput;
77064 +
77065 + gr_handle_create(dentry, path.mnt);
77066 +
77067 mutex_unlock(&path.dentry->d_inode->i_mutex);
77068 dput(path.dentry);
77069 path.dentry = dentry;
77070 diff --git a/net/wireless/core.h b/net/wireless/core.h
77071 index 3ac2dd0..fbe533e 100644
77072 --- a/net/wireless/core.h
77073 +++ b/net/wireless/core.h
77074 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
77075 struct mutex mtx;
77076
77077 /* rfkill support */
77078 - struct rfkill_ops rfkill_ops;
77079 + rfkill_ops_no_const rfkill_ops;
77080 struct rfkill *rfkill;
77081 struct work_struct rfkill_sync;
77082
77083 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
77084 index af648e0..6185d3a 100644
77085 --- a/net/wireless/wext-core.c
77086 +++ b/net/wireless/wext-core.c
77087 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
77088 */
77089
77090 /* Support for very large requests */
77091 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
77092 - (user_length > descr->max_tokens)) {
77093 + if (user_length > descr->max_tokens) {
77094 /* Allow userspace to GET more than max so
77095 * we can support any size GET requests.
77096 * There is still a limit : -ENOMEM.
77097 @@ -787,22 +786,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
77098 }
77099 }
77100
77101 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
77102 - /*
77103 - * If this is a GET, but not NOMAX, it means that the extra
77104 - * data is not bounded by userspace, but by max_tokens. Thus
77105 - * set the length to max_tokens. This matches the extra data
77106 - * allocation.
77107 - * The driver should fill it with the number of tokens it
77108 - * provided, and it may check iwp->length rather than having
77109 - * knowledge of max_tokens. If the driver doesn't change the
77110 - * iwp->length, this ioctl just copies back max_token tokens
77111 - * filled with zeroes. Hopefully the driver isn't claiming
77112 - * them to be valid data.
77113 - */
77114 - iwp->length = descr->max_tokens;
77115 - }
77116 -
77117 err = handler(dev, info, (union iwreq_data *) iwp, extra);
77118
77119 iwp->length += essid_compat;
77120 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
77121 index a15d2a0..12142af 100644
77122 --- a/net/xfrm/xfrm_policy.c
77123 +++ b/net/xfrm/xfrm_policy.c
77124 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
77125 {
77126 policy->walk.dead = 1;
77127
77128 - atomic_inc(&policy->genid);
77129 + atomic_inc_unchecked(&policy->genid);
77130
77131 if (del_timer(&policy->timer))
77132 xfrm_pol_put(policy);
77133 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
77134 hlist_add_head(&policy->bydst, chain);
77135 xfrm_pol_hold(policy);
77136 net->xfrm.policy_count[dir]++;
77137 - atomic_inc(&flow_cache_genid);
77138 + atomic_inc_unchecked(&flow_cache_genid);
77139 if (delpol)
77140 __xfrm_policy_unlink(delpol, dir);
77141 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
77142 @@ -1530,7 +1530,7 @@ free_dst:
77143 goto out;
77144 }
77145
77146 -static int inline
77147 +static inline int
77148 xfrm_dst_alloc_copy(void **target, const void *src, int size)
77149 {
77150 if (!*target) {
77151 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
77152 return 0;
77153 }
77154
77155 -static int inline
77156 +static inline int
77157 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
77158 {
77159 #ifdef CONFIG_XFRM_SUB_POLICY
77160 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
77161 #endif
77162 }
77163
77164 -static int inline
77165 +static inline int
77166 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
77167 {
77168 #ifdef CONFIG_XFRM_SUB_POLICY
77169 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
77170
77171 xdst->num_pols = num_pols;
77172 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
77173 - xdst->policy_genid = atomic_read(&pols[0]->genid);
77174 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
77175
77176 return xdst;
77177 }
77178 @@ -2348,7 +2348,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
77179 if (xdst->xfrm_genid != dst->xfrm->genid)
77180 return 0;
77181 if (xdst->num_pols > 0 &&
77182 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
77183 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
77184 return 0;
77185
77186 mtu = dst_mtu(dst->child);
77187 @@ -2885,7 +2885,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
77188 sizeof(pol->xfrm_vec[i].saddr));
77189 pol->xfrm_vec[i].encap_family = mp->new_family;
77190 /* flush bundles */
77191 - atomic_inc(&pol->genid);
77192 + atomic_inc_unchecked(&pol->genid);
77193 }
77194 }
77195
77196 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
77197 index ff1720d..d428ee7 100644
77198 --- a/scripts/Makefile.build
77199 +++ b/scripts/Makefile.build
77200 @@ -111,7 +111,7 @@ endif
77201 endif
77202
77203 # Do not include host rules unless needed
77204 -ifneq ($(hostprogs-y)$(hostprogs-m),)
77205 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
77206 include scripts/Makefile.host
77207 endif
77208
77209 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
77210 index 686cb0d..9d653bf 100644
77211 --- a/scripts/Makefile.clean
77212 +++ b/scripts/Makefile.clean
77213 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
77214 __clean-files := $(extra-y) $(always) \
77215 $(targets) $(clean-files) \
77216 $(host-progs) \
77217 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
77218 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
77219 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
77220
77221 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
77222
77223 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
77224 index 1ac414f..a1c1451 100644
77225 --- a/scripts/Makefile.host
77226 +++ b/scripts/Makefile.host
77227 @@ -31,6 +31,7 @@
77228 # Note: Shared libraries consisting of C++ files are not supported
77229
77230 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
77231 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
77232
77233 # C code
77234 # Executables compiled from a single .c file
77235 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
77236 # Shared libaries (only .c supported)
77237 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
77238 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
77239 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
77240 # Remove .so files from "xxx-objs"
77241 host-cobjs := $(filter-out %.so,$(host-cobjs))
77242
77243 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
77244 index cb1f50c..cef2a7c 100644
77245 --- a/scripts/basic/fixdep.c
77246 +++ b/scripts/basic/fixdep.c
77247 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
77248 /*
77249 * Lookup a value in the configuration string.
77250 */
77251 -static int is_defined_config(const char *name, int len, unsigned int hash)
77252 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
77253 {
77254 struct item *aux;
77255
77256 @@ -211,10 +211,10 @@ static void clear_config(void)
77257 /*
77258 * Record the use of a CONFIG_* word.
77259 */
77260 -static void use_config(const char *m, int slen)
77261 +static void use_config(const char *m, unsigned int slen)
77262 {
77263 unsigned int hash = strhash(m, slen);
77264 - int c, i;
77265 + unsigned int c, i;
77266
77267 if (is_defined_config(m, slen, hash))
77268 return;
77269 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
77270
77271 static void parse_config_file(const char *map, size_t len)
77272 {
77273 - const int *end = (const int *) (map + len);
77274 + const unsigned int *end = (const unsigned int *) (map + len);
77275 /* start at +1, so that p can never be < map */
77276 - const int *m = (const int *) map + 1;
77277 + const unsigned int *m = (const unsigned int *) map + 1;
77278 const char *p, *q;
77279
77280 for (; m < end; m++) {
77281 @@ -406,7 +406,7 @@ static void print_deps(void)
77282 static void traps(void)
77283 {
77284 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
77285 - int *p = (int *)test;
77286 + unsigned int *p = (unsigned int *)test;
77287
77288 if (*p != INT_CONF) {
77289 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
77290 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
77291 new file mode 100644
77292 index 0000000..8729101
77293 --- /dev/null
77294 +++ b/scripts/gcc-plugin.sh
77295 @@ -0,0 +1,2 @@
77296 +#!/bin/sh
77297 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
77298 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
77299 index 44ddaa5..a3119bd 100644
77300 --- a/scripts/mod/file2alias.c
77301 +++ b/scripts/mod/file2alias.c
77302 @@ -128,7 +128,7 @@ static void device_id_check(const char *modname, const char *device_id,
77303 unsigned long size, unsigned long id_size,
77304 void *symval)
77305 {
77306 - int i;
77307 + unsigned int i;
77308
77309 if (size % id_size || size < id_size) {
77310 if (cross_build != 0)
77311 @@ -158,7 +158,7 @@ static void device_id_check(const char *modname, const char *device_id,
77312 /* USB is special because the bcdDevice can be matched against a numeric range */
77313 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
77314 static void do_usb_entry(struct usb_device_id *id,
77315 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
77316 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
77317 unsigned char range_lo, unsigned char range_hi,
77318 unsigned char max, struct module *mod)
77319 {
77320 @@ -259,7 +259,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
77321 {
77322 unsigned int devlo, devhi;
77323 unsigned char chi, clo, max;
77324 - int ndigits;
77325 + unsigned int ndigits;
77326
77327 id->match_flags = TO_NATIVE(id->match_flags);
77328 id->idVendor = TO_NATIVE(id->idVendor);
77329 @@ -501,7 +501,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
77330 for (i = 0; i < count; i++) {
77331 const char *id = (char *)devs[i].id;
77332 char acpi_id[sizeof(devs[0].id)];
77333 - int j;
77334 + unsigned int j;
77335
77336 buf_printf(&mod->dev_table_buf,
77337 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
77338 @@ -531,7 +531,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
77339
77340 for (j = 0; j < PNP_MAX_DEVICES; j++) {
77341 const char *id = (char *)card->devs[j].id;
77342 - int i2, j2;
77343 + unsigned int i2, j2;
77344 int dup = 0;
77345
77346 if (!id[0])
77347 @@ -557,7 +557,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
77348 /* add an individual alias for every device entry */
77349 if (!dup) {
77350 char acpi_id[sizeof(card->devs[0].id)];
77351 - int k;
77352 + unsigned int k;
77353
77354 buf_printf(&mod->dev_table_buf,
77355 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
77356 @@ -882,7 +882,7 @@ static void dmi_ascii_filter(char *d, const char *s)
77357 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
77358 char *alias)
77359 {
77360 - int i, j;
77361 + unsigned int i, j;
77362
77363 sprintf(alias, "dmi*");
77364
77365 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
77366 index c4e7d15..4241aef 100644
77367 --- a/scripts/mod/modpost.c
77368 +++ b/scripts/mod/modpost.c
77369 @@ -922,6 +922,7 @@ enum mismatch {
77370 ANY_INIT_TO_ANY_EXIT,
77371 ANY_EXIT_TO_ANY_INIT,
77372 EXPORT_TO_INIT_EXIT,
77373 + DATA_TO_TEXT
77374 };
77375
77376 struct sectioncheck {
77377 @@ -1030,6 +1031,12 @@ const struct sectioncheck sectioncheck[] = {
77378 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
77379 .mismatch = EXPORT_TO_INIT_EXIT,
77380 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
77381 +},
77382 +/* Do not reference code from writable data */
77383 +{
77384 + .fromsec = { DATA_SECTIONS, NULL },
77385 + .tosec = { TEXT_SECTIONS, NULL },
77386 + .mismatch = DATA_TO_TEXT
77387 }
77388 };
77389
77390 @@ -1152,10 +1159,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
77391 continue;
77392 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
77393 continue;
77394 - if (sym->st_value == addr)
77395 - return sym;
77396 /* Find a symbol nearby - addr are maybe negative */
77397 d = sym->st_value - addr;
77398 + if (d == 0)
77399 + return sym;
77400 if (d < 0)
77401 d = addr - sym->st_value;
77402 if (d < distance) {
77403 @@ -1434,6 +1441,14 @@ static void report_sec_mismatch(const char *modname,
77404 tosym, prl_to, prl_to, tosym);
77405 free(prl_to);
77406 break;
77407 + case DATA_TO_TEXT:
77408 +/*
77409 + fprintf(stderr,
77410 + "The variable %s references\n"
77411 + "the %s %s%s%s\n",
77412 + fromsym, to, sec2annotation(tosec), tosym, to_p);
77413 +*/
77414 + break;
77415 }
77416 fprintf(stderr, "\n");
77417 }
77418 @@ -1668,7 +1683,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
77419 static void check_sec_ref(struct module *mod, const char *modname,
77420 struct elf_info *elf)
77421 {
77422 - int i;
77423 + unsigned int i;
77424 Elf_Shdr *sechdrs = elf->sechdrs;
77425
77426 /* Walk through all sections */
77427 @@ -1766,7 +1781,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
77428 va_end(ap);
77429 }
77430
77431 -void buf_write(struct buffer *buf, const char *s, int len)
77432 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
77433 {
77434 if (buf->size - buf->pos < len) {
77435 buf->size += len + SZ;
77436 @@ -1984,7 +1999,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
77437 if (fstat(fileno(file), &st) < 0)
77438 goto close_write;
77439
77440 - if (st.st_size != b->pos)
77441 + if (st.st_size != (off_t)b->pos)
77442 goto close_write;
77443
77444 tmp = NOFAIL(malloc(b->pos));
77445 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
77446 index 51207e4..f7d603d 100644
77447 --- a/scripts/mod/modpost.h
77448 +++ b/scripts/mod/modpost.h
77449 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
77450
77451 struct buffer {
77452 char *p;
77453 - int pos;
77454 - int size;
77455 + unsigned int pos;
77456 + unsigned int size;
77457 };
77458
77459 void __attribute__((format(printf, 2, 3)))
77460 buf_printf(struct buffer *buf, const char *fmt, ...);
77461
77462 void
77463 -buf_write(struct buffer *buf, const char *s, int len);
77464 +buf_write(struct buffer *buf, const char *s, unsigned int len);
77465
77466 struct module {
77467 struct module *next;
77468 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
77469 index 9dfcd6d..099068e 100644
77470 --- a/scripts/mod/sumversion.c
77471 +++ b/scripts/mod/sumversion.c
77472 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
77473 goto out;
77474 }
77475
77476 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
77477 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
77478 warn("writing sum in %s failed: %s\n",
77479 filename, strerror(errno));
77480 goto out;
77481 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
77482 index 5c11312..72742b5 100644
77483 --- a/scripts/pnmtologo.c
77484 +++ b/scripts/pnmtologo.c
77485 @@ -237,14 +237,14 @@ static void write_header(void)
77486 fprintf(out, " * Linux logo %s\n", logoname);
77487 fputs(" */\n\n", out);
77488 fputs("#include <linux/linux_logo.h>\n\n", out);
77489 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
77490 + fprintf(out, "static unsigned char %s_data[] = {\n",
77491 logoname);
77492 }
77493
77494 static void write_footer(void)
77495 {
77496 fputs("\n};\n\n", out);
77497 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
77498 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
77499 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
77500 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
77501 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
77502 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
77503 fputs("\n};\n\n", out);
77504
77505 /* write logo clut */
77506 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
77507 + fprintf(out, "static unsigned char %s_clut[] = {\n",
77508 logoname);
77509 write_hex_cnt = 0;
77510 for (i = 0; i < logo_clutsize; i++) {
77511 diff --git a/security/Kconfig b/security/Kconfig
77512 index ccc61f8..5effdb4 100644
77513 --- a/security/Kconfig
77514 +++ b/security/Kconfig
77515 @@ -4,6 +4,640 @@
77516
77517 menu "Security options"
77518
77519 +source grsecurity/Kconfig
77520 +
77521 +menu "PaX"
77522 +
77523 + config ARCH_TRACK_EXEC_LIMIT
77524 + bool
77525 +
77526 + config PAX_KERNEXEC_PLUGIN
77527 + bool
77528 +
77529 + config PAX_PER_CPU_PGD
77530 + bool
77531 +
77532 + config TASK_SIZE_MAX_SHIFT
77533 + int
77534 + depends on X86_64
77535 + default 47 if !PAX_PER_CPU_PGD
77536 + default 42 if PAX_PER_CPU_PGD
77537 +
77538 + config PAX_ENABLE_PAE
77539 + bool
77540 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
77541 +
77542 +config PAX
77543 + bool "Enable various PaX features"
77544 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
77545 + help
77546 + This allows you to enable various PaX features. PaX adds
77547 + intrusion prevention mechanisms to the kernel that reduce
77548 + the risks posed by exploitable memory corruption bugs.
77549 +
77550 +menu "PaX Control"
77551 + depends on PAX
77552 +
77553 +config PAX_SOFTMODE
77554 + bool 'Support soft mode'
77555 + help
77556 + Enabling this option will allow you to run PaX in soft mode, that
77557 + is, PaX features will not be enforced by default, only on executables
77558 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
77559 + support as they are the only way to mark executables for soft mode use.
77560 +
77561 + Soft mode can be activated by using the "pax_softmode=1" kernel command
77562 + line option on boot. Furthermore you can control various PaX features
77563 + at runtime via the entries in /proc/sys/kernel/pax.
77564 +
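As a rough sketch of the runtime control mentioned above (assuming the sysctl entry under /proc/sys/kernel/pax is named softmode, which is the conventional name; check your kernel), soft mode could be toggled from a small program:

    #include <stdio.h>

    /* Sketch: enable PaX soft mode at runtime.  The path below is an
     * assumption based on the /proc/sys/kernel/pax directory named above. */
    static int set_pax_softmode(int on)
    {
        FILE *f = fopen("/proc/sys/kernel/pax/softmode", "w");

        if (!f)
            return -1;  /* sysctl absent or insufficient privileges */
        fprintf(f, "%d\n", on ? 1 : 0);
        return fclose(f);
    }

    int main(void)
    {
        return set_pax_softmode(1) == 0 ? 0 : 1;
    }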
77565 +config PAX_EI_PAX
77566 + bool 'Use legacy ELF header marking'
77567 + help
77568 + Enabling this option will allow you to control PaX features on
77569 + a per executable basis via the 'chpax' utility available at
77570 + http://pax.grsecurity.net/. The control flags will be read from
77571 + an otherwise reserved part of the ELF header. This marking has
77572 + numerous drawbacks (no support for soft-mode, toolchain does not
77573 + know about the non-standard use of the ELF header), therefore it
77574 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
77575 + support.
77576 +
77577 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77578 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
77579 + option otherwise they will not get any protection.
77580 +
77581 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
77582 + support as well, they will override the legacy EI_PAX marks.
77583 +
77584 +config PAX_PT_PAX_FLAGS
77585 + bool 'Use ELF program header marking'
77586 + help
77587 + Enabling this option will allow you to control PaX features on
77588 + a per executable basis via the 'paxctl' utility available at
77589 + http://pax.grsecurity.net/. The control flags will be read from
77590 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
77591 + has the benefits of supporting both soft mode and being fully
77592 + integrated into the toolchain (the binutils patch is available
77593 + from http://pax.grsecurity.net).
77594 +
77595 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77596 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
77597 + support otherwise they will not get any protection.
77598 +
77599 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
77600 + must make sure that the marks are the same if a binary has both marks.
77601 +
77602 + Note that if you enable the legacy EI_PAX marking support as well,
77603 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
77604 +
77605 +config PAX_XATTR_PAX_FLAGS
77606 + bool 'Use filesystem extended attributes marking'
77607 + select CIFS_XATTR if CIFS
77608 + select EXT2_FS_XATTR if EXT2_FS
77609 + select EXT3_FS_XATTR if EXT3_FS
77610 + select EXT4_FS_XATTR if EXT4_FS
77611 + select JFFS2_FS_XATTR if JFFS2_FS
77612 + select REISERFS_FS_XATTR if REISERFS_FS
77613 + select SQUASHFS_XATTR if SQUASHFS
77614 + select TMPFS_XATTR if TMPFS
77615 + select UBIFS_FS_XATTR if UBIFS_FS
77616 + help
77617 + Enabling this option will allow you to control PaX features on
77618 + a per executable basis via the 'setfattr' utility. The control
77619 + flags will be read from the user.pax.flags extended attribute of
77620 + the file. This marking has the benefit of supporting binary-only
77621 + applications that check their own integrity (e.g., skype) and would
77622 + not tolerate chpax/paxctl changes. The main drawback is that
77623 + extended attributes are not supported by some filesystems (e.g.,
77624 + isofs, udf, vfat) so copying files through such filesystems will
77625 + lose the extended attributes and these PaX markings.
77626 +
77627 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77628 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
77629 + support otherwise they will not get any protection.
77630 +
77631 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
77632 + must make sure that the marks are the same if a binary has both marks.
77633 +
77634 + Note that if you enable the legacy EI_PAX marking support as well,
77635 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
77636 +
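Besides the setfattr utility, the same marking can be applied through the xattr syscalls. The attribute name user.pax.flags comes from the help text above; the flag string is only an example following the chpax/paxctl letter convention (here "m", asking PaX to relax MPROTECT for the binary), so treat this as an illustration rather than a recommendation:

    #include <stdio.h>
    #include <sys/xattr.h>

    int main(int argc, char **argv)
    {
        if (argc != 2) {
            fprintf(stderr, "usage: %s <binary>\n", argv[0]);
            return 1;
        }
        /* Example flag string only; consult the PaX documentation for the
         * per-feature letters before marking anything for real. */
        if (setxattr(argv[1], "user.pax.flags", "m", 1, 0) != 0) {
            perror("setxattr");
            return 1;
        }
        return 0;
    }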
77637 +choice
77638 + prompt 'MAC system integration'
77639 + default PAX_HAVE_ACL_FLAGS
77640 + help
77641 + Mandatory Access Control systems have the option of controlling
77642 + PaX flags on a per executable basis, choose the method supported
77643 + by your particular system.
77644 +
77645 + - "none": if your MAC system does not interact with PaX,
77646 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
77647 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
77648 +
77649 + NOTE: this option is for developers/integrators only.
77650 +
77651 + config PAX_NO_ACL_FLAGS
77652 + bool 'none'
77653 +
77654 + config PAX_HAVE_ACL_FLAGS
77655 + bool 'direct'
77656 +
77657 + config PAX_HOOK_ACL_FLAGS
77658 + bool 'hook'
77659 +endchoice
77660 +
77661 +endmenu
77662 +
77663 +menu "Non-executable pages"
77664 + depends on PAX
77665 +
77666 +config PAX_NOEXEC
77667 + bool "Enforce non-executable pages"
77668 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
77669 + help
77670 + By design some architectures do not allow for protecting memory
77671 + pages against execution or even if they do, Linux does not make
77672 + use of this feature. In practice this means that if a page is
77673 + readable (such as the stack or heap) it is also executable.
77674 +
77675 + There is a well known exploit technique that makes use of this
77676 + fact and a common programming mistake where an attacker can
77677 + introduce code of his choice somewhere in the attacked program's
77678 + memory (typically the stack or the heap) and then execute it.
77679 +
77680 + If the attacked program was running with different (typically
77681 + higher) privileges than those of the attacker, then he can elevate
77682 + his own privilege level (e.g. get a root shell, write to files to
77683 + which he does not have write access, etc).
77684 +
77685 + Enabling this option will let you choose from various features
77686 + that prevent the injection and execution of 'foreign' code in
77687 + a program.
77688 +
77689 + This will also break programs that rely on the old behaviour and
77690 + expect that dynamically allocated memory via the malloc() family
77691 + of functions is executable (which it is not). Notable examples
77692 + are the XFree86 4.x server, the java runtime and wine.
77693 +
77694 +config PAX_PAGEEXEC
77695 + bool "Paging based non-executable pages"
77696 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
77697 + select S390_SWITCH_AMODE if S390
77698 + select S390_EXEC_PROTECT if S390
77699 + select ARCH_TRACK_EXEC_LIMIT if X86_32
77700 + help
77701 + This implementation is based on the paging feature of the CPU.
77702 + On i386 without hardware non-executable bit support there is a
77703 + variable but usually low performance impact, however on Intel's
77704 + P4 core based CPUs it is very high so you should not enable this
77705 + for kernels meant to be used on such CPUs.
77706 +
77707 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
77708 + with hardware non-executable bit support there is no performance
77709 + impact; on ppc the impact is negligible.
77710 +
77711 + Note that several architectures require various emulations due to
77712 + badly designed userland ABIs; this causes a performance impact
77713 + that will disappear as soon as userland is fixed. For example, ppc
77714 + userland MUST have been built with secure-plt by a recent toolchain.
77715 +
77716 +config PAX_SEGMEXEC
77717 + bool "Segmentation based non-executable pages"
77718 + depends on PAX_NOEXEC && X86_32
77719 + help
77720 + This implementation is based on the segmentation feature of the
77721 + CPU and has a very small performance impact, however applications
77722 + will be limited to a 1.5 GB address space instead of the normal
77723 + 3 GB.
77724 +
77725 +config PAX_EMUTRAMP
77726 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
77727 + default y if PARISC
77728 + help
77729 + There are some programs and libraries that for one reason or
77730 + another attempt to execute special small code snippets from
77731 + non-executable memory pages. Most notable examples are the
77732 + signal handler return code generated by the kernel itself and
77733 + the GCC trampolines.
77734 +
77735 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
77736 + such programs will no longer work under your kernel.
77737 +
77738 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
77739 + utilities to enable trampoline emulation for the affected programs
77740 + yet still have the protection provided by the non-executable pages.
77741 +
77742 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
77743 + your system will not even boot.
77744 +
77745 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
77746 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
77747 + for the affected files.
77748 +
77749 + NOTE: enabling this feature *may* open up a loophole in the
77750 + protection provided by non-executable pages that an attacker
77751 + could abuse. Therefore the best solution is to not have any
77752 + files on your system that would require this option. This can
77753 + be achieved by not using libc5 (which relies on the kernel
77754 + signal handler return code) and not using or rewriting programs
77755 + that make use of the nested function implementation of GCC.
77756 + Skilled users can just fix GCC itself so that it implements
77757 + nested function calls in a way that does not interfere with PaX.
77758 +
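A self-contained way to see such a trampoline is GCC's nested-function extension: passing the address of a nested function that captures a local variable forces GCC to emit a small thunk on the stack, which PAGEEXEC/SEGMEXEC would refuse to execute without this emulation. Illustrative sketch (GCC-specific, not part of the patch):

    #include <stdio.h>

    /* Takes a plain function pointer, so the nested function below cannot be
     * inlined away and a stack trampoline must be materialized for it. */
    static int apply(int (*fn)(int), int v)
    {
        return fn(v);
    }

    int main(void)
    {
        int bias = 3;

        /* GCC extension: nested function using 'bias' from the enclosing
         * frame.  Its address points at a trampoline on the stack. */
        int add_bias(int x) { return x + bias; }

        printf("%d\n", apply(add_bias, 4));  /* prints 7 */
        return 0;
    }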
77759 +config PAX_EMUSIGRT
77760 + bool "Automatically emulate sigreturn trampolines"
77761 + depends on PAX_EMUTRAMP && PARISC
77762 + default y
77763 + help
77764 + Enabling this option will have the kernel automatically detect
77765 + and emulate signal return trampolines executing on the stack
77766 + that would otherwise lead to task termination.
77767 +
77768 + This solution is intended as a temporary one for users with
77769 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
77770 + Modula-3 runtime, etc) or executables linked to such, basically
77771 + everything that does not specify its own SA_RESTORER function in
77772 + normal executable memory like glibc 2.1+ does.
77773 +
77774 + On parisc you MUST enable this option, otherwise your system will
77775 + not even boot.
77776 +
77777 + NOTE: this feature cannot be disabled on a per executable basis
77778 + and since it *does* open up a loophole in the protection provided
77779 + by non-executable pages, the best solution is to not have any
77780 + files on your system that would require this option.
77781 +
77782 +config PAX_MPROTECT
77783 + bool "Restrict mprotect()"
77784 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
77785 + help
77786 + Enabling this option will prevent programs from
77787 + - changing the executable status of memory pages that were
77788 + not originally created as executable,
77789 + - making read-only executable pages writable again,
77790 + - creating executable pages from anonymous memory,
77791 + - making read-only-after-relocations (RELRO) data pages writable again.
77792 +
77793 + You should say Y here to complete the protection provided by
77794 + the enforcement of non-executable pages.
77795 +
77796 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
77797 + this feature on a per file basis.
77798 +
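What a program observes under MPROTECT can be sketched as follows: the classic JIT pattern of mapping writable memory and later flipping it executable is refused (the exact errno is not specified here; EPERM or EACCES is typical). Illustration only:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        /* A stock kernel allows this; with PAX_MPROTECT the page was not
         * created executable, so making it executable later is denied. */
        if (mprotect(p, 4096, PROT_READ | PROT_EXEC) != 0)
            printf("mprotect denied: %s\n", strerror(errno));
        else
            printf("mprotect allowed\n");
        return 0;
    }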
77799 +config PAX_MPROTECT_COMPAT
77800 + bool "Use legacy/compat protection demoting (read help)"
77801 + depends on PAX_MPROTECT
77802 + default n
77803 + help
77804 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
77805 + by sending the proper error code to the application. For some broken
77806 + userland, this can cause problems with Python or other applications. The
77807 + current implementation however allows for applications like clamav to
77808 + detect if JIT compilation/execution is allowed and to fall back gracefully
77809 + to an interpreter-based mode if it does not. While we encourage everyone
77810 + to use the current implementation as-is and push upstream to fix broken
77811 + userland (note that the RWX logging option can assist with this), in some
77812 + environments this may not be possible. Having to disable MPROTECT
77813 + completely on certain binaries reduces the security benefit of PaX,
77814 + so this option is provided for those environments to revert to the old
77815 + behavior.
77816 +
77817 +config PAX_ELFRELOCS
77818 + bool "Allow ELF text relocations (read help)"
77819 + depends on PAX_MPROTECT
77820 + default n
77821 + help
77822 + Non-executable pages and mprotect() restrictions are effective
77823 + in preventing the introduction of new executable code into an
77824 + attacked task's address space. There remain only two venues
77825 + for this kind of attack: if the attacker can execute already
77826 + existing code in the attacked task then he can either have it
77827 + create and mmap() a file containing his code or have it mmap()
77828 + an already existing ELF library that does not have position
77829 + independent code in it and use mprotect() on it to make it
77830 + writable and copy his code there. While protecting against
77831 + the former approach is beyond PaX, the latter can be prevented
77832 + by having only PIC ELF libraries on one's system (which do not
77833 + need to relocate their code). If you are sure this is your case,
77834 + as is the case with all modern Linux distributions, then leave
77835 + this option disabled. You should say 'n' here.
77836 +
77837 +config PAX_ETEXECRELOCS
77838 + bool "Allow ELF ET_EXEC text relocations"
77839 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
77840 + select PAX_ELFRELOCS
77841 + default y
77842 + help
77843 + On some architectures there are incorrectly created applications
77844 + that require text relocations and would not work without enabling
77845 + this option. If you are an alpha, ia64 or parisc user, you should
77846 + enable this option and disable it once you have made sure that
77847 + none of your applications need it.
77848 +
77849 +config PAX_EMUPLT
77850 + bool "Automatically emulate ELF PLT"
77851 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
77852 + default y
77853 + help
77854 + Enabling this option will have the kernel automatically detect
77855 + and emulate the Procedure Linkage Table entries in ELF files.
77856 + On some architectures such entries are in writable memory, which
77857 + these protections render non-executable, leading to task termination. Therefore
77858 + it is mandatory that you enable this option on alpha, parisc,
77859 + sparc and sparc64, otherwise your system would not even boot.
77860 +
77861 + NOTE: this feature *does* open up a loophole in the protection
77862 + provided by the non-executable pages, therefore the proper
77863 + solution is to modify the toolchain to produce a PLT that does
77864 + not need to be writable.
77865 +
77866 +config PAX_DLRESOLVE
77867 + bool 'Emulate old glibc resolver stub'
77868 + depends on PAX_EMUPLT && SPARC
77869 + default n
77870 + help
77871 + This option is needed if userland has an old glibc (before 2.4)
77872 + that puts a 'save' instruction into the runtime generated resolver
77873 + stub that needs special emulation.
77874 +
77875 +config PAX_KERNEXEC
77876 + bool "Enforce non-executable kernel pages"
77877 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
77878 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
77879 + select PAX_KERNEXEC_PLUGIN if X86_64
77880 + help
77881 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
77882 + that is, enabling this option will make it harder to inject
77883 + and execute 'foreign' code in kernel memory itself.
77884 +
77885 + Note that on x86_64 kernels there is a known regression when
77886 + this feature and KVM/VMX are both enabled in the host kernel.
77887 +
77888 +choice
77889 + prompt "Return Address Instrumentation Method"
77890 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
77891 + depends on PAX_KERNEXEC_PLUGIN
77892 + help
77893 + Select the method used to instrument function pointer dereferences.
77894 + Note that binary modules cannot be instrumented by this approach.
77895 +
77896 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
77897 + bool "bts"
77898 + help
77899 + This method is compatible with binary only modules but has
77900 + a higher runtime overhead.
77901 +
77902 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
77903 + bool "or"
77904 + depends on !PARAVIRT
77905 + help
77906 + This method is incompatible with binary only modules but has
77907 + a lower runtime overhead.
77908 +endchoice
77909 +
77910 +config PAX_KERNEXEC_PLUGIN_METHOD
77911 + string
77912 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
77913 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
77914 + default ""
77915 +
77916 +config PAX_KERNEXEC_MODULE_TEXT
77917 + int "Minimum amount of memory reserved for module code"
77918 + default "4"
77919 + depends on PAX_KERNEXEC && X86_32 && MODULES
77920 + help
77921 + Due to implementation details the kernel must reserve a fixed
77922 + amount of memory for module code at compile time that cannot be
77923 + changed at runtime. Here you can specify the minimum amount
77924 + in MB that will be reserved. Due to the same implementation
77925 + details this size will always be rounded up to the next 2/4 MB
77926 + boundary (depends on PAE) so the actually available memory for
77927 + module code will usually be more than this minimum.
77928 +
77929 + The default 4 MB should be enough for most users but if you have
77930 + an excessive number of modules (e.g., most distribution configs
77931 + compile many drivers as modules) or use huge modules such as
77932 + nvidia's kernel driver, you will need to adjust this amount.
77933 + A good rule of thumb is to look at your currently loaded kernel
77934 + modules and add up their sizes.
77935 +
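One way to apply that rule of thumb is to sum the size column (second field, in bytes) of /proc/modules. A quick sketch:

    #include <stdio.h>

    int main(void)
    {
        char name[64];
        unsigned long size, total = 0;
        FILE *f = fopen("/proc/modules", "r");

        if (!f) {
            perror("/proc/modules");
            return 1;
        }
        /* Each line is "<name> <size> <refcount> ..."; only the first two
         * fields matter here, the rest of the line is skipped. */
        while (fscanf(f, "%63s %lu %*[^\n]", name, &size) == 2)
            total += size;
        fclose(f);
        printf("loaded module code: %lu MB\n", total >> 20);
        return 0;
    }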
77936 +endmenu
77937 +
77938 +menu "Address Space Layout Randomization"
77939 + depends on PAX
77940 +
77941 +config PAX_ASLR
77942 + bool "Address Space Layout Randomization"
77943 + help
77944 + Many if not most exploit techniques rely on the knowledge of
77945 + certain addresses in the attacked program. The following options
77946 + will allow the kernel to apply a certain amount of randomization
77947 + to specific parts of the program, thereby forcing an attacker to
77948 + guess them in most cases. Any failed guess will most likely crash
77949 + the attacked program, which allows the kernel to detect such attempts
77950 + and react to them. PaX itself provides no reaction mechanisms;
77951 + instead it is strongly encouraged that you make use of Nergal's
77952 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
77953 + (http://www.grsecurity.net/) built-in crash detection features or
77954 + develop one yourself.
77955 +
77956 + By saying Y here you can choose to randomize the following areas:
77957 + - top of the task's kernel stack
77958 + - top of the task's userland stack
77959 + - base address for mmap() requests that do not specify one
77960 + (this includes all libraries)
77961 + - base address of the main executable
77962 +
77963 + It is strongly recommended to say Y here as address space layout
77964 + randomization has negligible impact on performance yet it provides
77965 + a very effective protection.
77966 +
77967 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
77968 + this feature on a per file basis.
77969 +
77970 +config PAX_RANDKSTACK
77971 + bool "Randomize kernel stack base"
77972 + depends on X86_TSC && X86
77973 + help
77974 + By saying Y here the kernel will randomize every task's kernel
77975 + stack on every system call. This will not only force an attacker
77976 + to guess it but also prevent him from making use of possibly
77977 + leaked information about it.
77978 +
77979 + Since the kernel stack is a rather scarce resource, randomization
77980 + may cause unexpected stack overflows, therefore you should very
77981 + carefully test your system. Note that once enabled in the kernel
77982 + configuration, this feature cannot be disabled on a per file basis.
77983 +
77984 +config PAX_RANDUSTACK
77985 + bool "Randomize user stack base"
77986 + depends on PAX_ASLR
77987 + help
77988 + By saying Y here the kernel will randomize every task's userland
77989 + stack. The randomization is done in two steps where the second
77990 + one may apply a large shift to the top of the stack and
77991 + cause problems for programs that want to use lots of memory (more
77992 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
77993 + For this reason the second step can be controlled by 'chpax' or
77994 + 'paxctl' on a per file basis.
77995 +
77996 +config PAX_RANDMMAP
77997 + bool "Randomize mmap() base"
77998 + depends on PAX_ASLR
77999 + help
78000 + By saying Y here the kernel will use a randomized base address for
78001 + mmap() requests that do not specify one themselves. As a result
78002 + all dynamically loaded libraries will appear at random addresses
78003 + and therefore be harder to exploit by a technique where an attacker
78004 + attempts to execute library code for his purposes (e.g. spawn a
78005 + shell from an exploited program that is running at an elevated
78006 + privilege level).
78007 +
78008 + Furthermore, if a program is relinked as a dynamic ELF file, its
78009 + base address will be randomized as well, completing the full
78010 + randomization of the address space layout. Attacking such programs
78011 + becomes a guessing game. You can find an example of doing this at
78012 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
78013 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
78014 +
78015 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
78016 + feature on a per file basis.
78017 +
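The effect is easy to observe: an anonymous mapping with no hint address lands somewhere different on every run once RANDMMAP (or any mmap randomization) is active. Minimal sketch:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* No hint address: the kernel picks the location, so under
         * RANDMMAP it changes from run to run. */
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        printf("mmap() base: %p\n", p);
        return 0;
    }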
78018 +endmenu
78019 +
78020 +menu "Miscellaneous hardening features"
78021 +
78022 +config PAX_MEMORY_SANITIZE
78023 + bool "Sanitize all freed memory"
78024 + depends on !HIBERNATION
78025 + help
78026 + By saying Y here the kernel will erase memory pages as soon as they
78027 + are freed. This in turn reduces the lifetime of data stored in the
78028 + pages, making it less likely that sensitive information such as
78029 + passwords, cryptographic secrets, etc stay in memory for too long.
78030 +
78031 + This is especially useful for programs whose runtime is short; long
78032 + lived processes and the kernel itself benefit from this as long as
78033 + they operate on whole memory pages and ensure timely freeing of pages
78034 + that may hold sensitive information.
78035 +
78036 + The tradeoff is a performance impact: on a single CPU system, kernel
78037 + compilation sees a 3% slowdown; other systems and workloads may vary,
78038 + and you are advised to test this feature on your expected workload
78039 + before deploying it.
78040 +
78041 + Note that this feature does not protect data stored in live pages,
78042 + e.g., process memory swapped to disk may stay there for a long time.
78043 +
78044 +config PAX_MEMORY_STACKLEAK
78045 + bool "Sanitize kernel stack"
78046 + depends on X86
78047 + help
78048 + By saying Y here the kernel will erase the kernel stack before it
78049 + returns from a system call. This in turn reduces the information
78050 + that a kernel stack leak bug can reveal.
78051 +
78052 + Note that such a bug can still leak information that was put on
78053 + the stack by the current system call (the one eventually triggering
78054 + the bug) but traces of earlier system calls on the kernel stack
78055 + cannot leak anymore.
78056 +
78057 + The tradeoff is a performance impact: on a single CPU system, kernel
78058 + compilation sees a 1% slowdown; other systems and workloads may vary,
78059 + and you are advised to test this feature on your expected workload
78060 + before deploying it.
78061 +
78062 + Note: full support for this feature requires gcc with plugin support
78063 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
78064 + versions means that functions with large enough stack frames may
78065 + leave uninitialized memory behind that may be exposed to a later
78066 + syscall leaking the stack.
78067 +
78068 +config PAX_MEMORY_UDEREF
78069 + bool "Prevent invalid userland pointer dereference"
78070 + depends on X86 && !UML_X86 && !XEN
78071 + select PAX_PER_CPU_PGD if X86_64
78072 + help
78073 + By saying Y here the kernel will be prevented from dereferencing
78074 + userland pointers in contexts where the kernel expects only kernel
78075 + pointers. This is both a useful runtime debugging feature and a
78076 + security measure that prevents exploiting a class of kernel bugs.
78077 +
78078 + The tradeoff is that some virtualization solutions may experience
78079 + a huge slowdown and therefore you should not enable this feature
78080 + for kernels meant to run in such environments. Whether a given VM
78081 + solution is affected or not is best determined by simply trying it
78082 + out; the performance impact will be obvious right on boot as this
78083 + mechanism engages from very early on. A good rule of thumb is that
78084 + VMs running on CPUs without hardware virtualization support (i.e.,
78085 + the majority of IA-32 CPUs) will likely experience the slowdown.
78086 +
78087 +config PAX_REFCOUNT
78088 + bool "Prevent various kernel object reference counter overflows"
78089 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
78090 + help
78091 + By saying Y here the kernel will detect and prevent overflowing
78092 + various (but not all) kinds of object reference counters. Such
78093 + overflows can normally occur due to bugs only and are often, if
78094 + not always, exploitable.
78095 +
78096 + The tradeoff is that data structures protected by an overflowed
78097 + refcount will never be freed and therefore will leak memory. Note
78098 + that this leak also happens even without this protection but in
78099 + that case the overflow can eventually trigger the freeing of the
78100 + data structure while it is still being used elsewhere, resulting
78101 + in the exploitable situation that this feature prevents.
78102 +
78103 + Since this has a negligible performance impact, you should enable
78104 + this feature.
78105 +
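The bug class in question is a reference counter that can be driven to wrap: once it comes back around to zero the object is freed while still in use, turning a leak into a use-after-free. A condensed kernel-style fragment of the pattern (illustrative only, not from the patch):

    /* Illustrative kernel-style fragment, not part of this patch. */
    #include <linux/atomic.h>
    #include <linux/slab.h>

    struct thing {
        atomic_t refs;
        /* ... payload ... */
    };

    static void thing_get(struct thing *t)
    {
        /* If this can be called ~2^32 times without a matching put, the
         * counter wraps; PAX_REFCOUNT traps the overflow instead. */
        atomic_inc(&t->refs);
    }

    static void thing_put(struct thing *t)
    {
        if (atomic_dec_and_test(&t->refs))
            kfree(t);  /* after a wrap this frees a live object */
    }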
78106 +config PAX_USERCOPY
78107 + bool "Harden heap object copies between kernel and userland"
78108 + depends on X86 || PPC || SPARC || ARM
78109 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
78110 + help
78111 + By saying Y here the kernel will enforce the size of heap objects
78112 + when they are copied in either direction between the kernel and
78113 + userland, even if only a part of the heap object is copied.
78114 +
78115 + Specifically, this checking prevents information leaking from the
78116 + kernel heap during kernel to userland copies (if the kernel heap
78117 + object is otherwise fully initialized) and prevents kernel heap
78118 + overflows during userland to kernel copies.
78119 +
78120 + Note that the current implementation provides the strictest bounds
78121 + checks for the SLUB allocator.
78122 +
78123 + Enabling this option also enables per-slab cache protection against
78124 + data in a given cache being copied to or from userland via such
78125 + accessors. Though the whitelist of regions will be reduced over
78126 + time, it notably protects important data structures like task structs.
78127 +
78128 + If frame pointers are enabled on x86, this option will also restrict
78129 + copies into and out of the kernel stack to local variables within a
78130 + single frame.
78131 +
78132 + Since this has a negligible performance impact, you should enable
78133 + this feature.
78134 +
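The copies being hardened look like the following kernel-style fragment (illustrative only, not from the patch): a user-supplied length is used to copy out of a fixed-size heap object, and USERCOPY refuses the copy once the length exceeds the backing slab object, instead of leaking whatever follows it on the heap.

    /* Illustrative kernel-style fragment, not part of this patch. */
    #include <linux/errno.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct session {
        char token[32];
        /* ... other fields ... */
    };

    static long leaky_read(struct session *s, void __user *buf, size_t len)
    {
        /* BUG: 'len' is user-controlled and never checked against
         * sizeof(s->token).  A plain kernel copies 'len' bytes and leaks
         * adjacent heap data; with PAX_USERCOPY the copy is rejected once
         * 'len' exceeds the slab object backing 's'. */
        if (copy_to_user(buf, s->token, len))
            return -EFAULT;
        return len;
    }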
78135 +config PAX_SIZE_OVERFLOW
78136 + bool "Prevent various integer overflows in function size parameters"
78137 + depends on X86
78138 + help
78139 + By saying Y here the kernel recomputes expressions of function
78140 + arguments marked by a size_overflow attribute with double integer
78141 + precision (DImode/TImode for 32/64 bit integer types).
78142 +
78143 + The recomputed argument is checked against INT_MAX and an event
78144 + is logged on overflow and the triggering process is killed.
78145 +
78146 + Homepage:
78147 + http://www.grsecurity.net/~ephox/overflow_plugin/
78148 +
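The overflows targeted here are of the size-parameter kind: an expression feeding a length argument wraps in 32-bit arithmetic, so a tiny value reaches the allocator or copy routine while the caller believes it asked for something huge. A tiny user-space sketch of the arithmetic (illustrative only):

    #include <stdio.h>

    /* The kind of expression the plugin recomputes in double precision:
     * count * size wraps silently in 32-bit arithmetic. */
    static unsigned int bytes_needed(unsigned int count, unsigned int size)
    {
        return count * size;
    }

    int main(void)
    {
        unsigned int b = bytes_needed(0x40000000u, 8);  /* wraps to 0 */

        printf("requested bytes: %u\n", b);
        return 0;
    }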
78149 +endmenu
78150 +
78151 +endmenu
78152 +
78153 config KEYS
78154 bool "Enable access key retention support"
78155 help
78156 @@ -169,7 +803,7 @@ config INTEL_TXT
78157 config LSM_MMAP_MIN_ADDR
78158 int "Low address space for LSM to protect from user allocation"
78159 depends on SECURITY && SECURITY_SELINUX
78160 - default 32768 if ARM
78161 + default 32768 if ALPHA || ARM || PARISC || SPARC32
78162 default 65536
78163 help
78164 This is the portion of low virtual memory which should be protected
78165 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
78166 index ad05d39..afffccb 100644
78167 --- a/security/apparmor/lsm.c
78168 +++ b/security/apparmor/lsm.c
78169 @@ -622,7 +622,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
78170 return error;
78171 }
78172
78173 -static struct security_operations apparmor_ops = {
78174 +static struct security_operations apparmor_ops __read_only = {
78175 .name = "apparmor",
78176
78177 .ptrace_access_check = apparmor_ptrace_access_check,
78178 diff --git a/security/commoncap.c b/security/commoncap.c
78179 index 71a166a..851bb3e 100644
78180 --- a/security/commoncap.c
78181 +++ b/security/commoncap.c
78182 @@ -576,6 +576,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
78183 {
78184 const struct cred *cred = current_cred();
78185
78186 + if (gr_acl_enable_at_secure())
78187 + return 1;
78188 +
78189 if (cred->uid != 0) {
78190 if (bprm->cap_effective)
78191 return 1;
78192 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
78193 index 3ccf7ac..d73ad64 100644
78194 --- a/security/integrity/ima/ima.h
78195 +++ b/security/integrity/ima/ima.h
78196 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
78197 extern spinlock_t ima_queue_lock;
78198
78199 struct ima_h_table {
78200 - atomic_long_t len; /* number of stored measurements in the list */
78201 - atomic_long_t violations;
78202 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
78203 + atomic_long_unchecked_t violations;
78204 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
78205 };
78206 extern struct ima_h_table ima_htable;
78207 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
78208 index 88a2788..581ab92 100644
78209 --- a/security/integrity/ima/ima_api.c
78210 +++ b/security/integrity/ima/ima_api.c
78211 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
78212 int result;
78213
78214 /* can overflow, only indicator */
78215 - atomic_long_inc(&ima_htable.violations);
78216 + atomic_long_inc_unchecked(&ima_htable.violations);
78217
78218 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
78219 if (!entry) {
78220 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
78221 index e1aa2b4..52027bf 100644
78222 --- a/security/integrity/ima/ima_fs.c
78223 +++ b/security/integrity/ima/ima_fs.c
78224 @@ -28,12 +28,12 @@
78225 static int valid_policy = 1;
78226 #define TMPBUFLEN 12
78227 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
78228 - loff_t *ppos, atomic_long_t *val)
78229 + loff_t *ppos, atomic_long_unchecked_t *val)
78230 {
78231 char tmpbuf[TMPBUFLEN];
78232 ssize_t len;
78233
78234 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
78235 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
78236 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
78237 }
78238
78239 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
78240 index 55a6271..ad829c3 100644
78241 --- a/security/integrity/ima/ima_queue.c
78242 +++ b/security/integrity/ima/ima_queue.c
78243 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
78244 INIT_LIST_HEAD(&qe->later);
78245 list_add_tail_rcu(&qe->later, &ima_measurements);
78246
78247 - atomic_long_inc(&ima_htable.len);
78248 + atomic_long_inc_unchecked(&ima_htable.len);
78249 key = ima_hash_key(entry->digest);
78250 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
78251 return 0;
78252 diff --git a/security/keys/compat.c b/security/keys/compat.c
78253 index 4c48e13..7abdac9 100644
78254 --- a/security/keys/compat.c
78255 +++ b/security/keys/compat.c
78256 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
78257 if (ret == 0)
78258 goto no_payload_free;
78259
78260 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
78261 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
78262
78263 if (iov != iovstack)
78264 kfree(iov);
78265 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
78266 index fb767c6..b9c49c0 100644
78267 --- a/security/keys/keyctl.c
78268 +++ b/security/keys/keyctl.c
78269 @@ -935,7 +935,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
78270 /*
78271 * Copy the iovec data from userspace
78272 */
78273 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
78274 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
78275 unsigned ioc)
78276 {
78277 for (; ioc > 0; ioc--) {
78278 @@ -957,7 +957,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
78279 * If successful, 0 will be returned.
78280 */
78281 long keyctl_instantiate_key_common(key_serial_t id,
78282 - const struct iovec *payload_iov,
78283 + const struct iovec __user *payload_iov,
78284 unsigned ioc,
78285 size_t plen,
78286 key_serial_t ringid)
78287 @@ -1052,7 +1052,7 @@ long keyctl_instantiate_key(key_serial_t id,
78288 [0].iov_len = plen
78289 };
78290
78291 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
78292 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
78293 }
78294
78295 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
78296 @@ -1085,7 +1085,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
78297 if (ret == 0)
78298 goto no_payload_free;
78299
78300 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
78301 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
78302
78303 if (iov != iovstack)
78304 kfree(iov);
78305 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
78306 index d605f75..2bc6be9 100644
78307 --- a/security/keys/keyring.c
78308 +++ b/security/keys/keyring.c
78309 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
78310 ret = -EFAULT;
78311
78312 for (loop = 0; loop < klist->nkeys; loop++) {
78313 + key_serial_t serial;
78314 key = klist->keys[loop];
78315 + serial = key->serial;
78316
78317 tmp = sizeof(key_serial_t);
78318 if (tmp > buflen)
78319 tmp = buflen;
78320
78321 - if (copy_to_user(buffer,
78322 - &key->serial,
78323 - tmp) != 0)
78324 + if (copy_to_user(buffer, &serial, tmp))
78325 goto error;
78326
78327 buflen -= tmp;
78328 diff --git a/security/min_addr.c b/security/min_addr.c
78329 index f728728..6457a0c 100644
78330 --- a/security/min_addr.c
78331 +++ b/security/min_addr.c
78332 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
78333 */
78334 static void update_mmap_min_addr(void)
78335 {
78336 +#ifndef SPARC
78337 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
78338 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
78339 mmap_min_addr = dac_mmap_min_addr;
78340 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
78341 #else
78342 mmap_min_addr = dac_mmap_min_addr;
78343 #endif
78344 +#endif
78345 }
78346
78347 /*
78348 diff --git a/security/security.c b/security/security.c
78349 index bf619ff..8179030 100644
78350 --- a/security/security.c
78351 +++ b/security/security.c
78352 @@ -20,6 +20,7 @@
78353 #include <linux/ima.h>
78354 #include <linux/evm.h>
78355 #include <linux/fsnotify.h>
78356 +#include <linux/mm.h>
78357 #include <net/flow.h>
78358
78359 #define MAX_LSM_EVM_XATTR 2
78360 @@ -28,8 +29,8 @@
78361 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
78362 CONFIG_DEFAULT_SECURITY;
78363
78364 -static struct security_operations *security_ops;
78365 -static struct security_operations default_security_ops = {
78366 +static struct security_operations *security_ops __read_only;
78367 +static struct security_operations default_security_ops __read_only = {
78368 .name = "default",
78369 };
78370
78371 @@ -70,7 +71,9 @@ int __init security_init(void)
78372
78373 void reset_security_ops(void)
78374 {
78375 + pax_open_kernel();
78376 security_ops = &default_security_ops;
78377 + pax_close_kernel();
78378 }
78379
78380 /* Save user chosen LSM */
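The idiom in the hunk above recurs throughout the patch: objects such as security_ops are marked __read_only, and the few remaining legitimate writers bracket their store with pax_open_kernel()/pax_close_kernel(), which temporarily permit writing to that otherwise read-only data. A condensed sketch of the pattern (illustrative, mirroring the reset_security_ops() change above):

    /* Illustrative fragment mirroring the reset_security_ops() change above. */
    static struct security_operations *security_ops __read_only;

    static void install_ops(struct security_operations *ops)
    {
        pax_open_kernel();   /* allow the write to __read_only data ... */
        security_ops = ops;
        pax_close_kernel();  /* ... and seal it again immediately */
    }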
78381 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
78382 index d85b793..a164832 100644
78383 --- a/security/selinux/hooks.c
78384 +++ b/security/selinux/hooks.c
78385 @@ -95,8 +95,6 @@
78386
78387 #define NUM_SEL_MNT_OPTS 5
78388
78389 -extern struct security_operations *security_ops;
78390 -
78391 /* SECMARK reference count */
78392 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
78393
78394 @@ -5520,7 +5518,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
78395
78396 #endif
78397
78398 -static struct security_operations selinux_ops = {
78399 +static struct security_operations selinux_ops __read_only = {
78400 .name = "selinux",
78401
78402 .ptrace_access_check = selinux_ptrace_access_check,
78403 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
78404 index c220f31..89fab3f 100644
78405 --- a/security/selinux/include/xfrm.h
78406 +++ b/security/selinux/include/xfrm.h
78407 @@ -50,7 +50,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
78408
78409 static inline void selinux_xfrm_notify_policyload(void)
78410 {
78411 - atomic_inc(&flow_cache_genid);
78412 + atomic_inc_unchecked(&flow_cache_genid);
78413 }
78414 #else
78415 static inline int selinux_xfrm_enabled(void)
78416 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
78417 index 45c32f0..0038be2 100644
78418 --- a/security/smack/smack_lsm.c
78419 +++ b/security/smack/smack_lsm.c
78420 @@ -3500,7 +3500,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
78421 return 0;
78422 }
78423
78424 -struct security_operations smack_ops = {
78425 +struct security_operations smack_ops __read_only = {
78426 .name = "smack",
78427
78428 .ptrace_access_check = smack_ptrace_access_check,
78429 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
78430 index 620d37c..e2ad89b 100644
78431 --- a/security/tomoyo/tomoyo.c
78432 +++ b/security/tomoyo/tomoyo.c
78433 @@ -501,7 +501,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
78434 * tomoyo_security_ops is a "struct security_operations" which is used for
78435 * registering TOMOYO.
78436 */
78437 -static struct security_operations tomoyo_security_ops = {
78438 +static struct security_operations tomoyo_security_ops __read_only = {
78439 .name = "tomoyo",
78440 .cred_alloc_blank = tomoyo_cred_alloc_blank,
78441 .cred_prepare = tomoyo_cred_prepare,
78442 diff --git a/security/yama/Kconfig b/security/yama/Kconfig
78443 index 51d6709..1f3dbe2 100644
78444 --- a/security/yama/Kconfig
78445 +++ b/security/yama/Kconfig
78446 @@ -1,6 +1,6 @@
78447 config SECURITY_YAMA
78448 bool "Yama support"
78449 - depends on SECURITY
78450 + depends on SECURITY && !GRKERNSEC
78451 select SECURITYFS
78452 select SECURITY_PATH
78453 default n
78454 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
78455 index 270790d..c67dfcb 100644
78456 --- a/sound/aoa/codecs/onyx.c
78457 +++ b/sound/aoa/codecs/onyx.c
78458 @@ -54,7 +54,7 @@ struct onyx {
78459 spdif_locked:1,
78460 analog_locked:1,
78461 original_mute:2;
78462 - int open_count;
78463 + local_t open_count;
78464 struct codec_info *codec_info;
78465
78466 /* mutex serializes concurrent access to the device
78467 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
78468 struct onyx *onyx = cii->codec_data;
78469
78470 mutex_lock(&onyx->mutex);
78471 - onyx->open_count++;
78472 + local_inc(&onyx->open_count);
78473 mutex_unlock(&onyx->mutex);
78474
78475 return 0;
78476 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
78477 struct onyx *onyx = cii->codec_data;
78478
78479 mutex_lock(&onyx->mutex);
78480 - onyx->open_count--;
78481 - if (!onyx->open_count)
78482 + if (local_dec_and_test(&onyx->open_count))
78483 onyx->spdif_locked = onyx->analog_locked = 0;
78484 mutex_unlock(&onyx->mutex);
78485
78486 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
78487 index ffd2025..df062c9 100644
78488 --- a/sound/aoa/codecs/onyx.h
78489 +++ b/sound/aoa/codecs/onyx.h
78490 @@ -11,6 +11,7 @@
78491 #include <linux/i2c.h>
78492 #include <asm/pmac_low_i2c.h>
78493 #include <asm/prom.h>
78494 +#include <asm/local.h>
78495
78496 /* PCM3052 register definitions */
78497
78498 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
78499 index 08fde00..0bf641a 100644
78500 --- a/sound/core/oss/pcm_oss.c
78501 +++ b/sound/core/oss/pcm_oss.c
78502 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
78503 if (in_kernel) {
78504 mm_segment_t fs;
78505 fs = snd_enter_user();
78506 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
78507 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
78508 snd_leave_user(fs);
78509 } else {
78510 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
78511 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
78512 }
78513 if (ret != -EPIPE && ret != -ESTRPIPE)
78514 break;
78515 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
78516 if (in_kernel) {
78517 mm_segment_t fs;
78518 fs = snd_enter_user();
78519 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
78520 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
78521 snd_leave_user(fs);
78522 } else {
78523 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
78524 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
78525 }
78526 if (ret == -EPIPE) {
78527 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
78528 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
78529 struct snd_pcm_plugin_channel *channels;
78530 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
78531 if (!in_kernel) {
78532 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
78533 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
78534 return -EFAULT;
78535 buf = runtime->oss.buffer;
78536 }
78537 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
78538 }
78539 } else {
78540 tmp = snd_pcm_oss_write2(substream,
78541 - (const char __force *)buf,
78542 + (const char __force_kernel *)buf,
78543 runtime->oss.period_bytes, 0);
78544 if (tmp <= 0)
78545 goto err;
78546 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
78547 struct snd_pcm_runtime *runtime = substream->runtime;
78548 snd_pcm_sframes_t frames, frames1;
78549 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
78550 - char __user *final_dst = (char __force __user *)buf;
78551 + char __user *final_dst = (char __force_user *)buf;
78552 if (runtime->oss.plugin_first) {
78553 struct snd_pcm_plugin_channel *channels;
78554 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
78555 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
78556 xfer += tmp;
78557 runtime->oss.buffer_used -= tmp;
78558 } else {
78559 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
78560 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
78561 runtime->oss.period_bytes, 0);
78562 if (tmp <= 0)
78563 goto err;
78564 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
78565 size1);
78566 size1 /= runtime->channels; /* frames */
78567 fs = snd_enter_user();
78568 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
78569 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
78570 snd_leave_user(fs);
78571 }
78572 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
78573 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
78574 index 91cdf94..4085161 100644
78575 --- a/sound/core/pcm_compat.c
78576 +++ b/sound/core/pcm_compat.c
78577 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
78578 int err;
78579
78580 fs = snd_enter_user();
78581 - err = snd_pcm_delay(substream, &delay);
78582 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
78583 snd_leave_user(fs);
78584 if (err < 0)
78585 return err;
78586 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
78587 index 3fe99e6..26952e4 100644
78588 --- a/sound/core/pcm_native.c
78589 +++ b/sound/core/pcm_native.c
78590 @@ -2770,11 +2770,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
78591 switch (substream->stream) {
78592 case SNDRV_PCM_STREAM_PLAYBACK:
78593 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
78594 - (void __user *)arg);
78595 + (void __force_user *)arg);
78596 break;
78597 case SNDRV_PCM_STREAM_CAPTURE:
78598 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
78599 - (void __user *)arg);
78600 + (void __force_user *)arg);
78601 break;
78602 default:
78603 result = -EINVAL;
78604 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
78605 index 5cf8d65..912a79c 100644
78606 --- a/sound/core/seq/seq_device.c
78607 +++ b/sound/core/seq/seq_device.c
78608 @@ -64,7 +64,7 @@ struct ops_list {
78609 int argsize; /* argument size */
78610
78611 /* operators */
78612 - struct snd_seq_dev_ops ops;
78613 + struct snd_seq_dev_ops *ops;
78614
78615 /* registred devices */
78616 struct list_head dev_list; /* list of devices */
78617 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
78618
78619 mutex_lock(&ops->reg_mutex);
78620 /* copy driver operators */
78621 - ops->ops = *entry;
78622 + ops->ops = entry;
78623 ops->driver |= DRIVER_LOADED;
78624 ops->argsize = argsize;
78625
78626 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
78627 dev->name, ops->id, ops->argsize, dev->argsize);
78628 return -EINVAL;
78629 }
78630 - if (ops->ops.init_device(dev) >= 0) {
78631 + if (ops->ops->init_device(dev) >= 0) {
78632 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
78633 ops->num_init_devices++;
78634 } else {
78635 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
78636 dev->name, ops->id, ops->argsize, dev->argsize);
78637 return -EINVAL;
78638 }
78639 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
78640 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
78641 dev->status = SNDRV_SEQ_DEVICE_FREE;
78642 dev->driver_data = NULL;
78643 ops->num_init_devices--;
78644 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
78645 index 621e60e..f4543f5 100644
78646 --- a/sound/drivers/mts64.c
78647 +++ b/sound/drivers/mts64.c
78648 @@ -29,6 +29,7 @@
78649 #include <sound/initval.h>
78650 #include <sound/rawmidi.h>
78651 #include <sound/control.h>
78652 +#include <asm/local.h>
78653
78654 #define CARD_NAME "Miditerminal 4140"
78655 #define DRIVER_NAME "MTS64"
78656 @@ -67,7 +68,7 @@ struct mts64 {
78657 struct pardevice *pardev;
78658 int pardev_claimed;
78659
78660 - int open_count;
78661 + local_t open_count;
78662 int current_midi_output_port;
78663 int current_midi_input_port;
78664 u8 mode[MTS64_NUM_INPUT_PORTS];
78665 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
78666 {
78667 struct mts64 *mts = substream->rmidi->private_data;
78668
78669 - if (mts->open_count == 0) {
78670 + if (local_read(&mts->open_count) == 0) {
78671 /* We don't need a spinlock here, because this is just called
78672 if the device has not been opened before.
78673 So there aren't any IRQs from the device */
78674 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
78675
78676 msleep(50);
78677 }
78678 - ++(mts->open_count);
78679 + local_inc(&mts->open_count);
78680
78681 return 0;
78682 }
78683 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
78684 struct mts64 *mts = substream->rmidi->private_data;
78685 unsigned long flags;
78686
78687 - --(mts->open_count);
78688 - if (mts->open_count == 0) {
78689 + if (local_dec_return(&mts->open_count) == 0) {
78690 /* We need the spinlock_irqsave here because we can still
78691 have IRQs at this point */
78692 spin_lock_irqsave(&mts->lock, flags);
78693 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
78694
78695 msleep(500);
78696
78697 - } else if (mts->open_count < 0)
78698 - mts->open_count = 0;
78699 + } else if (local_read(&mts->open_count) < 0)
78700 + local_set(&mts->open_count, 0);
78701
78702 return 0;
78703 }
78704 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
78705 index b953fb4..1999c01 100644
78706 --- a/sound/drivers/opl4/opl4_lib.c
78707 +++ b/sound/drivers/opl4/opl4_lib.c
78708 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
78709 MODULE_DESCRIPTION("OPL4 driver");
78710 MODULE_LICENSE("GPL");
78711
78712 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
78713 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
78714 {
78715 int timeout = 10;
78716 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
78717 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
78718 index 3e32bd3..46fc152 100644
78719 --- a/sound/drivers/portman2x4.c
78720 +++ b/sound/drivers/portman2x4.c
78721 @@ -48,6 +48,7 @@
78722 #include <sound/initval.h>
78723 #include <sound/rawmidi.h>
78724 #include <sound/control.h>
78725 +#include <asm/local.h>
78726
78727 #define CARD_NAME "Portman 2x4"
78728 #define DRIVER_NAME "portman"
78729 @@ -85,7 +86,7 @@ struct portman {
78730 struct pardevice *pardev;
78731 int pardev_claimed;
78732
78733 - int open_count;
78734 + local_t open_count;
78735 int mode[PORTMAN_NUM_INPUT_PORTS];
78736 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
78737 };
78738 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
78739 index 87657dd..a8268d4 100644
78740 --- a/sound/firewire/amdtp.c
78741 +++ b/sound/firewire/amdtp.c
78742 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
78743 ptr = s->pcm_buffer_pointer + data_blocks;
78744 if (ptr >= pcm->runtime->buffer_size)
78745 ptr -= pcm->runtime->buffer_size;
78746 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
78747 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
78748
78749 s->pcm_period_pointer += data_blocks;
78750 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
78751 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
78752 */
78753 void amdtp_out_stream_update(struct amdtp_out_stream *s)
78754 {
78755 - ACCESS_ONCE(s->source_node_id_field) =
78756 + ACCESS_ONCE_RW(s->source_node_id_field) =
78757 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
78758 }
78759 EXPORT_SYMBOL(amdtp_out_stream_update);
78760 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
78761 index 537a9cb..8e8c8e9 100644
78762 --- a/sound/firewire/amdtp.h
78763 +++ b/sound/firewire/amdtp.h
78764 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
78765 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
78766 struct snd_pcm_substream *pcm)
78767 {
78768 - ACCESS_ONCE(s->pcm) = pcm;
78769 + ACCESS_ONCE_RW(s->pcm) = pcm;
78770 }
78771
78772 /**
78773 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
78774 index d428ffe..751ef78 100644
78775 --- a/sound/firewire/isight.c
78776 +++ b/sound/firewire/isight.c
78777 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
78778 ptr += count;
78779 if (ptr >= runtime->buffer_size)
78780 ptr -= runtime->buffer_size;
78781 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
78782 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
78783
78784 isight->period_counter += count;
78785 if (isight->period_counter >= runtime->period_size) {
78786 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
78787 if (err < 0)
78788 return err;
78789
78790 - ACCESS_ONCE(isight->pcm_active) = true;
78791 + ACCESS_ONCE_RW(isight->pcm_active) = true;
78792
78793 return 0;
78794 }
78795 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
78796 {
78797 struct isight *isight = substream->private_data;
78798
78799 - ACCESS_ONCE(isight->pcm_active) = false;
78800 + ACCESS_ONCE_RW(isight->pcm_active) = false;
78801
78802 mutex_lock(&isight->mutex);
78803 isight_stop_streaming(isight);
78804 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
78805
78806 switch (cmd) {
78807 case SNDRV_PCM_TRIGGER_START:
78808 - ACCESS_ONCE(isight->pcm_running) = true;
78809 + ACCESS_ONCE_RW(isight->pcm_running) = true;
78810 break;
78811 case SNDRV_PCM_TRIGGER_STOP:
78812 - ACCESS_ONCE(isight->pcm_running) = false;
78813 + ACCESS_ONCE_RW(isight->pcm_running) = false;
78814 break;
78815 default:
78816 return -EINVAL;
78817 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
78818 index 7bd5e33..1fcab12 100644
78819 --- a/sound/isa/cmi8330.c
78820 +++ b/sound/isa/cmi8330.c
78821 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
78822
78823 struct snd_pcm *pcm;
78824 struct snd_cmi8330_stream {
78825 - struct snd_pcm_ops ops;
78826 + snd_pcm_ops_no_const ops;
78827 snd_pcm_open_callback_t open;
78828 void *private_data; /* sb or wss */
78829 } streams[2];
78830 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
78831 index 733b014..56ce96f 100644
78832 --- a/sound/oss/sb_audio.c
78833 +++ b/sound/oss/sb_audio.c
78834 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
78835 buf16 = (signed short *)(localbuf + localoffs);
78836 while (c)
78837 {
78838 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
78839 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
78840 if (copy_from_user(lbuf8,
78841 userbuf+useroffs + p,
78842 locallen))
78843 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
78844 index 09d4648..cf234c7 100644
78845 --- a/sound/oss/swarm_cs4297a.c
78846 +++ b/sound/oss/swarm_cs4297a.c
78847 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
78848 {
78849 struct cs4297a_state *s;
78850 u32 pwr, id;
78851 - mm_segment_t fs;
78852 int rval;
78853 #ifndef CONFIG_BCM_CS4297A_CSWARM
78854 u64 cfg;
78855 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
78856 if (!rval) {
78857 char *sb1250_duart_present;
78858
78859 +#if 0
78860 + mm_segment_t fs;
78861 fs = get_fs();
78862 set_fs(KERNEL_DS);
78863 -#if 0
78864 val = SOUND_MASK_LINE;
78865 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
78866 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
78867 val = initvol[i].vol;
78868 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
78869 }
78870 + set_fs(fs);
78871 // cs4297a_write_ac97(s, 0x18, 0x0808);
78872 #else
78873 // cs4297a_write_ac97(s, 0x5e, 0x180);
78874 cs4297a_write_ac97(s, 0x02, 0x0808);
78875 cs4297a_write_ac97(s, 0x18, 0x0808);
78876 #endif
78877 - set_fs(fs);
78878
78879 list_add(&s->list, &cs4297a_devs);
78880
78881 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
78882 index 56b4f74..7cfd41a 100644
78883 --- a/sound/pci/hda/hda_codec.h
78884 +++ b/sound/pci/hda/hda_codec.h
78885 @@ -611,7 +611,7 @@ struct hda_bus_ops {
78886 /* notify power-up/down from codec to controller */
78887 void (*pm_notify)(struct hda_bus *bus);
78888 #endif
78889 -};
78890 +} __no_const;
78891
78892 /* template to pass to the bus constructor */
78893 struct hda_bus_template {
78894 @@ -713,6 +713,7 @@ struct hda_codec_ops {
78895 #endif
78896 void (*reboot_notify)(struct hda_codec *codec);
78897 };
78898 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
78899
78900 /* record for amp information cache */
78901 struct hda_cache_head {
78902 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
78903 struct snd_pcm_substream *substream);
78904 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
78905 struct snd_pcm_substream *substream);
78906 -};
78907 +} __no_const;
78908
78909 /* PCM information for each substream */
78910 struct hda_pcm_stream {
78911 @@ -801,7 +802,7 @@ struct hda_codec {
78912 const char *modelname; /* model name for preset */
78913
78914 /* set by patch */
78915 - struct hda_codec_ops patch_ops;
78916 + hda_codec_ops_no_const patch_ops;
78917
78918 /* PCM to create, set by patch_ops.build_pcms callback */
78919 unsigned int num_pcms;
78920 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
78921 index 0da778a..bc38b84 100644
78922 --- a/sound/pci/ice1712/ice1712.h
78923 +++ b/sound/pci/ice1712/ice1712.h
78924 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
78925 unsigned int mask_flags; /* total mask bits */
78926 struct snd_akm4xxx_ops {
78927 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
78928 - } ops;
78929 + } __no_const ops;
78930 };
78931
78932 struct snd_ice1712_spdif {
78933 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
78934 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
78935 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
78936 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
78937 - } ops;
78938 + } __no_const ops;
78939 };
78940
78941
78942 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
78943 index a8159b81..5f006a5 100644
78944 --- a/sound/pci/ymfpci/ymfpci_main.c
78945 +++ b/sound/pci/ymfpci/ymfpci_main.c
78946 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
78947 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
78948 break;
78949 }
78950 - if (atomic_read(&chip->interrupt_sleep_count)) {
78951 - atomic_set(&chip->interrupt_sleep_count, 0);
78952 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
78953 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
78954 wake_up(&chip->interrupt_sleep);
78955 }
78956 __end:
78957 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
78958 continue;
78959 init_waitqueue_entry(&wait, current);
78960 add_wait_queue(&chip->interrupt_sleep, &wait);
78961 - atomic_inc(&chip->interrupt_sleep_count);
78962 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
78963 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
78964 remove_wait_queue(&chip->interrupt_sleep, &wait);
78965 }
78966 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
78967 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
78968 spin_unlock(&chip->reg_lock);
78969
78970 - if (atomic_read(&chip->interrupt_sleep_count)) {
78971 - atomic_set(&chip->interrupt_sleep_count, 0);
78972 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
78973 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
78974 wake_up(&chip->interrupt_sleep);
78975 }
78976 }
78977 @@ -2398,7 +2398,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
78978 spin_lock_init(&chip->reg_lock);
78979 spin_lock_init(&chip->voice_lock);
78980 init_waitqueue_head(&chip->interrupt_sleep);
78981 - atomic_set(&chip->interrupt_sleep_count, 0);
78982 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
78983 chip->card = card;
78984 chip->pci = pci;
78985 chip->irq = -1;
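The interrupt_sleep_count counter here moves to the *_unchecked atomic helpers, which this patch provides as the variant exempt from the PaX REFCOUNT overflow checks; a wakeup bookkeeping counter like this is not a reference count, so wrapping it is harmless. The matching field conversion lives in a header hunk outside this excerpt; assuming it follows the usual pattern, the pairing looks like:

    /* sound/pci/ymfpci/ymfpci.h (assumed hunk, not shown here): */
    atomic_unchecked_t interrupt_sleep_count;       /* was: atomic_t */

    /* callers then use the unchecked accessors, as in the hunks above: */
    atomic_inc_unchecked(&chip->interrupt_sleep_count);
    if (atomic_read_unchecked(&chip->interrupt_sleep_count))
            atomic_set_unchecked(&chip->interrupt_sleep_count, 0);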
78986 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
78987 index 0ad8dca..7186339 100644
78988 --- a/sound/soc/soc-pcm.c
78989 +++ b/sound/soc/soc-pcm.c
78990 @@ -641,7 +641,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
78991 struct snd_soc_platform *platform = rtd->platform;
78992 struct snd_soc_dai *codec_dai = rtd->codec_dai;
78993 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
78994 - struct snd_pcm_ops *soc_pcm_ops = &rtd->ops;
78995 + snd_pcm_ops_no_const *soc_pcm_ops = &rtd->ops;
78996 struct snd_pcm *pcm;
78997 char new_name[64];
78998 int ret = 0, playback = 0, capture = 0;
78999 diff --git a/sound/usb/card.h b/sound/usb/card.h
79000 index da5fa1a..113cd02 100644
79001 --- a/sound/usb/card.h
79002 +++ b/sound/usb/card.h
79003 @@ -45,6 +45,7 @@ struct snd_urb_ops {
79004 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
79005 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
79006 };
79007 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
79008
79009 struct snd_usb_substream {
79010 struct snd_usb_stream *stream;
79011 @@ -94,7 +95,7 @@ struct snd_usb_substream {
79012 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
79013 spinlock_t lock;
79014
79015 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
79016 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
79017 int last_frame_number; /* stored frame number */
79018 int last_delay; /* stored delay */
79019 };
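The sound changes above share one idiom: ops structures consisting purely of function pointers get constified by the plugin added below, and the few instances that must stay writable at runtime (filled in during probe or per-stream setup) are routed through a __no_const annotation or a *_no_const typedef (snd_pcm_ops_no_const, hda_codec_ops_no_const, snd_urb_ops_no_const). A minimal sketch of the mechanism, assuming __no_const expands to the plugin's attribute when the constify plugin is enabled (the macro itself is defined in the compiler headers elsewhere in this patch, not in this excerpt):

    #ifdef CONSTIFY_PLUGIN
    #define __no_const __attribute__((no_const))
    #else
    #define __no_const
    #endif

    struct widget_ops {                     /* all-fptr struct: plugin would constify it */
            int  (*open)(void *ctx);
            void (*close)(void *ctx);
    };

    typedef struct widget_ops __no_const widget_ops_no_const;

    struct widget {
            widget_ops_no_const ops;        /* filled in at runtime, so opted out */
    };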
79020 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
79021 new file mode 100644
79022 index 0000000..ca64170
79023 --- /dev/null
79024 +++ b/tools/gcc/Makefile
79025 @@ -0,0 +1,26 @@
79026 +#CC := gcc
79027 +#PLUGIN_SOURCE_FILES := pax_plugin.c
79028 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
79029 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
79030 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
79031 +
79032 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
79033 +CFLAGS_size_overflow_plugin.o := -Wno-missing-initializer
79034 +
79035 +hostlibs-y := constify_plugin.so
79036 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
79037 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
79038 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
79039 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
79040 +hostlibs-y += colorize_plugin.so
79041 +hostlibs-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
79042 +
79043 +always := $(hostlibs-y)
79044 +
79045 +constify_plugin-objs := constify_plugin.o
79046 +stackleak_plugin-objs := stackleak_plugin.o
79047 +kallocstat_plugin-objs := kallocstat_plugin.o
79048 +kernexec_plugin-objs := kernexec_plugin.o
79049 +checker_plugin-objs := checker_plugin.o
79050 +colorize_plugin-objs := colorize_plugin.o
79051 +size_overflow_plugin-objs := size_overflow_plugin.o
79052 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
79053 new file mode 100644
79054 index 0000000..d41b5af
79055 --- /dev/null
79056 +++ b/tools/gcc/checker_plugin.c
79057 @@ -0,0 +1,171 @@
79058 +/*
79059 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79060 + * Licensed under the GPL v2
79061 + *
79062 + * Note: the choice of the license means that the compilation process is
79063 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79064 + * but for the kernel it doesn't matter since it doesn't link against
79065 + * any of the gcc libraries
79066 + *
79067 + * gcc plugin to implement various sparse (source code checker) features
79068 + *
79069 + * TODO:
79070 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
79071 + *
79072 + * BUGS:
79073 + * - none known
79074 + */
79075 +#include "gcc-plugin.h"
79076 +#include "config.h"
79077 +#include "system.h"
79078 +#include "coretypes.h"
79079 +#include "tree.h"
79080 +#include "tree-pass.h"
79081 +#include "flags.h"
79082 +#include "intl.h"
79083 +#include "toplev.h"
79084 +#include "plugin.h"
79085 +//#include "expr.h" where are you...
79086 +#include "diagnostic.h"
79087 +#include "plugin-version.h"
79088 +#include "tm.h"
79089 +#include "function.h"
79090 +#include "basic-block.h"
79091 +#include "gimple.h"
79092 +#include "rtl.h"
79093 +#include "emit-rtl.h"
79094 +#include "tree-flow.h"
79095 +#include "target.h"
79096 +
79097 +extern void c_register_addr_space (const char *str, addr_space_t as);
79098 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
79099 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
79100 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
79101 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
79102 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
79103 +
79104 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79105 +extern rtx emit_move_insn(rtx x, rtx y);
79106 +
79107 +int plugin_is_GPL_compatible;
79108 +
79109 +static struct plugin_info checker_plugin_info = {
79110 + .version = "201111150100",
79111 +};
79112 +
79113 +#define ADDR_SPACE_KERNEL 0
79114 +#define ADDR_SPACE_FORCE_KERNEL 1
79115 +#define ADDR_SPACE_USER 2
79116 +#define ADDR_SPACE_FORCE_USER 3
79117 +#define ADDR_SPACE_IOMEM 0
79118 +#define ADDR_SPACE_FORCE_IOMEM 0
79119 +#define ADDR_SPACE_PERCPU 0
79120 +#define ADDR_SPACE_FORCE_PERCPU 0
79121 +#define ADDR_SPACE_RCU 0
79122 +#define ADDR_SPACE_FORCE_RCU 0
79123 +
79124 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
79125 +{
79126 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
79127 +}
79128 +
79129 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
79130 +{
79131 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
79132 +}
79133 +
79134 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
79135 +{
79136 + return default_addr_space_valid_pointer_mode(mode, as);
79137 +}
79138 +
79139 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
79140 +{
79141 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
79142 +}
79143 +
79144 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
79145 +{
79146 + return default_addr_space_legitimize_address(x, oldx, mode, as);
79147 +}
79148 +
79149 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
79150 +{
79151 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
79152 + return true;
79153 +
79154 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
79155 + return true;
79156 +
79157 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
79158 + return true;
79159 +
79160 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
79161 + return true;
79162 +
79163 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
79164 + return true;
79165 +
79166 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
79167 + return true;
79168 +
79169 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
79170 + return true;
79171 +
79172 + return subset == superset;
79173 +}
79174 +
79175 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
79176 +{
79177 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
79178 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
79179 +
79180 + return op;
79181 +}
79182 +
79183 +static void register_checker_address_spaces(void *event_data, void *data)
79184 +{
79185 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
79186 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
79187 + c_register_addr_space("__user", ADDR_SPACE_USER);
79188 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
79189 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
79190 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
79191 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
79192 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
79193 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
79194 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
79195 +
79196 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
79197 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
79198 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
79199 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
79200 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
79201 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
79202 + targetm.addr_space.convert = checker_addr_space_convert;
79203 +}
79204 +
79205 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79206 +{
79207 + const char * const plugin_name = plugin_info->base_name;
79208 + const int argc = plugin_info->argc;
79209 + const struct plugin_argument * const argv = plugin_info->argv;
79210 + int i;
79211 +
79212 + if (!plugin_default_version_check(version, &gcc_version)) {
79213 + error(G_("incompatible gcc/plugin versions"));
79214 + return 1;
79215 + }
79216 +
79217 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
79218 +
79219 + for (i = 0; i < argc; ++i)
79220 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79221 +
79222 + if (TARGET_64BIT == 0)
79223 + return 0;
79224 +
79225 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
79226 +
79227 + return 0;
79228 +}
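This plugin turns the sparse-style __user/__force_user annotations into real gcc named address spaces (the __iomem/__percpu/__rcu registrations are left commented out, per the TODO). The intent, shown with a hedged example rather than anything from the patch, is that mixing address spaces becomes a type-level error inside gcc itself; the exact diagnostics depend on gcc's generic address-space handling:

    void __user *ubuf;                              /* user address space */
    void *kbuf;                                     /* generic kernel pointer */

    kbuf = ubuf;                                    /* should be rejected: distinct address spaces */
    kbuf = (void __force_kernel *)ubuf;             /* deliberate override, analogous to sparse's __force */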
79229 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
79230 new file mode 100644
79231 index 0000000..ee950d0
79232 --- /dev/null
79233 +++ b/tools/gcc/colorize_plugin.c
79234 @@ -0,0 +1,147 @@
79235 +/*
79236 + * Copyright 2012 by PaX Team <pageexec@freemail.hu>
79237 + * Licensed under the GPL v2
79238 + *
79239 + * Note: the choice of the license means that the compilation process is
79240 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79241 + * but for the kernel it doesn't matter since it doesn't link against
79242 + * any of the gcc libraries
79243 + *
79244 + * gcc plugin to colorize diagnostic output
79245 + *
79246 + */
79247 +
79248 +#include "gcc-plugin.h"
79249 +#include "config.h"
79250 +#include "system.h"
79251 +#include "coretypes.h"
79252 +#include "tree.h"
79253 +#include "tree-pass.h"
79254 +#include "flags.h"
79255 +#include "intl.h"
79256 +#include "toplev.h"
79257 +#include "plugin.h"
79258 +#include "diagnostic.h"
79259 +#include "plugin-version.h"
79260 +#include "tm.h"
79261 +
79262 +int plugin_is_GPL_compatible;
79263 +
79264 +static struct plugin_info colorize_plugin_info = {
79265 + .version = "201203092200",
79266 +};
79267 +
79268 +#define GREEN "\033[32m\033[2m"
79269 +#define LIGHTGREEN "\033[32m\033[1m"
79270 +#define YELLOW "\033[33m\033[2m"
79271 +#define LIGHTYELLOW "\033[33m\033[1m"
79272 +#define RED "\033[31m\033[2m"
79273 +#define LIGHTRED "\033[31m\033[1m"
79274 +#define BLUE "\033[34m\033[2m"
79275 +#define LIGHTBLUE "\033[34m\033[1m"
79276 +#define BRIGHT "\033[m\033[1m"
79277 +#define NORMAL "\033[m"
79278 +
79279 +static diagnostic_starter_fn old_starter;
79280 +static diagnostic_finalizer_fn old_finalizer;
79281 +
79282 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
79283 +{
79284 + const char *color;
79285 + char *newprefix;
79286 +
79287 + switch (diagnostic->kind) {
79288 + case DK_NOTE:
79289 + color = LIGHTBLUE;
79290 + break;
79291 +
79292 + case DK_PEDWARN:
79293 + case DK_WARNING:
79294 + color = LIGHTYELLOW;
79295 + break;
79296 +
79297 + case DK_ERROR:
79298 + case DK_FATAL:
79299 + case DK_ICE:
79300 + case DK_PERMERROR:
79301 + case DK_SORRY:
79302 + color = LIGHTRED;
79303 + break;
79304 +
79305 + default:
79306 + color = NORMAL;
79307 + }
79308 +
79309 + old_starter(context, diagnostic);
79310 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
79311 + return;
79312 + pp_destroy_prefix(context->printer);
79313 + pp_set_prefix(context->printer, newprefix);
79314 +}
79315 +
79316 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
79317 +{
79318 + old_finalizer(context, diagnostic);
79319 +}
79320 +
79321 +static void colorize_arm(void)
79322 +{
79323 + old_starter = diagnostic_starter(global_dc);
79324 + old_finalizer = diagnostic_finalizer(global_dc);
79325 +
79326 + diagnostic_starter(global_dc) = start_colorize;
79327 + diagnostic_finalizer(global_dc) = finalize_colorize;
79328 +}
79329 +
79330 +static unsigned int execute_colorize_rearm(void)
79331 +{
79332 + if (diagnostic_starter(global_dc) == start_colorize)
79333 + return 0;
79334 +
79335 + colorize_arm();
79336 + return 0;
79337 +}
79338 +
79339 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
79340 + .pass = {
79341 + .type = SIMPLE_IPA_PASS,
79342 + .name = "colorize_rearm",
79343 + .gate = NULL,
79344 + .execute = execute_colorize_rearm,
79345 + .sub = NULL,
79346 + .next = NULL,
79347 + .static_pass_number = 0,
79348 + .tv_id = TV_NONE,
79349 + .properties_required = 0,
79350 + .properties_provided = 0,
79351 + .properties_destroyed = 0,
79352 + .todo_flags_start = 0,
79353 + .todo_flags_finish = 0
79354 + }
79355 +};
79356 +
79357 +static void colorize_start_unit(void *gcc_data, void *user_data)
79358 +{
79359 + colorize_arm();
79360 +}
79361 +
79362 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79363 +{
79364 + const char * const plugin_name = plugin_info->base_name;
79365 + struct register_pass_info colorize_rearm_pass_info = {
79366 + .pass = &pass_ipa_colorize_rearm.pass,
79367 + .reference_pass_name = "*free_lang_data",
79368 + .ref_pass_instance_number = 0,
79369 + .pos_op = PASS_POS_INSERT_AFTER
79370 + };
79371 +
79372 + if (!plugin_default_version_check(version, &gcc_version)) {
79373 + error(G_("incompatible gcc/plugin versions"));
79374 + return 1;
79375 + }
79376 +
79377 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
79378 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
79379 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
79380 + return 0;
79381 +}
79382 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
79383 new file mode 100644
79384 index 0000000..89b7f56
79385 --- /dev/null
79386 +++ b/tools/gcc/constify_plugin.c
79387 @@ -0,0 +1,328 @@
79388 +/*
79389 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
79390 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
79391 + * Licensed under the GPL v2, or (at your option) v3
79392 + *
79393 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
79394 + *
79395 + * Homepage:
79396 + * http://www.grsecurity.net/~ephox/const_plugin/
79397 + *
79398 + * Usage:
79399 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
79400 + * $ gcc -fplugin=constify_plugin.so test.c -O2
79401 + */
79402 +
79403 +#include "gcc-plugin.h"
79404 +#include "config.h"
79405 +#include "system.h"
79406 +#include "coretypes.h"
79407 +#include "tree.h"
79408 +#include "tree-pass.h"
79409 +#include "flags.h"
79410 +#include "intl.h"
79411 +#include "toplev.h"
79412 +#include "plugin.h"
79413 +#include "diagnostic.h"
79414 +#include "plugin-version.h"
79415 +#include "tm.h"
79416 +#include "function.h"
79417 +#include "basic-block.h"
79418 +#include "gimple.h"
79419 +#include "rtl.h"
79420 +#include "emit-rtl.h"
79421 +#include "tree-flow.h"
79422 +
79423 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
79424 +
79425 +int plugin_is_GPL_compatible;
79426 +
79427 +static struct plugin_info const_plugin_info = {
79428 + .version = "201205300030",
79429 + .help = "no-constify\tturn off constification\n",
79430 +};
79431 +
79432 +static void deconstify_tree(tree node);
79433 +
79434 +static void deconstify_type(tree type)
79435 +{
79436 + tree field;
79437 +
79438 + for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
79439 + tree type = TREE_TYPE(field);
79440 +
79441 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
79442 + continue;
79443 + if (!TYPE_READONLY(type))
79444 + continue;
79445 +
79446 + deconstify_tree(field);
79447 + }
79448 + TYPE_READONLY(type) = 0;
79449 + C_TYPE_FIELDS_READONLY(type) = 0;
79450 +}
79451 +
79452 +static void deconstify_tree(tree node)
79453 +{
79454 + tree old_type, new_type, field;
79455 +
79456 + old_type = TREE_TYPE(node);
79457 +
79458 + gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
79459 +
79460 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
79461 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
79462 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
79463 + DECL_FIELD_CONTEXT(field) = new_type;
79464 +
79465 + deconstify_type(new_type);
79466 +
79467 + TREE_READONLY(node) = 0;
79468 + TREE_TYPE(node) = new_type;
79469 +}
79470 +
79471 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
79472 +{
79473 + tree type;
79474 +
79475 + *no_add_attrs = true;
79476 + if (TREE_CODE(*node) == FUNCTION_DECL) {
79477 + error("%qE attribute does not apply to functions", name);
79478 + return NULL_TREE;
79479 + }
79480 +
79481 + if (TREE_CODE(*node) == VAR_DECL) {
79482 + error("%qE attribute does not apply to variables", name);
79483 + return NULL_TREE;
79484 + }
79485 +
79486 + if (TYPE_P(*node)) {
79487 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
79488 + *no_add_attrs = false;
79489 + else
79490 + error("%qE attribute applies to struct and union types only", name);
79491 + return NULL_TREE;
79492 + }
79493 +
79494 + type = TREE_TYPE(*node);
79495 +
79496 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
79497 + error("%qE attribute applies to struct and union types only", name);
79498 + return NULL_TREE;
79499 + }
79500 +
79501 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
79502 + error("%qE attribute is already applied to the type", name);
79503 + return NULL_TREE;
79504 + }
79505 +
79506 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
79507 + error("%qE attribute used on type that is not constified", name);
79508 + return NULL_TREE;
79509 + }
79510 +
79511 + if (TREE_CODE(*node) == TYPE_DECL) {
79512 + deconstify_tree(*node);
79513 + return NULL_TREE;
79514 + }
79515 +
79516 + return NULL_TREE;
79517 +}
79518 +
79519 +static void constify_type(tree type)
79520 +{
79521 + TYPE_READONLY(type) = 1;
79522 + C_TYPE_FIELDS_READONLY(type) = 1;
79523 +}
79524 +
79525 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
79526 +{
79527 + *no_add_attrs = true;
79528 + if (!TYPE_P(*node)) {
79529 + error("%qE attribute applies to types only", name);
79530 + return NULL_TREE;
79531 + }
79532 +
79533 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
79534 + error("%qE attribute applies to struct and union types only", name);
79535 + return NULL_TREE;
79536 + }
79537 +
79538 + *no_add_attrs = false;
79539 + constify_type(*node);
79540 + return NULL_TREE;
79541 +}
79542 +
79543 +static struct attribute_spec no_const_attr = {
79544 + .name = "no_const",
79545 + .min_length = 0,
79546 + .max_length = 0,
79547 + .decl_required = false,
79548 + .type_required = false,
79549 + .function_type_required = false,
79550 + .handler = handle_no_const_attribute,
79551 +#if BUILDING_GCC_VERSION >= 4007
79552 + .affects_type_identity = true
79553 +#endif
79554 +};
79555 +
79556 +static struct attribute_spec do_const_attr = {
79557 + .name = "do_const",
79558 + .min_length = 0,
79559 + .max_length = 0,
79560 + .decl_required = false,
79561 + .type_required = false,
79562 + .function_type_required = false,
79563 + .handler = handle_do_const_attribute,
79564 +#if BUILDING_GCC_VERSION >= 4007
79565 + .affects_type_identity = true
79566 +#endif
79567 +};
79568 +
79569 +static void register_attributes(void *event_data, void *data)
79570 +{
79571 + register_attribute(&no_const_attr);
79572 + register_attribute(&do_const_attr);
79573 +}
79574 +
79575 +static bool is_fptr(tree field)
79576 +{
79577 + tree ptr = TREE_TYPE(field);
79578 +
79579 + if (TREE_CODE(ptr) != POINTER_TYPE)
79580 + return false;
79581 +
79582 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
79583 +}
79584 +
79585 +static bool walk_struct(tree node)
79586 +{
79587 + tree field;
79588 +
79589 + if (TYPE_FIELDS(node) == NULL_TREE)
79590 + return false;
79591 +
79592 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
79593 + gcc_assert(!TYPE_READONLY(node));
79594 + deconstify_type(node);
79595 + return false;
79596 + }
79597 +
79598 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
79599 + tree type = TREE_TYPE(field);
79600 + enum tree_code code = TREE_CODE(type);
79601 + if (code == RECORD_TYPE || code == UNION_TYPE) {
79602 + if (!(walk_struct(type)))
79603 + return false;
79604 + } else if (!is_fptr(field) && !TREE_READONLY(field))
79605 + return false;
79606 + }
79607 + return true;
79608 +}
79609 +
79610 +static void finish_type(void *event_data, void *data)
79611 +{
79612 + tree type = (tree)event_data;
79613 +
79614 + if (type == NULL_TREE)
79615 + return;
79616 +
79617 + if (TYPE_READONLY(type))
79618 + return;
79619 +
79620 + if (walk_struct(type))
79621 + constify_type(type);
79622 +}
79623 +
79624 +static unsigned int check_local_variables(void);
79625 +
79626 +struct gimple_opt_pass pass_local_variable = {
79627 + {
79628 + .type = GIMPLE_PASS,
79629 + .name = "check_local_variables",
79630 + .gate = NULL,
79631 + .execute = check_local_variables,
79632 + .sub = NULL,
79633 + .next = NULL,
79634 + .static_pass_number = 0,
79635 + .tv_id = TV_NONE,
79636 + .properties_required = 0,
79637 + .properties_provided = 0,
79638 + .properties_destroyed = 0,
79639 + .todo_flags_start = 0,
79640 + .todo_flags_finish = 0
79641 + }
79642 +};
79643 +
79644 +static unsigned int check_local_variables(void)
79645 +{
79646 + tree var;
79647 + referenced_var_iterator rvi;
79648 +
79649 +#if BUILDING_GCC_VERSION == 4005
79650 + FOR_EACH_REFERENCED_VAR(var, rvi) {
79651 +#else
79652 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
79653 +#endif
79654 + tree type = TREE_TYPE(var);
79655 +
79656 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
79657 + continue;
79658 +
79659 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
79660 + continue;
79661 +
79662 + if (!TYPE_READONLY(type))
79663 + continue;
79664 +
79665 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
79666 +// continue;
79667 +
79668 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
79669 +// continue;
79670 +
79671 + if (walk_struct(type)) {
79672 + error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
79673 + return 1;
79674 + }
79675 + }
79676 + return 0;
79677 +}
79678 +
79679 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79680 +{
79681 + const char * const plugin_name = plugin_info->base_name;
79682 + const int argc = plugin_info->argc;
79683 + const struct plugin_argument * const argv = plugin_info->argv;
79684 + int i;
79685 + bool constify = true;
79686 +
79687 + struct register_pass_info local_variable_pass_info = {
79688 + .pass = &pass_local_variable.pass,
79689 + .reference_pass_name = "*referenced_vars",
79690 + .ref_pass_instance_number = 0,
79691 + .pos_op = PASS_POS_INSERT_AFTER
79692 + };
79693 +
79694 + if (!plugin_default_version_check(version, &gcc_version)) {
79695 + error(G_("incompatible gcc/plugin versions"));
79696 + return 1;
79697 + }
79698 +
79699 + for (i = 0; i < argc; ++i) {
79700 + if (!(strcmp(argv[i].key, "no-constify"))) {
79701 + constify = false;
79702 + continue;
79703 + }
79704 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79705 + }
79706 +
79707 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
79708 + if (constify) {
79709 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
79710 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
79711 + }
79712 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
79713 +
79714 + return 0;
79715 +}
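In short: finish_type() constifies every struct or union whose members are all function pointers (or nested all-fptr structs), handle_no_const_attribute()/deconstify_tree() let annotated types opt back out, and check_local_variables() rejects stack instances of constified types. An illustrative input, not taken from the kernel:

    struct fops_like {                      /* all members are function pointers */
            int (*read)(char *, int);
            int (*write)(const char *, int);
    };                                      /* -> plugin marks the type read-only */

    struct mutable_ops {
            int (*read)(char *, int);
    } __attribute__((no_const));            /* -> opts out, stays writable */

    void f(void)
    {
            struct fops_like local_ops;     /* error: constified variable 'local_ops' cannot be local */
            (void)local_ops;
    }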
79716 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
79717 new file mode 100644
79718 index 0000000..a5eabce
79719 --- /dev/null
79720 +++ b/tools/gcc/kallocstat_plugin.c
79721 @@ -0,0 +1,167 @@
79722 +/*
79723 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79724 + * Licensed under the GPL v2
79725 + *
79726 + * Note: the choice of the license means that the compilation process is
79727 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79728 + * but for the kernel it doesn't matter since it doesn't link against
79729 + * any of the gcc libraries
79730 + *
79731 + * gcc plugin to find the distribution of k*alloc sizes
79732 + *
79733 + * TODO:
79734 + *
79735 + * BUGS:
79736 + * - none known
79737 + */
79738 +#include "gcc-plugin.h"
79739 +#include "config.h"
79740 +#include "system.h"
79741 +#include "coretypes.h"
79742 +#include "tree.h"
79743 +#include "tree-pass.h"
79744 +#include "flags.h"
79745 +#include "intl.h"
79746 +#include "toplev.h"
79747 +#include "plugin.h"
79748 +//#include "expr.h" where are you...
79749 +#include "diagnostic.h"
79750 +#include "plugin-version.h"
79751 +#include "tm.h"
79752 +#include "function.h"
79753 +#include "basic-block.h"
79754 +#include "gimple.h"
79755 +#include "rtl.h"
79756 +#include "emit-rtl.h"
79757 +
79758 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79759 +
79760 +int plugin_is_GPL_compatible;
79761 +
79762 +static const char * const kalloc_functions[] = {
79763 + "__kmalloc",
79764 + "kmalloc",
79765 + "kmalloc_large",
79766 + "kmalloc_node",
79767 + "kmalloc_order",
79768 + "kmalloc_order_trace",
79769 + "kmalloc_slab",
79770 + "kzalloc",
79771 + "kzalloc_node",
79772 +};
79773 +
79774 +static struct plugin_info kallocstat_plugin_info = {
79775 + .version = "201111150100",
79776 +};
79777 +
79778 +static unsigned int execute_kallocstat(void);
79779 +
79780 +static struct gimple_opt_pass kallocstat_pass = {
79781 + .pass = {
79782 + .type = GIMPLE_PASS,
79783 + .name = "kallocstat",
79784 + .gate = NULL,
79785 + .execute = execute_kallocstat,
79786 + .sub = NULL,
79787 + .next = NULL,
79788 + .static_pass_number = 0,
79789 + .tv_id = TV_NONE,
79790 + .properties_required = 0,
79791 + .properties_provided = 0,
79792 + .properties_destroyed = 0,
79793 + .todo_flags_start = 0,
79794 + .todo_flags_finish = 0
79795 + }
79796 +};
79797 +
79798 +static bool is_kalloc(const char *fnname)
79799 +{
79800 + size_t i;
79801 +
79802 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
79803 + if (!strcmp(fnname, kalloc_functions[i]))
79804 + return true;
79805 + return false;
79806 +}
79807 +
79808 +static unsigned int execute_kallocstat(void)
79809 +{
79810 + basic_block bb;
79811 +
79812 + // 1. loop through BBs and GIMPLE statements
79813 + FOR_EACH_BB(bb) {
79814 + gimple_stmt_iterator gsi;
79815 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
79816 + // gimple match:
79817 + tree fndecl, size;
79818 + gimple call_stmt;
79819 + const char *fnname;
79820 +
79821 + // is it a call
79822 + call_stmt = gsi_stmt(gsi);
79823 + if (!is_gimple_call(call_stmt))
79824 + continue;
79825 + fndecl = gimple_call_fndecl(call_stmt);
79826 + if (fndecl == NULL_TREE)
79827 + continue;
79828 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
79829 + continue;
79830 +
79831 + // is it a call to k*alloc
79832 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
79833 + if (!is_kalloc(fnname))
79834 + continue;
79835 +
79836 + // is the size arg the result of a simple const assignment
79837 + size = gimple_call_arg(call_stmt, 0);
79838 + while (true) {
79839 + gimple def_stmt;
79840 + expanded_location xloc;
79841 + size_t size_val;
79842 +
79843 + if (TREE_CODE(size) != SSA_NAME)
79844 + break;
79845 + def_stmt = SSA_NAME_DEF_STMT(size);
79846 + if (!def_stmt || !is_gimple_assign(def_stmt))
79847 + break;
79848 + if (gimple_num_ops(def_stmt) != 2)
79849 + break;
79850 + size = gimple_assign_rhs1(def_stmt);
79851 + if (!TREE_CONSTANT(size))
79852 + continue;
79853 + xloc = expand_location(gimple_location(def_stmt));
79854 + if (!xloc.file)
79855 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
79856 + size_val = TREE_INT_CST_LOW(size);
79857 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
79858 + break;
79859 + }
79860 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
79861 +//debug_tree(gimple_call_fn(call_stmt));
79862 +//print_node(stderr, "pax", fndecl, 4);
79863 + }
79864 + }
79865 +
79866 + return 0;
79867 +}
79868 +
79869 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79870 +{
79871 + const char * const plugin_name = plugin_info->base_name;
79872 + struct register_pass_info kallocstat_pass_info = {
79873 + .pass = &kallocstat_pass.pass,
79874 + .reference_pass_name = "ssa",
79875 + .ref_pass_instance_number = 0,
79876 + .pos_op = PASS_POS_INSERT_AFTER
79877 + };
79878 +
79879 + if (!plugin_default_version_check(version, &gcc_version)) {
79880 + error(G_("incompatible gcc/plugin versions"));
79881 + return 1;
79882 + }
79883 +
79884 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
79885 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
79886 +
79887 + return 0;
79888 +}
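The pass matches every GIMPLE call against kalloc_functions[], chases the size argument back through single assignments to a constant, and prints one "kallocsize:" line per allocation site via fprintf(stderr, ...). With the format string above, a report line would look roughly like this (values invented for illustration):

    kallocsize:      192       c0 kzalloc drivers/foo/bar.c:123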
79889 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
79890 new file mode 100644
79891 index 0000000..d8a8da2
79892 --- /dev/null
79893 +++ b/tools/gcc/kernexec_plugin.c
79894 @@ -0,0 +1,427 @@
79895 +/*
79896 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79897 + * Licensed under the GPL v2
79898 + *
79899 + * Note: the choice of the license means that the compilation process is
79900 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79901 + * but for the kernel it doesn't matter since it doesn't link against
79902 + * any of the gcc libraries
79903 + *
79904 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
79905 + *
79906 + * TODO:
79907 + *
79908 + * BUGS:
79909 + * - none known
79910 + */
79911 +#include "gcc-plugin.h"
79912 +#include "config.h"
79913 +#include "system.h"
79914 +#include "coretypes.h"
79915 +#include "tree.h"
79916 +#include "tree-pass.h"
79917 +#include "flags.h"
79918 +#include "intl.h"
79919 +#include "toplev.h"
79920 +#include "plugin.h"
79921 +//#include "expr.h" where are you...
79922 +#include "diagnostic.h"
79923 +#include "plugin-version.h"
79924 +#include "tm.h"
79925 +#include "function.h"
79926 +#include "basic-block.h"
79927 +#include "gimple.h"
79928 +#include "rtl.h"
79929 +#include "emit-rtl.h"
79930 +#include "tree-flow.h"
79931 +
79932 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79933 +extern rtx emit_move_insn(rtx x, rtx y);
79934 +
79935 +int plugin_is_GPL_compatible;
79936 +
79937 +static struct plugin_info kernexec_plugin_info = {
79938 + .version = "201111291120",
79939 + .help = "method=[bts|or]\tinstrumentation method\n"
79940 +};
79941 +
79942 +static unsigned int execute_kernexec_reload(void);
79943 +static unsigned int execute_kernexec_fptr(void);
79944 +static unsigned int execute_kernexec_retaddr(void);
79945 +static bool kernexec_cmodel_check(void);
79946 +
79947 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
79948 +static void (*kernexec_instrument_retaddr)(rtx);
79949 +
79950 +static struct gimple_opt_pass kernexec_reload_pass = {
79951 + .pass = {
79952 + .type = GIMPLE_PASS,
79953 + .name = "kernexec_reload",
79954 + .gate = kernexec_cmodel_check,
79955 + .execute = execute_kernexec_reload,
79956 + .sub = NULL,
79957 + .next = NULL,
79958 + .static_pass_number = 0,
79959 + .tv_id = TV_NONE,
79960 + .properties_required = 0,
79961 + .properties_provided = 0,
79962 + .properties_destroyed = 0,
79963 + .todo_flags_start = 0,
79964 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
79965 + }
79966 +};
79967 +
79968 +static struct gimple_opt_pass kernexec_fptr_pass = {
79969 + .pass = {
79970 + .type = GIMPLE_PASS,
79971 + .name = "kernexec_fptr",
79972 + .gate = kernexec_cmodel_check,
79973 + .execute = execute_kernexec_fptr,
79974 + .sub = NULL,
79975 + .next = NULL,
79976 + .static_pass_number = 0,
79977 + .tv_id = TV_NONE,
79978 + .properties_required = 0,
79979 + .properties_provided = 0,
79980 + .properties_destroyed = 0,
79981 + .todo_flags_start = 0,
79982 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
79983 + }
79984 +};
79985 +
79986 +static struct rtl_opt_pass kernexec_retaddr_pass = {
79987 + .pass = {
79988 + .type = RTL_PASS,
79989 + .name = "kernexec_retaddr",
79990 + .gate = kernexec_cmodel_check,
79991 + .execute = execute_kernexec_retaddr,
79992 + .sub = NULL,
79993 + .next = NULL,
79994 + .static_pass_number = 0,
79995 + .tv_id = TV_NONE,
79996 + .properties_required = 0,
79997 + .properties_provided = 0,
79998 + .properties_destroyed = 0,
79999 + .todo_flags_start = 0,
80000 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
80001 + }
80002 +};
80003 +
80004 +static bool kernexec_cmodel_check(void)
80005 +{
80006 + tree section;
80007 +
80008 + if (ix86_cmodel != CM_KERNEL)
80009 + return false;
80010 +
80011 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
80012 + if (!section || !TREE_VALUE(section))
80013 + return true;
80014 +
80015 + section = TREE_VALUE(TREE_VALUE(section));
80016 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
80017 + return true;
80018 +
80019 + return false;
80020 +}
80021 +
80022 +/*
80023 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
80024 + */
80025 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
80026 +{
80027 + gimple asm_movabs_stmt;
80028 +
80029 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
80030 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
80031 + gimple_asm_set_volatile(asm_movabs_stmt, true);
80032 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
80033 + update_stmt(asm_movabs_stmt);
80034 +}
80035 +
80036 +/*
80037 + * find all asm() stmts that clobber r10 and add a reload of r10
80038 + */
80039 +static unsigned int execute_kernexec_reload(void)
80040 +{
80041 + basic_block bb;
80042 +
80043 + // 1. loop through BBs and GIMPLE statements
80044 + FOR_EACH_BB(bb) {
80045 + gimple_stmt_iterator gsi;
80046 +
80047 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80048 + // gimple match: __asm__ ("" : : : "r10");
80049 + gimple asm_stmt;
80050 + size_t nclobbers;
80051 +
80052 + // is it an asm ...
80053 + asm_stmt = gsi_stmt(gsi);
80054 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
80055 + continue;
80056 +
80057 + // ... clobbering r10
80058 + nclobbers = gimple_asm_nclobbers(asm_stmt);
80059 + while (nclobbers--) {
80060 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
80061 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
80062 + continue;
80063 + kernexec_reload_fptr_mask(&gsi);
80064 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
80065 + break;
80066 + }
80067 + }
80068 + }
80069 +
80070 + return 0;
80071 +}
80072 +
80073 +/*
80074 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
80075 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
80076 + */
80077 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
80078 +{
80079 + gimple assign_intptr, assign_new_fptr, call_stmt;
80080 + tree intptr, old_fptr, new_fptr, kernexec_mask;
80081 +
80082 + call_stmt = gsi_stmt(*gsi);
80083 + old_fptr = gimple_call_fn(call_stmt);
80084 +
80085 + // create temporary unsigned long variable used for bitops and cast fptr to it
80086 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
80087 + add_referenced_var(intptr);
80088 + mark_sym_for_renaming(intptr);
80089 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
80090 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
80091 + update_stmt(assign_intptr);
80092 +
80093 + // apply logical or to temporary unsigned long and bitmask
80094 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
80095 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
80096 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
80097 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
80098 + update_stmt(assign_intptr);
80099 +
80100 + // cast temporary unsigned long back to a temporary fptr variable
80101 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
80102 + add_referenced_var(new_fptr);
80103 + mark_sym_for_renaming(new_fptr);
80104 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
80105 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
80106 + update_stmt(assign_new_fptr);
80107 +
80108 + // replace call stmt fn with the new fptr
80109 + gimple_call_set_fn(call_stmt, new_fptr);
80110 + update_stmt(call_stmt);
80111 +}
80112 +
80113 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
80114 +{
80115 + gimple asm_or_stmt, call_stmt;
80116 + tree old_fptr, new_fptr, input, output;
80117 + VEC(tree, gc) *inputs = NULL;
80118 + VEC(tree, gc) *outputs = NULL;
80119 +
80120 + call_stmt = gsi_stmt(*gsi);
80121 + old_fptr = gimple_call_fn(call_stmt);
80122 +
80123 + // create temporary fptr variable
80124 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
80125 + add_referenced_var(new_fptr);
80126 + mark_sym_for_renaming(new_fptr);
80127 +
80128 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
80129 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
80130 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
80131 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
80132 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
80133 + VEC_safe_push(tree, gc, inputs, input);
80134 + VEC_safe_push(tree, gc, outputs, output);
80135 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
80136 + gimple_asm_set_volatile(asm_or_stmt, true);
80137 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
80138 + update_stmt(asm_or_stmt);
80139 +
80140 + // replace call stmt fn with the new fptr
80141 + gimple_call_set_fn(call_stmt, new_fptr);
80142 + update_stmt(call_stmt);
80143 +}
80144 +
80145 +/*
80146 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
80147 + */
80148 +static unsigned int execute_kernexec_fptr(void)
80149 +{
80150 + basic_block bb;
80151 +
80152 + // 1. loop through BBs and GIMPLE statements
80153 + FOR_EACH_BB(bb) {
80154 + gimple_stmt_iterator gsi;
80155 +
80156 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80157 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
80158 + tree fn;
80159 + gimple call_stmt;
80160 +
80161 + // is it a call ...
80162 + call_stmt = gsi_stmt(gsi);
80163 + if (!is_gimple_call(call_stmt))
80164 + continue;
80165 + fn = gimple_call_fn(call_stmt);
80166 + if (TREE_CODE(fn) == ADDR_EXPR)
80167 + continue;
80168 + if (TREE_CODE(fn) != SSA_NAME)
80169 + gcc_unreachable();
80170 +
80171 + // ... through a function pointer
80172 + fn = SSA_NAME_VAR(fn);
80173 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
80174 + continue;
80175 + fn = TREE_TYPE(fn);
80176 + if (TREE_CODE(fn) != POINTER_TYPE)
80177 + continue;
80178 + fn = TREE_TYPE(fn);
80179 + if (TREE_CODE(fn) != FUNCTION_TYPE)
80180 + continue;
80181 +
80182 + kernexec_instrument_fptr(&gsi);
80183 +
80184 +//debug_tree(gimple_call_fn(call_stmt));
80185 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
80186 + }
80187 + }
80188 +
80189 + return 0;
80190 +}
80191 +
80192 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
80193 +static void kernexec_instrument_retaddr_bts(rtx insn)
80194 +{
80195 + rtx btsq;
80196 + rtvec argvec, constraintvec, labelvec;
80197 + int line;
80198 +
80199 + // create asm volatile("btsq $63,(%%rsp)":::)
80200 + argvec = rtvec_alloc(0);
80201 + constraintvec = rtvec_alloc(0);
80202 + labelvec = rtvec_alloc(0);
80203 + line = expand_location(RTL_LOCATION(insn)).line;
80204 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
80205 + MEM_VOLATILE_P(btsq) = 1;
80206 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
80207 + emit_insn_before(btsq, insn);
80208 +}
80209 +
80210 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
80211 +static void kernexec_instrument_retaddr_or(rtx insn)
80212 +{
80213 + rtx orq;
80214 + rtvec argvec, constraintvec, labelvec;
80215 + int line;
80216 +
80217 + // create asm volatile("orq %%r10,(%%rsp)":::)
80218 + argvec = rtvec_alloc(0);
80219 + constraintvec = rtvec_alloc(0);
80220 + labelvec = rtvec_alloc(0);
80221 + line = expand_location(RTL_LOCATION(insn)).line;
80222 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
80223 + MEM_VOLATILE_P(orq) = 1;
80224 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
80225 + emit_insn_before(orq, insn);
80226 +}
80227 +
80228 +/*
80229 + * find all asm level function returns and forcibly set the highest bit of the return address
80230 + */
80231 +static unsigned int execute_kernexec_retaddr(void)
80232 +{
80233 + rtx insn;
80234 +
80235 + // 1. find function returns
80236 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
80237 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
80238 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
80239 + rtx body;
80240 +
80241 + // is it a retn
80242 + if (!JUMP_P(insn))
80243 + continue;
80244 + body = PATTERN(insn);
80245 + if (GET_CODE(body) == PARALLEL)
80246 + body = XVECEXP(body, 0, 0);
80247 + if (GET_CODE(body) != RETURN)
80248 + continue;
80249 + kernexec_instrument_retaddr(insn);
80250 + }
80251 +
80252 +// print_simple_rtl(stderr, get_insns());
80253 +// print_rtl(stderr, get_insns());
80254 +
80255 + return 0;
80256 +}
80257 +
80258 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80259 +{
80260 + const char * const plugin_name = plugin_info->base_name;
80261 + const int argc = plugin_info->argc;
80262 + const struct plugin_argument * const argv = plugin_info->argv;
80263 + int i;
80264 + struct register_pass_info kernexec_reload_pass_info = {
80265 + .pass = &kernexec_reload_pass.pass,
80266 + .reference_pass_name = "ssa",
80267 + .ref_pass_instance_number = 0,
80268 + .pos_op = PASS_POS_INSERT_AFTER
80269 + };
80270 + struct register_pass_info kernexec_fptr_pass_info = {
80271 + .pass = &kernexec_fptr_pass.pass,
80272 + .reference_pass_name = "ssa",
80273 + .ref_pass_instance_number = 0,
80274 + .pos_op = PASS_POS_INSERT_AFTER
80275 + };
80276 + struct register_pass_info kernexec_retaddr_pass_info = {
80277 + .pass = &kernexec_retaddr_pass.pass,
80278 + .reference_pass_name = "pro_and_epilogue",
80279 + .ref_pass_instance_number = 0,
80280 + .pos_op = PASS_POS_INSERT_AFTER
80281 + };
80282 +
80283 + if (!plugin_default_version_check(version, &gcc_version)) {
80284 + error(G_("incompatible gcc/plugin versions"));
80285 + return 1;
80286 + }
80287 +
80288 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
80289 +
80290 + if (TARGET_64BIT == 0)
80291 + return 0;
80292 +
80293 + for (i = 0; i < argc; ++i) {
80294 + if (!strcmp(argv[i].key, "method")) {
80295 + if (!argv[i].value) {
80296 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80297 + continue;
80298 + }
80299 + if (!strcmp(argv[i].value, "bts")) {
80300 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
80301 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
80302 + } else if (!strcmp(argv[i].value, "or")) {
80303 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
80304 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
80305 + fix_register("r10", 1, 1);
80306 + } else
80307 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
80308 + continue;
80309 + }
80310 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80311 + }
80312 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
80313 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
80314 +
80315 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
80316 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
80317 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
80318 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
80319 +
80320 + return 0;
80321 +}
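Both methods enforce the same amd64 invariant: kernel addresses have bit 63 set, so forcing the top bit of every indirect-call target and of the saved return address at function exit turns a pointer overwritten with a userland address into a non-canonical one that faults instead of executing. A hand-written equivalent of what the "or" method emits (illustrative only; the reload pass above keeps %r10 loaded with 0x8000000000000000):

    void (*fptr)(void);

    /* inserted before each call through a function pointer: */
    asm volatile("orq %%r10, %0\n\t" : "=r"(fptr) : "0"(fptr));
    fptr();

    /* and immediately before every return:
     *   orq  %r10,(%rsp)        ("or" method)
     *   btsq $63,(%rsp)         ("bts" method)
     */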
80322 diff --git a/tools/gcc/size_overflow_hash.h b/tools/gcc/size_overflow_hash.h
80323 new file mode 100644
80324 index 0000000..41de68c
80325 --- /dev/null
80326 +++ b/tools/gcc/size_overflow_hash.h
80327 @@ -0,0 +1,13146 @@
80328 +struct size_overflow_hash _000001_hash = {
80329 + .next = NULL,
80330 + .name = "alloc_dr",
80331 + .file = "drivers/base/devres.c",
80332 + .param2 = 1,
80333 +};
80334 +struct size_overflow_hash _000002_hash = {
80335 + .next = NULL,
80336 + .name = "__copy_from_user",
80337 + .file = "arch/x86/include/asm/uaccess_32.h",
80338 + .param3 = 1,
80339 +};
80340 +struct size_overflow_hash _000003_hash = {
80341 + .next = NULL,
80342 + .name = "copy_from_user",
80343 + .file = "arch/x86/include/asm/uaccess_32.h",
80344 + .param3 = 1,
80345 +};
80346 +struct size_overflow_hash _000004_hash = {
80347 + .next = NULL,
80348 + .name = "__copy_from_user_inatomic",
80349 + .file = "arch/x86/include/asm/uaccess_32.h",
80350 + .param3 = 1,
80351 +};
80352 +struct size_overflow_hash _000005_hash = {
80353 + .next = NULL,
80354 + .name = "__copy_from_user_nocache",
80355 + .file = "arch/x86/include/asm/uaccess_32.h",
80356 + .param3 = 1,
80357 +};
80358 +struct size_overflow_hash _000006_hash = {
80359 + .next = NULL,
80360 + .name = "__copy_to_user_inatomic",
80361 + .file = "arch/x86/include/asm/uaccess_32.h",
80362 + .param3 = 1,
80363 +};
80364 +struct size_overflow_hash _000007_hash = {
80365 + .next = NULL,
80366 + .name = "do_xip_mapping_read",
80367 + .file = "mm/filemap_xip.c",
80368 + .param5 = 1,
80369 +};
80370 +struct size_overflow_hash _000008_hash = {
80371 + .next = NULL,
80372 + .name = "hugetlbfs_read",
80373 + .file = "fs/hugetlbfs/inode.c",
80374 + .param3 = 1,
80375 +};
80376 +struct size_overflow_hash _000009_hash = {
80377 + .next = NULL,
80378 + .name = "kcalloc",
80379 + .file = "include/linux/slab.h",
80380 + .param1 = 1,
80381 + .param2 = 1,
80382 +};
80383 +struct size_overflow_hash _000011_hash = {
80384 + .next = NULL,
80385 + .name = "kmalloc",
80386 + .file = "include/linux/slub_def.h",
80387 + .param1 = 1,
80388 +};
80389 +struct size_overflow_hash _000012_hash = {
80390 + .next = NULL,
80391 + .name = "kmalloc_slab",
80392 + .file = "include/linux/slub_def.h",
80393 + .param1 = 1,
80394 +};
80395 +struct size_overflow_hash _000013_hash = {
80396 + .next = NULL,
80397 + .name = "kmemdup",
80398 + .file = "include/linux/string.h",
80399 + .param2 = 1,
80400 +};
80401 +struct size_overflow_hash _000014_hash = {
80402 + .next = NULL,
80403 + .name = "__krealloc",
80404 + .file = "include/linux/slab.h",
80405 + .param2 = 1,
80406 +};
80407 +struct size_overflow_hash _000015_hash = {
80408 + .next = NULL,
80409 + .name = "memdup_user",
80410 + .file = "include/linux/string.h",
80411 + .param2 = 1,
80412 +};
80413 +struct size_overflow_hash _000016_hash = {
80414 + .next = NULL,
80415 + .name = "module_alloc",
80416 + .file = "include/linux/moduleloader.h",
80417 + .param1 = 1,
80418 +};
80419 +struct size_overflow_hash _000017_hash = {
80420 + .next = NULL,
80421 + .name = "read_default_ldt",
80422 + .file = "arch/x86/kernel/ldt.c",
80423 + .param2 = 1,
80424 +};
80425 +struct size_overflow_hash _000018_hash = {
80426 + .next = NULL,
80427 + .name = "read_kcore",
80428 + .file = "fs/proc/kcore.c",
80429 + .param3 = 1,
80430 +};
80431 +struct size_overflow_hash _000019_hash = {
80432 + .next = NULL,
80433 + .name = "read_ldt",
80434 + .file = "arch/x86/kernel/ldt.c",
80435 + .param2 = 1,
80436 +};
80437 +struct size_overflow_hash _000020_hash = {
80438 + .next = NULL,
80439 + .name = "read_zero",
80440 + .file = "drivers/char/mem.c",
80441 + .param3 = 1,
80442 +};
80443 +struct size_overflow_hash _000021_hash = {
80444 + .next = NULL,
80445 + .name = "__vmalloc_node",
80446 + .file = "mm/vmalloc.c",
80447 + .param1 = 1,
80448 +};
80449 +struct size_overflow_hash _000022_hash = {
80450 + .next = NULL,
80451 + .name = "vm_map_ram",
80452 + .file = "include/linux/vmalloc.h",
80453 + .param2 = 1,
80454 +};
80455 +struct size_overflow_hash _000023_hash = {
80456 + .next = NULL,
80457 + .name = "aa_simple_write_to_buffer",
80458 + .file = "security/apparmor/apparmorfs.c",
80459 + .param4 = 1,
80460 +};
80461 +struct size_overflow_hash _000024_hash = {
80462 + .next = NULL,
80463 + .name = "ablkcipher_copy_iv",
80464 + .file = "crypto/ablkcipher.c",
80465 + .param3 = 1,
80466 +};
80467 +struct size_overflow_hash _000025_hash = {
80468 + .next = NULL,
80469 + .name = "ablkcipher_next_slow",
80470 + .file = "crypto/ablkcipher.c",
80471 + .param4 = 1,
80472 +};
80473 +struct size_overflow_hash _000026_hash = {
80474 + .next = NULL,
80475 + .name = "acpi_os_allocate",
80476 + .file = "include/acpi/platform/aclinux.h",
80477 + .param1 = 1,
80478 +};
80479 +struct size_overflow_hash _000027_hash = {
80480 + .next = NULL,
80481 + .name = "acpi_system_write_wakeup_device",
80482 + .file = "drivers/acpi/proc.c",
80483 + .param3 = 1,
80484 +};
80485 +struct size_overflow_hash _000028_hash = {
80486 + .next = NULL,
80487 + .name = "ahash_setkey_unaligned",
80488 + .file = "crypto/ahash.c",
80489 + .param3 = 1,
80490 +};
80491 +struct size_overflow_hash _000029_hash = {
80492 + .next = NULL,
80493 + .name = "alloc_fdmem",
80494 + .file = "fs/file.c",
80495 + .param1 = 1,
80496 +};
80497 +struct size_overflow_hash _000030_hash = {
80498 + .next = NULL,
80499 + .name = "audit_unpack_string",
80500 + .file = "kernel/auditfilter.c",
80501 + .param3 = 1,
80502 +};
80503 +struct size_overflow_hash _000031_hash = {
80504 + .next = NULL,
80505 + .name = "bio_alloc_map_data",
80506 + .file = "fs/bio.c",
80507 + .param2 = 1,
80508 +};
80509 +struct size_overflow_hash _000032_hash = {
80510 + .next = NULL,
80511 + .name = "bio_kmalloc",
80512 + .file = "include/linux/bio.h",
80513 + .param2 = 1,
80514 +};
80515 +struct size_overflow_hash _000033_hash = {
80516 + .next = NULL,
80517 + .name = "blkcipher_copy_iv",
80518 + .file = "crypto/blkcipher.c",
80519 + .param3 = 1,
80520 +};
80521 +struct size_overflow_hash _000034_hash = {
80522 + .next = NULL,
80523 + .name = "blkcipher_next_slow",
80524 + .file = "crypto/blkcipher.c",
80525 + .param4 = 1,
80526 +};
80527 +struct size_overflow_hash _000035_hash = {
80528 + .next = NULL,
80529 + .name = "cgroup_write_string",
80530 + .file = "kernel/cgroup.c",
80531 + .param5 = 1,
80532 +};
80533 +struct size_overflow_hash _000036_hash = {
80534 + .next = NULL,
80535 + .name = "cgroup_write_X64",
80536 + .file = "kernel/cgroup.c",
80537 + .param5 = 1,
80538 +};
80539 +struct size_overflow_hash _000037_hash = {
80540 + .next = NULL,
80541 + .name = "clear_refs_write",
80542 + .file = "fs/proc/task_mmu.c",
80543 + .param3 = 1,
80544 +};
80545 +struct size_overflow_hash _000038_hash = {
80546 + .next = NULL,
80547 + .name = "comm_write",
80548 + .file = "fs/proc/base.c",
80549 + .param3 = 1,
80550 +};
80551 +struct size_overflow_hash _000039_hash = {
80552 + .next = NULL,
80553 + .name = "copy_and_check",
80554 + .file = "kernel/module.c",
80555 + .param3 = 1,
80556 +};
80557 +struct size_overflow_hash _000040_hash = {
80558 + .next = NULL,
80559 + .name = "__copy_to_user",
80560 + .file = "arch/x86/include/asm/uaccess_32.h",
80561 + .param3 = 1,
80562 +};
80563 +struct size_overflow_hash _000041_hash = {
80564 + .next = NULL,
80565 + .name = "copy_vm86_regs_from_user",
80566 + .file = "arch/x86/kernel/vm86_32.c",
80567 + .param3 = 1,
80568 +};
80569 +struct size_overflow_hash _000042_hash = {
80570 + .next = NULL,
80571 + .name = "csum_partial_copy_fromiovecend",
80572 + .file = "include/linux/socket.h",
80573 + .param4 = 1,
80574 +};
80575 +struct size_overflow_hash _000043_hash = {
80576 + .next = NULL,
80577 + .name = "ddebug_proc_write",
80578 + .file = "lib/dynamic_debug.c",
80579 + .param3 = 1,
80580 +};
80581 +struct size_overflow_hash _000044_hash = {
80582 + .next = NULL,
80583 + .name = "devm_kzalloc",
80584 + .file = "include/linux/device.h",
80585 + .param2 = 1,
80586 +};
80587 +struct size_overflow_hash _000045_hash = {
80588 + .next = NULL,
80589 + .name = "devres_alloc",
80590 + .file = "include/linux/device.h",
80591 + .param2 = 1,
80592 +};
80593 +struct size_overflow_hash _000046_hash = {
80594 + .next = NULL,
80595 + .name = "do_ip_setsockopt",
80596 + .file = "net/ipv4/ip_sockglue.c",
80597 + .param5 = 1,
80598 +};
80599 +struct size_overflow_hash _000047_hash = {
80600 + .next = NULL,
80601 + .name = "do_kimage_alloc",
80602 + .file = "kernel/kexec.c",
80603 + .param3 = 1,
80604 +};
80605 +struct size_overflow_hash _000048_hash = {
80606 + .next = NULL,
80607 + .name = "do_tty_write",
80608 + .file = "drivers/tty/tty_io.c",
80609 + .param5 = 1,
80610 +};
80611 +struct size_overflow_hash _000049_hash = {
80612 + .next = NULL,
80613 + .name = "fanotify_write",
80614 + .file = "fs/notify/fanotify/fanotify_user.c",
80615 + .param3 = 1,
80616 +};
80617 +struct size_overflow_hash _000050_hash = {
80618 + .next = NULL,
80619 + .name = "file_read_actor",
80620 + .file = "include/linux/fs.h",
80621 + .param4 = 1,
80622 +};
80623 +struct size_overflow_hash _000051_hash = {
80624 + .next = NULL,
80625 + .name = "fill_write_buffer",
80626 + .file = "fs/sysfs/file.c",
80627 + .param3 = 1,
80628 +};
80629 +struct size_overflow_hash _000052_hash = {
80630 + .next = NULL,
80631 + .name = "get_user_cpu_mask",
80632 + .file = "kernel/sched/core.c",
80633 + .param2 = 1,
80634 +};
80635 +struct size_overflow_hash _000053_hash = {
80636 + .next = NULL,
80637 + .name = "hashtab_create",
80638 + .file = "security/selinux/ss/hashtab.c",
80639 + .param3 = 1,
80640 +};
80641 +struct size_overflow_hash _000054_hash = {
80642 + .next = NULL,
80643 + .name = "heap_init",
80644 + .file = "include/linux/prio_heap.h",
80645 + .param2 = 1,
80646 +};
80647 +struct size_overflow_hash _000055_hash = {
80648 + .next = NULL,
80649 + .name = "hest_ghes_dev_register",
80650 + .file = "drivers/acpi/apei/hest.c",
80651 + .param1 = 1,
80652 +};
80653 +struct size_overflow_hash _000056_hash = {
80654 + .next = NULL,
80655 + .name = "ima_write_policy",
80656 + .file = "security/integrity/ima/ima_fs.c",
80657 + .param3 = 1,
80658 +};
80659 +struct size_overflow_hash _000057_hash = {
80660 + .next = NULL,
80661 + .name = "input_ff_create",
80662 + .file = "include/linux/input.h",
80663 + .param2 = 1,
80664 +};
80665 +struct size_overflow_hash _000058_hash = {
80666 + .next = NULL,
80667 + .name = "input_mt_init_slots",
80668 + .file = "include/linux/input/mt.h",
80669 + .param2 = 1,
80670 +};
80671 +struct size_overflow_hash _000059_hash = {
80672 + .next = NULL,
80673 + .name = "iov_iter_copy_from_user",
80674 + .file = "include/linux/fs.h",
80675 + .param4 = 1,
80676 +};
80677 +struct size_overflow_hash _000060_hash = {
80678 + .next = NULL,
80679 + .name = "iov_iter_copy_from_user_atomic",
80680 + .file = "include/linux/fs.h",
80681 + .param4 = 1,
80682 +};
80683 +struct size_overflow_hash _000061_hash = {
80684 + .next = NULL,
80685 + .name = "keyctl_instantiate_key_common",
80686 + .file = "security/keys/keyctl.c",
80687 + .param4 = 1,
80688 +};
80689 +struct size_overflow_hash _000062_hash = {
80690 + .next = NULL,
80691 + .name = "keyctl_update_key",
80692 + .file = "security/keys/keyctl.c",
80693 + .param3 = 1,
80694 +};
80695 +struct size_overflow_hash _000063_hash = {
80696 + .next = NULL,
80697 + .name = "__kfifo_alloc",
80698 + .file = "include/linux/kfifo.h",
80699 + .param2 = 1,
80700 + .param3 = 1,
80701 +};
80702 +struct size_overflow_hash _000065_hash = {
80703 + .next = NULL,
80704 + .name = "kfifo_copy_from_user",
80705 + .file = "kernel/kfifo.c",
80706 + .param3 = 1,
80707 +};
80708 +struct size_overflow_hash _000066_hash = {
80709 + .next = NULL,
80710 + .name = "kmalloc_node",
80711 + .file = "include/linux/slab.h",
80712 + .param1 = 1,
80713 +};
80714 +struct size_overflow_hash _000067_hash = {
80715 + .next = NULL,
80716 + .name = "kmalloc_parameter",
80717 + .file = "kernel/params.c",
80718 + .param1 = 1,
80719 +};
80720 +struct size_overflow_hash _000068_hash = {
80721 + .next = NULL,
80722 + .name = "kobj_map",
80723 + .file = "include/linux/kobj_map.h",
80724 + .param2 = 1,
80725 + .param3 = 1,
80726 +};
80727 +struct size_overflow_hash _000070_hash = {
80728 + .next = NULL,
80729 + .name = "krealloc",
80730 + .file = "include/linux/slab.h",
80731 + .param2 = 1,
80732 +};
80733 +struct size_overflow_hash _000071_hash = {
80734 + .next = NULL,
80735 + .name = "kvmalloc",
80736 + .file = "security/apparmor/lib.c",
80737 + .param1 = 1,
80738 +};
80739 +struct size_overflow_hash _000072_hash = {
80740 + .next = NULL,
80741 + .name = "kzalloc",
80742 + .file = "include/linux/slab.h",
80743 + .param1 = 1,
80744 +};
80745 +struct size_overflow_hash _000073_hash = {
80746 + .next = NULL,
80747 + .name = "listxattr",
80748 + .file = "fs/xattr.c",
80749 + .param3 = 1,
80750 +};
80751 +struct size_overflow_hash _000074_hash = {
80752 + .next = NULL,
80753 + .name = "mempool_kmalloc",
80754 + .file = "include/linux/mempool.h",
80755 + .param2 = 1,
80756 +};
80757 +struct size_overflow_hash _000075_hash = {
80758 + .next = NULL,
80759 + .name = "mem_rw",
80760 + .file = "fs/proc/base.c",
80761 + .param3 = 1,
80762 +};
80763 +struct size_overflow_hash _000076_hash = {
80764 + .next = NULL,
80765 + .name = "module_alloc_update_bounds",
80766 + .file = "kernel/module.c",
80767 + .param1 = 1,
80768 +};
80769 +struct size_overflow_hash _000077_hash = {
80770 + .next = NULL,
80771 + .name = "mpi_alloc_limb_space",
80772 + .file = "lib/mpi/mpiutil.c",
80773 + .param1 = 1,
80774 +};
80775 +struct size_overflow_hash _000078_hash = {
80776 + .next = NULL,
80777 + .name = "mpi_resize",
80778 + .file = "include/linux/mpi.h",
80779 + .param2 = 1,
80780 +};
80781 +struct size_overflow_hash _000079_hash = {
80782 + .next = NULL,
80783 + .name = "mtrr_write",
80784 + .file = "arch/x86/kernel/cpu/mtrr/if.c",
80785 + .param3 = 1,
80786 +};
80787 +struct size_overflow_hash _000080_hash = {
80788 + .next = NULL,
80789 + .name = "oom_adjust_write",
80790 + .file = "fs/proc/base.c",
80791 + .param3 = 1,
80792 +};
80793 +struct size_overflow_hash _000081_hash = {
80794 + .next = NULL,
80795 + .name = "oom_score_adj_write",
80796 + .file = "fs/proc/base.c",
80797 + .param3 = 1,
80798 +};
80799 +struct size_overflow_hash _000082_hash = {
80800 + .next = NULL,
80801 + .name = "pipe_iov_copy_from_user",
80802 + .file = "fs/pipe.c",
80803 + .param3 = 1,
80804 +};
80805 +struct size_overflow_hash _000083_hash = {
80806 + .next = NULL,
80807 + .name = "pipe_iov_copy_to_user",
80808 + .file = "fs/pipe.c",
80809 + .param3 = 1,
80810 +};
80811 +struct size_overflow_hash _000084_hash = {
80812 + .next = NULL,
80813 + .name = "pipe_set_size",
80814 + .file = "fs/pipe.c",
80815 + .param2 = 1,
80816 +};
80817 +struct size_overflow_hash _000085_hash = {
80818 + .next = NULL,
80819 + .name = "platform_device_add_data",
80820 + .file = "include/linux/platform_device.h",
80821 + .param3 = 1,
80822 +};
80823 +struct size_overflow_hash _000086_hash = {
80824 + .next = NULL,
80825 + .name = "platform_device_add_resources",
80826 + .file = "include/linux/platform_device.h",
80827 + .param3 = 1,
80828 +};
80829 +struct size_overflow_hash _000087_hash = {
80830 + .next = NULL,
80831 + .name = "pm_qos_power_write",
80832 + .file = "kernel/power/qos.c",
80833 + .param3 = 1,
80834 +};
80835 +struct size_overflow_hash _000088_hash = {
80836 + .next = NULL,
80837 + .name = "pnpbios_proc_write",
80838 + .file = "drivers/pnp/pnpbios/proc.c",
80839 + .param3 = 1,
80840 +};
80841 +struct size_overflow_hash _000089_hash = {
80842 + .next = NULL,
80843 + .name = "__probe_kernel_read",
80844 + .file = "include/linux/uaccess.h",
80845 + .param3 = 1,
80846 +};
80847 +struct size_overflow_hash _000090_hash = {
80848 + .next = NULL,
80849 + .name = "__probe_kernel_write",
80850 + .file = "include/linux/uaccess.h",
80851 + .param3 = 1,
80852 +};
80853 +struct size_overflow_hash _000091_hash = {
80854 + .next = NULL,
80855 + .name = "proc_coredump_filter_write",
80856 + .file = "fs/proc/base.c",
80857 + .param3 = 1,
80858 +};
80859 +struct size_overflow_hash _000092_hash = {
80860 + .next = NULL,
80861 + .name = "process_vm_rw_pages",
80862 + .file = "mm/process_vm_access.c",
80863 + .param5 = 1,
80864 + .param6 = 1,
80865 +};
80866 +struct size_overflow_hash _000094_hash = {
80867 + .next = NULL,
80868 + .name = "proc_loginuid_write",
80869 + .file = "fs/proc/base.c",
80870 + .param3 = 1,
80871 +};
80872 +struct size_overflow_hash _000095_hash = {
80873 + .next = NULL,
80874 + .name = "proc_pid_attr_write",
80875 + .file = "fs/proc/base.c",
80876 + .param3 = 1,
80877 +};
80878 +struct size_overflow_hash _000096_hash = {
80879 + .next = NULL,
80880 + .name = "pstore_mkfile",
80881 + .file = "fs/pstore/inode.c",
80882 + .param5 = 1,
80883 +};
80884 +struct size_overflow_hash _000097_hash = {
80885 + .next = NULL,
80886 + .name = "qdisc_class_hash_alloc",
80887 + .file = "net/sched/sch_api.c",
80888 + .param1 = 1,
80889 +};
80890 +struct size_overflow_hash _000098_hash = {
80891 + .next = NULL,
80892 + .name = "read",
80893 + .file = "fs/sysfs/bin.c",
80894 + .param3 = 1,
80895 +};
80896 +struct size_overflow_hash _000099_hash = {
80897 + .next = NULL,
80898 + .name = "regmap_access_read_file",
80899 + .file = "drivers/base/regmap/regmap-debugfs.c",
80900 + .param3 = 1,
80901 +};
80902 +struct size_overflow_hash _000100_hash = {
80903 + .next = NULL,
80904 + .name = "regmap_map_read_file",
80905 + .file = "drivers/base/regmap/regmap-debugfs.c",
80906 + .param3 = 1,
80907 +};
80908 +struct size_overflow_hash _000101_hash = {
80909 + .next = NULL,
80910 + .name = "_regmap_raw_write",
80911 + .file = "drivers/base/regmap/regmap.c",
80912 + .param4 = 1,
80913 +};
80914 +struct size_overflow_hash _000102_hash = {
80915 + .next = NULL,
80916 + .name = "regset_tls_set",
80917 + .file = "arch/x86/kernel/tls.c",
80918 + .param4 = 1,
80919 +};
80920 +struct size_overflow_hash _000103_hash = {
80921 + .next = NULL,
80922 + .name = "request_key_auth_new",
80923 + .file = "security/keys/request_key_auth.c",
80924 + .param3 = 1,
80925 +};
80926 +struct size_overflow_hash _000104_hash = {
80927 + .next = NULL,
80928 + .name = "restore_i387_fxsave",
80929 + .file = "arch/x86/kernel/i387.c",
80930 + .param2 = 1,
80931 +};
80932 +struct size_overflow_hash _000105_hash = {
80933 + .next = NULL,
80934 + .name = "rngapi_reset",
80935 + .file = "crypto/rng.c",
80936 + .param3 = 1,
80937 +};
80938 +struct size_overflow_hash _000106_hash = {
80939 + .next = NULL,
80940 + .name = "rw_copy_check_uvector",
80941 + .file = "include/linux/fs.h",
80942 + .param3 = 1,
80943 +};
80944 +struct size_overflow_hash _000107_hash = {
80945 + .next = NULL,
80946 + .name = "sched_autogroup_write",
80947 + .file = "fs/proc/base.c",
80948 + .param3 = 1,
80949 +};
80950 +struct size_overflow_hash _000108_hash = {
80951 + .next = NULL,
80952 + .name = "security_context_to_sid_core",
80953 + .file = "security/selinux/ss/services.c",
80954 + .param2 = 1,
80955 +};
80956 +struct size_overflow_hash _000109_hash = {
80957 + .next = NULL,
80958 + .name = "sel_commit_bools_write",
80959 + .file = "security/selinux/selinuxfs.c",
80960 + .param3 = 1,
80961 +};
80962 +struct size_overflow_hash _000110_hash = {
80963 + .next = NULL,
80964 + .name = "sel_write_avc_cache_threshold",
80965 + .file = "security/selinux/selinuxfs.c",
80966 + .param3 = 1,
80967 +};
80968 +struct size_overflow_hash _000111_hash = {
80969 + .next = NULL,
80970 + .name = "sel_write_bool",
80971 + .file = "security/selinux/selinuxfs.c",
80972 + .param3 = 1,
80973 +};
80974 +struct size_overflow_hash _000112_hash = {
80975 + .next = NULL,
80976 + .name = "sel_write_checkreqprot",
80977 + .file = "security/selinux/selinuxfs.c",
80978 + .param3 = 1,
80979 +};
80980 +struct size_overflow_hash _000113_hash = {
80981 + .next = NULL,
80982 + .name = "sel_write_disable",
80983 + .file = "security/selinux/selinuxfs.c",
80984 + .param3 = 1,
80985 +};
80986 +struct size_overflow_hash _000114_hash = {
80987 + .next = NULL,
80988 + .name = "sel_write_enforce",
80989 + .file = "security/selinux/selinuxfs.c",
80990 + .param3 = 1,
80991 +};
80992 +struct size_overflow_hash _000115_hash = {
80993 + .next = NULL,
80994 + .name = "sel_write_load",
80995 + .file = "security/selinux/selinuxfs.c",
80996 + .param3 = 1,
80997 +};
80998 +struct size_overflow_hash _000116_hash = {
80999 + .next = NULL,
81000 + .name = "setkey_unaligned",
81001 + .file = "crypto/ablkcipher.c",
81002 + .param3 = 1,
81003 +};
81004 +struct size_overflow_hash _000117_hash = {
81005 + .next = NULL,
81006 + .name = "setkey_unaligned",
81007 + .file = "crypto/blkcipher.c",
81008 + .param3 = 1,
81009 +};
81010 +struct size_overflow_hash _000118_hash = {
81011 + .next = NULL,
81012 + .name = "setkey_unaligned",
81013 + .file = "crypto/aead.c",
81014 + .param3 = 1,
81015 +};
81016 +struct size_overflow_hash _000119_hash = {
81017 + .next = NULL,
81018 + .name = "setkey_unaligned",
81019 + .file = "crypto/cipher.c",
81020 + .param3 = 1,
81021 +};
81022 +struct size_overflow_hash _000120_hash = {
81023 + .next = NULL,
81024 + .name = "setxattr",
81025 + .file = "fs/xattr.c",
81026 + .param4 = 1,
81027 +};
81028 +struct size_overflow_hash _000121_hash = {
81029 + .next = NULL,
81030 + .name = "sg_kmalloc",
81031 + .file = "lib/scatterlist.c",
81032 + .param1 = 1,
81033 +};
81034 +struct size_overflow_hash _000122_hash = {
81035 + .next = NULL,
81036 + .name = "shash_setkey_unaligned",
81037 + .file = "crypto/shash.c",
81038 + .param3 = 1,
81039 +};
81040 +struct size_overflow_hash _000123_hash = {
81041 + .next = NULL,
81042 + .name = "shmem_xattr_set",
81043 + .file = "mm/shmem.c",
81044 + .param4 = 1,
81045 +};
81046 +struct size_overflow_hash _000124_hash = {
81047 + .next = NULL,
81048 + .name = "simple_transaction_get",
81049 + .file = "include/linux/fs.h",
81050 + .param3 = 1,
81051 +};
81052 +struct size_overflow_hash _000125_hash = {
81053 + .next = NULL,
81054 + .name = "simple_write_to_buffer",
81055 + .file = "include/linux/fs.h",
81056 + .param2 = 1,
81057 + .param5 = 1,
81058 +};
81059 +struct size_overflow_hash _000127_hash = {
81060 + .next = NULL,
81061 + .name = "smk_write_ambient",
81062 + .file = "security/smack/smackfs.c",
81063 + .param3 = 1,
81064 +};
81065 +struct size_overflow_hash _000128_hash = {
81066 + .next = NULL,
81067 + .name = "smk_write_cipso",
81068 + .file = "security/smack/smackfs.c",
81069 + .param3 = 1,
81070 +};
81071 +struct size_overflow_hash _000129_hash = {
81072 + .next = NULL,
81073 + .name = "smk_write_direct",
81074 + .file = "security/smack/smackfs.c",
81075 + .param3 = 1,
81076 +};
81077 +struct size_overflow_hash _000130_hash = {
81078 + .next = NULL,
81079 + .name = "smk_write_doi",
81080 + .file = "security/smack/smackfs.c",
81081 + .param3 = 1,
81082 +};
81083 +struct size_overflow_hash _000131_hash = {
81084 + .next = NULL,
81085 + .name = "smk_write_load_list",
81086 + .file = "security/smack/smackfs.c",
81087 + .param3 = 1,
81088 +};
81089 +struct size_overflow_hash _000132_hash = {
81090 + .next = &_000102_hash,
81091 + .name = "smk_write_logging",
81092 + .file = "security/smack/smackfs.c",
81093 + .param3 = 1,
81094 +};
81095 +struct size_overflow_hash _000133_hash = {
81096 + .next = NULL,
81097 + .name = "smk_write_netlbladdr",
81098 + .file = "security/smack/smackfs.c",
81099 + .param3 = 1,
81100 +};
81101 +struct size_overflow_hash _000134_hash = {
81102 + .next = NULL,
81103 + .name = "smk_write_onlycap",
81104 + .file = "security/smack/smackfs.c",
81105 + .param3 = 1,
81106 +};
81107 +struct size_overflow_hash _000135_hash = {
81108 + .next = NULL,
81109 + .name = "sys_add_key",
81110 + .file = "include/linux/syscalls.h",
81111 + .param4 = 1,
81112 +};
81113 +struct size_overflow_hash _000136_hash = {
81114 + .next = NULL,
81115 + .name = "sys_modify_ldt",
81116 + .file = "arch/x86/include/asm/syscalls.h",
81117 + .param3 = 1,
81118 +};
81119 +struct size_overflow_hash _000137_hash = {
81120 + .next = NULL,
81121 + .name = "sys_semtimedop",
81122 + .file = "include/linux/syscalls.h",
81123 + .param3 = 1,
81124 +};
81125 +struct size_overflow_hash _000138_hash = {
81126 + .next = NULL,
81127 + .name = "tomoyo_write_self",
81128 + .file = "security/tomoyo/securityfs_if.c",
81129 + .param3 = 1,
81130 +};
81131 +struct size_overflow_hash _000139_hash = {
81132 + .next = NULL,
81133 + .name = "tpm_write",
81134 + .file = "drivers/char/tpm/tpm.c",
81135 + .param3 = 1,
81136 +};
81137 +struct size_overflow_hash _000140_hash = {
81138 + .next = NULL,
81139 + .name = "tty_buffer_alloc",
81140 + .file = "drivers/tty/tty_buffer.c",
81141 + .param2 = 1,
81142 +};
81143 +struct size_overflow_hash _000141_hash = {
81144 + .next = NULL,
81145 + .name = "user_instantiate",
81146 + .file = "include/keys/user-type.h",
81147 + .param3 = 1,
81148 +};
81149 +struct size_overflow_hash _000142_hash = {
81150 + .next = NULL,
81151 + .name = "user_update",
81152 + .file = "include/keys/user-type.h",
81153 + .param3 = 1,
81154 +};
81155 +struct size_overflow_hash _000143_hash = {
81156 + .next = NULL,
81157 + .name = "vc_do_resize",
81158 + .file = "drivers/tty/vt/vt.c",
81159 + .param3 = 1,
81160 + .param4 = 1,
81161 +};
81162 +struct size_overflow_hash _000145_hash = {
81163 + .next = NULL,
81164 + .name = "vcs_write",
81165 + .file = "drivers/tty/vt/vc_screen.c",
81166 + .param3 = 1,
81167 +};
81168 +struct size_overflow_hash _000146_hash = {
81169 + .next = NULL,
81170 + .name = "vga_arb_write",
81171 + .file = "drivers/gpu/vga/vgaarb.c",
81172 + .param3 = 1,
81173 +};
81174 +struct size_overflow_hash _000147_hash = {
81175 + .next = NULL,
81176 + .name = "vga_switcheroo_debugfs_write",
81177 + .file = "drivers/gpu/vga/vga_switcheroo.c",
81178 + .param3 = 1,
81179 +};
81180 +struct size_overflow_hash _000148_hash = {
81181 + .next = NULL,
81182 + .name = "__vmalloc",
81183 + .file = "include/linux/vmalloc.h",
81184 + .param1 = 1,
81185 +};
81186 +struct size_overflow_hash _000149_hash = {
81187 + .next = NULL,
81188 + .name = "vmalloc_32",
81189 + .file = "include/linux/vmalloc.h",
81190 + .param1 = 1,
81191 +};
81192 +struct size_overflow_hash _000150_hash = {
81193 + .next = NULL,
81194 + .name = "vmalloc_32_user",
81195 + .file = "include/linux/vmalloc.h",
81196 + .param1 = 1,
81197 +};
81198 +struct size_overflow_hash _000151_hash = {
81199 + .next = NULL,
81200 + .name = "vmalloc_exec",
81201 + .file = "include/linux/vmalloc.h",
81202 + .param1 = 1,
81203 +};
81204 +struct size_overflow_hash _000152_hash = {
81205 + .next = NULL,
81206 + .name = "vmalloc_node",
81207 + .file = "include/linux/vmalloc.h",
81208 + .param1 = 1,
81209 +};
81210 +struct size_overflow_hash _000153_hash = {
81211 + .next = NULL,
81212 + .name = "__vmalloc_node_flags",
81213 + .file = "mm/vmalloc.c",
81214 + .param1 = 1,
81215 +};
81216 +struct size_overflow_hash _000154_hash = {
81217 + .next = NULL,
81218 + .name = "vmalloc_user",
81219 + .file = "include/linux/vmalloc.h",
81220 + .param1 = 1,
81221 +};
81222 +struct size_overflow_hash _000155_hash = {
81223 + .next = NULL,
81224 + .name = "write",
81225 + .file = "fs/sysfs/bin.c",
81226 + .param3 = 1,
81227 +};
81228 +struct size_overflow_hash _000156_hash = {
81229 + .next = NULL,
81230 + .name = "__xip_file_write",
81231 + .file = "mm/filemap_xip.c",
81232 + .param3 = 1,
81233 +};
81234 +struct size_overflow_hash _000157_hash = {
81235 + .next = NULL,
81236 + .name = "acpi_ex_allocate_name_string",
81237 + .file = "drivers/acpi/acpica/exnames.c",
81238 + .param2 = 1,
81239 +};
81240 +struct size_overflow_hash _000158_hash = {
81241 + .next = NULL,
81242 + .name = "acpi_os_allocate_zeroed",
81243 + .file = "include/acpi/platform/aclinux.h",
81244 + .param1 = 1,
81245 +};
81246 +struct size_overflow_hash _000159_hash = {
81247 + .next = NULL,
81248 + .name = "acpi_ut_initialize_buffer",
81249 + .file = "drivers/acpi/acpica/utalloc.c",
81250 + .param2 = 1,
81251 +};
81252 +struct size_overflow_hash _000160_hash = {
81253 + .next = NULL,
81254 + .name = "add_numbered_child",
81255 + .file = "drivers/mfd/twl-core.c",
81256 + .param5 = 1,
81257 +};
81258 +struct size_overflow_hash _000161_hash = {
81259 + .next = NULL,
81260 + .name = "___alloc_bootmem_nopanic",
81261 + .file = "mm/nobootmem.c",
81262 + .param1 = 1,
81263 +};
81264 +struct size_overflow_hash _000162_hash = {
81265 + .next = NULL,
81266 + .name = "alloc_large_system_hash",
81267 + .file = "include/linux/bootmem.h",
81268 + .param2 = 1,
81269 +};
81270 +struct size_overflow_hash _000163_hash = {
81271 + .next = NULL,
81272 + .name = "audit_init_entry",
81273 + .file = "kernel/auditfilter.c",
81274 + .param1 = 1,
81275 +};
81276 +struct size_overflow_hash _000164_hash = {
81277 + .next = NULL,
81278 + .name = "__bio_map_kern",
81279 + .file = "fs/bio.c",
81280 + .param2 = 1,
81281 + .param3 = 1,
81282 +};
81283 +struct size_overflow_hash _000166_hash = {
81284 + .next = NULL,
81285 + .name = "blk_register_region",
81286 + .file = "include/linux/genhd.h",
81287 + .param1 = 1,
81288 + .param2 = 1,
81289 +};
81290 +struct size_overflow_hash _000168_hash = {
81291 + .next = NULL,
81292 + .name = "cdev_add",
81293 + .file = "include/linux/cdev.h",
81294 + .param2 = 1,
81295 + .param3 = 1,
81296 +};
81297 +struct size_overflow_hash _000170_hash = {
81298 + .next = NULL,
81299 + .name = "copy_to_user",
81300 + .file = "arch/x86/include/asm/uaccess_32.h",
81301 + .param3 = 1,
81302 +};
81303 +struct size_overflow_hash _000171_hash = {
81304 + .next = NULL,
81305 + .name = "crypto_ahash_setkey",
81306 + .file = "include/crypto/hash.h",
81307 + .param3 = 1,
81308 +};
81309 +struct size_overflow_hash _000172_hash = {
81310 + .next = NULL,
81311 + .name = "crypto_alloc_instance2",
81312 + .file = "include/crypto/algapi.h",
81313 + .param3 = 1,
81314 +};
81315 +struct size_overflow_hash _000173_hash = {
81316 + .next = NULL,
81317 + .name = "crypto_shash_setkey",
81318 + .file = "include/crypto/hash.h",
81319 + .param3 = 1,
81320 +};
81321 +struct size_overflow_hash _000174_hash = {
81322 + .next = NULL,
81323 + .name = "dev_set_alias",
81324 + .file = "include/linux/netdevice.h",
81325 + .param3 = 1,
81326 +};
81327 +struct size_overflow_hash _000175_hash = {
81328 + .next = NULL,
81329 + .name = "do_readv_writev",
81330 + .file = "fs/read_write.c",
81331 + .param4 = 1,
81332 +};
81333 +struct size_overflow_hash _000176_hash = {
81334 + .next = NULL,
81335 + .name = "getxattr",
81336 + .file = "fs/xattr.c",
81337 + .param4 = 1,
81338 +};
81339 +struct size_overflow_hash _000177_hash = {
81340 + .next = NULL,
81341 + .name = "hugetlbfs_read_actor",
81342 + .file = "fs/hugetlbfs/inode.c",
81343 + .param2 = 1,
81344 + .param5 = 1,
81345 + .param4 = 1,
81346 +};
81347 +struct size_overflow_hash _000180_hash = {
81348 + .next = NULL,
81349 + .name = "keyctl_instantiate_key",
81350 + .file = "security/keys/keyctl.c",
81351 + .param3 = 1,
81352 +};
81353 +struct size_overflow_hash _000181_hash = {
81354 + .next = NULL,
81355 + .name = "keyctl_instantiate_key_iov",
81356 + .file = "security/keys/keyctl.c",
81357 + .param3 = 1,
81358 +};
81359 +struct size_overflow_hash _000182_hash = {
81360 + .next = NULL,
81361 + .name = "__kfifo_from_user",
81362 + .file = "include/linux/kfifo.h",
81363 + .param3 = 1,
81364 +};
81365 +struct size_overflow_hash _000183_hash = {
81366 + .next = NULL,
81367 + .name = "kimage_crash_alloc",
81368 + .file = "kernel/kexec.c",
81369 + .param3 = 1,
81370 +};
81371 +struct size_overflow_hash _000184_hash = {
81372 + .next = NULL,
81373 + .name = "kimage_normal_alloc",
81374 + .file = "kernel/kexec.c",
81375 + .param3 = 1,
81376 +};
81377 +struct size_overflow_hash _000185_hash = {
81378 + .next = NULL,
81379 + .name = "mpi_alloc",
81380 + .file = "include/linux/mpi.h",
81381 + .param1 = 1,
81382 +};
81383 +struct size_overflow_hash _000186_hash = {
81384 + .next = NULL,
81385 + .name = "mpi_set_bit",
81386 + .file = "include/linux/mpi.h",
81387 + .param2 = 1,
81388 +};
81389 +struct size_overflow_hash _000187_hash = {
81390 + .next = NULL,
81391 + .name = "mpi_set_highbit",
81392 + .file = "include/linux/mpi.h",
81393 + .param2 = 1,
81394 +};
81395 +struct size_overflow_hash _000188_hash = {
81396 + .next = NULL,
81397 + .name = "neigh_hash_alloc",
81398 + .file = "net/core/neighbour.c",
81399 + .param1 = 1,
81400 +};
81401 +struct size_overflow_hash _000189_hash = {
81402 + .next = NULL,
81403 + .name = "nl_pid_hash_zalloc",
81404 + .file = "net/netlink/af_netlink.c",
81405 + .param1 = 1,
81406 +};
81407 +struct size_overflow_hash _000190_hash = {
81408 + .next = NULL,
81409 + .name = "pci_add_cap_save_buffer",
81410 + .file = "drivers/pci/pci.c",
81411 + .param3 = 1,
81412 +};
81413 +struct size_overflow_hash _000191_hash = {
81414 + .next = NULL,
81415 + .name = "pcpu_mem_zalloc",
81416 + .file = "mm/percpu.c",
81417 + .param1 = 1,
81418 +};
81419 +struct size_overflow_hash _000192_hash = {
81420 + .next = NULL,
81421 + .name = "platform_create_bundle",
81422 + .file = "include/linux/platform_device.h",
81423 + .param4 = 1,
81424 + .param6 = 1,
81425 +};
81426 +struct size_overflow_hash _000194_hash = {
81427 + .next = NULL,
81428 + .name = "process_vm_rw",
81429 + .file = "mm/process_vm_access.c",
81430 + .param3 = 1,
81431 + .param5 = 1,
81432 +};
81433 +struct size_overflow_hash _000196_hash = {
81434 + .next = NULL,
81435 + .name = "process_vm_rw_single_vec",
81436 + .file = "mm/process_vm_access.c",
81437 + .param1 = 1,
81438 + .param2 = 1,
81439 +};
81440 +struct size_overflow_hash _000198_hash = {
81441 + .next = NULL,
81442 + .name = "profile_load",
81443 + .file = "security/apparmor/apparmorfs.c",
81444 + .param3 = 1,
81445 +};
81446 +struct size_overflow_hash _000199_hash = {
81447 + .next = NULL,
81448 + .name = "profile_remove",
81449 + .file = "security/apparmor/apparmorfs.c",
81450 + .param3 = 1,
81451 +};
81452 +struct size_overflow_hash _000200_hash = {
81453 + .next = NULL,
81454 + .name = "profile_replace",
81455 + .file = "security/apparmor/apparmorfs.c",
81456 + .param3 = 1,
81457 +};
81458 +struct size_overflow_hash _000201_hash = {
81459 + .next = NULL,
81460 + .name = "regcache_rbtree_insert_to_block",
81461 + .file = "drivers/base/regmap/regcache-rbtree.c",
81462 + .param5 = 1,
81463 +};
81464 +struct size_overflow_hash _000202_hash = {
81465 + .next = NULL,
81466 + .name = "regmap_raw_write",
81467 + .file = "include/linux/regmap.h",
81468 + .param4 = 1,
81469 +};
81470 +struct size_overflow_hash _000203_hash = {
81471 + .next = NULL,
81472 + .name = "relay_alloc_page_array",
81473 + .file = "kernel/relay.c",
81474 + .param1 = 1,
81475 +};
81476 +struct size_overflow_hash _000204_hash = {
81477 + .next = NULL,
81478 + .name = "RESIZE_IF_NEEDED",
81479 + .file = "lib/mpi/mpi-internal.h",
81480 + .param2 = 1,
81481 +};
81482 +struct size_overflow_hash _000205_hash = {
81483 + .next = NULL,
81484 + .name = "security_context_to_sid",
81485 + .file = "security/selinux/ss/services.c",
81486 + .param2 = 1,
81487 +};
81488 +struct size_overflow_hash _000206_hash = {
81489 + .next = NULL,
81490 + .name = "security_context_to_sid_default",
81491 + .file = "security/selinux/ss/services.c",
81492 + .param2 = 1,
81493 +};
81494 +struct size_overflow_hash _000207_hash = {
81495 + .next = NULL,
81496 + .name = "security_context_to_sid_force",
81497 + .file = "security/selinux/ss/services.c",
81498 + .param2 = 1,
81499 +};
81500 +struct size_overflow_hash _000208_hash = {
81501 + .next = NULL,
81502 + .name = "selinux_transaction_write",
81503 + .file = "security/selinux/selinuxfs.c",
81504 + .param3 = 1,
81505 +};
81506 +struct size_overflow_hash _000209_hash = {
81507 + .next = NULL,
81508 + .name = "sel_write_access",
81509 + .file = "security/selinux/selinuxfs.c",
81510 + .param3 = 1,
81511 +};
81512 +struct size_overflow_hash _000210_hash = {
81513 + .next = NULL,
81514 + .name = "sel_write_create",
81515 + .file = "security/selinux/selinuxfs.c",
81516 + .param3 = 1,
81517 +};
81518 +struct size_overflow_hash _000211_hash = {
81519 + .next = NULL,
81520 + .name = "sel_write_member",
81521 + .file = "security/selinux/selinuxfs.c",
81522 + .param3 = 1,
81523 +};
81524 +struct size_overflow_hash _000212_hash = {
81525 + .next = NULL,
81526 + .name = "sel_write_relabel",
81527 + .file = "security/selinux/selinuxfs.c",
81528 + .param3 = 1,
81529 +};
81530 +struct size_overflow_hash _000213_hash = {
81531 + .next = NULL,
81532 + .name = "sel_write_user",
81533 + .file = "security/selinux/selinuxfs.c",
81534 + .param3 = 1,
81535 +};
81536 +struct size_overflow_hash _000214_hash = {
81537 + .next = NULL,
81538 + .name = "setkey",
81539 + .file = "crypto/cipher.c",
81540 + .param3 = 1,
81541 +};
81542 +struct size_overflow_hash _000215_hash = {
81543 + .next = NULL,
81544 + .name = "setkey",
81545 + .file = "crypto/ablkcipher.c",
81546 + .param3 = 1,
81547 +};
81548 +struct size_overflow_hash _000216_hash = {
81549 + .next = NULL,
81550 + .name = "setkey",
81551 + .file = "crypto/aead.c",
81552 + .param3 = 1,
81553 +};
81554 +struct size_overflow_hash _000217_hash = {
81555 + .next = NULL,
81556 + .name = "setkey",
81557 + .file = "crypto/blkcipher.c",
81558 + .param3 = 1,
81559 +};
81560 +struct size_overflow_hash _000218_hash = {
81561 + .next = NULL,
81562 + .name = "smk_write_access",
81563 + .file = "security/smack/smackfs.c",
81564 + .param3 = 1,
81565 +};
81566 +struct size_overflow_hash _000219_hash = {
81567 + .next = NULL,
81568 + .name = "snapshot_write",
81569 + .file = "kernel/power/user.c",
81570 + .param3 = 1,
81571 +};
81572 +struct size_overflow_hash _000220_hash = {
81573 + .next = NULL,
81574 + .name = "spi_alloc_master",
81575 + .file = "include/linux/spi/spi.h",
81576 + .param2 = 1,
81577 +};
81578 +struct size_overflow_hash _000221_hash = {
81579 + .next = NULL,
81580 + .name = "spi_register_board_info",
81581 + .file = "include/linux/spi/spi.h",
81582 + .param2 = 1,
81583 +};
81584 +struct size_overflow_hash _000222_hash = {
81585 + .next = NULL,
81586 + .name = "sys_flistxattr",
81587 + .file = "include/linux/syscalls.h",
81588 + .param3 = 1,
81589 +};
81590 +struct size_overflow_hash _000223_hash = {
81591 + .next = NULL,
81592 + .name = "sys_fsetxattr",
81593 + .file = "include/linux/syscalls.h",
81594 + .param4 = 1,
81595 +};
81596 +struct size_overflow_hash _000224_hash = {
81597 + .next = NULL,
81598 + .name = "sysfs_write_file",
81599 + .file = "fs/sysfs/file.c",
81600 + .param3 = 1,
81601 +};
81602 +struct size_overflow_hash _000225_hash = {
81603 + .next = NULL,
81604 + .name = "sys_ipc",
81605 + .file = "include/linux/syscalls.h",
81606 + .param3 = 1,
81607 +};
81608 +struct size_overflow_hash _000226_hash = {
81609 + .next = NULL,
81610 + .name = "sys_keyctl",
81611 + .file = "include/linux/syscalls.h",
81612 + .param4 = 1,
81613 +};
81614 +struct size_overflow_hash _000227_hash = {
81615 + .next = NULL,
81616 + .name = "sys_listxattr",
81617 + .file = "include/linux/syscalls.h",
81618 + .param3 = 1,
81619 +};
81620 +struct size_overflow_hash _000228_hash = {
81621 + .next = NULL,
81622 + .name = "sys_llistxattr",
81623 + .file = "include/linux/syscalls.h",
81624 + .param3 = 1,
81625 +};
81626 +struct size_overflow_hash _000229_hash = {
81627 + .next = NULL,
81628 + .name = "sys_lsetxattr",
81629 + .file = "include/linux/syscalls.h",
81630 + .param4 = 1,
81631 +};
81632 +struct size_overflow_hash _000230_hash = {
81633 + .next = NULL,
81634 + .name = "sys_sched_setaffinity",
81635 + .file = "include/linux/syscalls.h",
81636 + .param2 = 1,
81637 +};
81638 +struct size_overflow_hash _000231_hash = {
81639 + .next = NULL,
81640 + .name = "sys_semop",
81641 + .file = "include/linux/syscalls.h",
81642 + .param3 = 1,
81643 +};
81644 +struct size_overflow_hash _000232_hash = {
81645 + .next = NULL,
81646 + .name = "sys_setxattr",
81647 + .file = "include/linux/syscalls.h",
81648 + .param4 = 1,
81649 +};
81650 +struct size_overflow_hash _000233_hash = {
81651 + .next = NULL,
81652 + .name = "tnode_alloc",
81653 + .file = "net/ipv4/fib_trie.c",
81654 + .param1 = 1,
81655 +};
81656 +struct size_overflow_hash _000234_hash = {
81657 + .next = NULL,
81658 + .name = "tomoyo_commit_ok",
81659 + .file = "security/tomoyo/memory.c",
81660 + .param2 = 1,
81661 +};
81662 +struct size_overflow_hash _000235_hash = {
81663 + .next = NULL,
81664 + .name = "tomoyo_scan_bprm",
81665 + .file = "security/tomoyo/condition.c",
81666 + .param2 = 1,
81667 + .param4 = 1,
81668 +};
81669 +struct size_overflow_hash _000237_hash = {
81670 + .next = NULL,
81671 + .name = "tty_write",
81672 + .file = "drivers/tty/tty_io.c",
81673 + .param3 = 1,
81674 +};
81675 +struct size_overflow_hash _000238_hash = {
81676 + .next = NULL,
81677 + .name = "vc_resize",
81678 + .file = "include/linux/vt_kern.h",
81679 + .param2 = 1,
81680 + .param3 = 1,
81681 +};
81682 +struct size_overflow_hash _000240_hash = {
81683 + .next = NULL,
81684 + .name = "vmalloc",
81685 + .file = "include/linux/vmalloc.h",
81686 + .param1 = 1,
81687 +};
81688 +struct size_overflow_hash _000241_hash = {
81689 + .next = NULL,
81690 + .name = "vzalloc",
81691 + .file = "include/linux/vmalloc.h",
81692 + .param1 = 1,
81693 +};
81694 +struct size_overflow_hash _000242_hash = {
81695 + .next = NULL,
81696 + .name = "vzalloc_node",
81697 + .file = "include/linux/vmalloc.h",
81698 + .param1 = 1,
81699 +};
81700 +struct size_overflow_hash _000243_hash = {
81701 + .next = NULL,
81702 + .name = "xfrm_hash_alloc",
81703 + .file = "net/xfrm/xfrm_hash.c",
81704 + .param1 = 1,
81705 +};
81706 +struct size_overflow_hash _000244_hash = {
81707 + .next = NULL,
81708 + .name = "acpi_ds_build_internal_package_obj",
81709 + .file = "drivers/acpi/acpica/dsobject.c",
81710 + .param3 = 1,
81711 +};
81712 +struct size_overflow_hash _000245_hash = {
81713 + .next = NULL,
81714 + .name = "acpi_system_read_event",
81715 + .file = "drivers/acpi/event.c",
81716 + .param3 = 1,
81717 +};
81718 +struct size_overflow_hash _000246_hash = {
81719 + .next = NULL,
81720 + .name = "acpi_ut_create_buffer_object",
81721 + .file = "drivers/acpi/acpica/utobject.c",
81722 + .param1 = 1,
81723 +};
81724 +struct size_overflow_hash _000247_hash = {
81725 + .next = NULL,
81726 + .name = "acpi_ut_create_package_object",
81727 + .file = "drivers/acpi/acpica/utobject.c",
81728 + .param1 = 1,
81729 +};
81730 +struct size_overflow_hash _000248_hash = {
81731 + .next = NULL,
81732 + .name = "acpi_ut_create_string_object",
81733 + .file = "drivers/acpi/acpica/utobject.c",
81734 + .param1 = 1,
81735 +};
81736 +struct size_overflow_hash _000249_hash = {
81737 + .next = NULL,
81738 + .name = "add_child",
81739 + .file = "drivers/mfd/twl-core.c",
81740 + .param4 = 1,
81741 +};
81742 +struct size_overflow_hash _000250_hash = {
81743 + .next = NULL,
81744 + .name = "___alloc_bootmem",
81745 + .file = "mm/nobootmem.c",
81746 + .param1 = 1,
81747 +};
81748 +struct size_overflow_hash _000251_hash = {
81749 + .next = NULL,
81750 + .name = "__alloc_bootmem_nopanic",
81751 + .file = "include/linux/bootmem.h",
81752 + .param1 = 1,
81753 +};
81754 +struct size_overflow_hash _000252_hash = {
81755 + .next = NULL,
81756 + .name = "async_setkey",
81757 + .file = "crypto/blkcipher.c",
81758 + .param3 = 1,
81759 +};
81760 +struct size_overflow_hash _000253_hash = {
81761 + .next = NULL,
81762 + .name = "bio_map_kern",
81763 + .file = "include/linux/bio.h",
81764 + .param3 = 1,
81765 +};
81766 +struct size_overflow_hash _000254_hash = {
81767 + .next = NULL,
81768 + .name = "copy_oldmem_page",
81769 + .file = "include/linux/crash_dump.h",
81770 + .param3 = 1,
81771 +};
81772 +struct size_overflow_hash _000255_hash = {
81773 + .next = NULL,
81774 + .name = "do_sigpending",
81775 + .file = "include/linux/signal.h",
81776 + .param2 = 1,
81777 +};
81778 +struct size_overflow_hash _000257_hash = {
81779 + .next = NULL,
81780 + .name = "keyctl_describe_key",
81781 + .file = "security/keys/keyctl.c",
81782 + .param3 = 1,
81783 +};
81784 +struct size_overflow_hash _000258_hash = {
81785 + .next = NULL,
81786 + .name = "keyctl_get_security",
81787 + .file = "security/keys/keyctl.c",
81788 + .param3 = 1,
81789 +};
81790 +struct size_overflow_hash _000259_hash = {
81791 + .next = NULL,
81792 + .name = "keyring_read",
81793 + .file = "security/keys/keyring.c",
81794 + .param3 = 1,
81795 +};
81796 +struct size_overflow_hash _000260_hash = {
81797 + .next = NULL,
81798 + .name = "kfifo_copy_to_user",
81799 + .file = "kernel/kfifo.c",
81800 + .param3 = 1,
81801 +};
81802 +struct size_overflow_hash _000261_hash = {
81803 + .next = NULL,
81804 + .name = "mousedev_read",
81805 + .file = "drivers/input/mousedev.c",
81806 + .param3 = 1,
81807 +};
81808 +struct size_overflow_hash _000262_hash = {
81809 + .next = NULL,
81810 + .name = "mpi_lshift_limbs",
81811 + .file = "lib/mpi/mpi-bit.c",
81812 + .param2 = 1,
81813 +};
81814 +struct size_overflow_hash _000263_hash = {
81815 + .next = NULL,
81816 + .name = "neigh_hash_grow",
81817 + .file = "net/core/neighbour.c",
81818 + .param2 = 1,
81819 +};
81820 +struct size_overflow_hash _000264_hash = {
81821 + .next = NULL,
81822 + .name = "posix_clock_register",
81823 + .file = "include/linux/posix-clock.h",
81824 + .param2 = 1,
81825 +};
81826 +struct size_overflow_hash _000265_hash = {
81827 + .next = NULL,
81828 + .name = "__proc_file_read",
81829 + .file = "fs/proc/generic.c",
81830 + .param3 = 1,
81831 +};
81832 +struct size_overflow_hash _000266_hash = {
81833 + .next = NULL,
81834 + .name = "read_profile",
81835 + .file = "kernel/profile.c",
81836 + .param3 = 1,
81837 +};
81838 +struct size_overflow_hash _000267_hash = {
81839 + .next = NULL,
81840 + .name = "read_vmcore",
81841 + .file = "fs/proc/vmcore.c",
81842 + .param3 = 1,
81843 +};
81844 +struct size_overflow_hash _000268_hash = {
81845 + .next = NULL,
81846 + .name = "redirected_tty_write",
81847 + .file = "drivers/tty/tty_io.c",
81848 + .param3 = 1,
81849 +};
81850 +struct size_overflow_hash _000269_hash = {
81851 + .next = NULL,
81852 + .name = "__register_chrdev",
81853 + .file = "include/linux/fs.h",
81854 + .param2 = 1,
81855 + .param3 = 1,
81856 +};
81857 +struct size_overflow_hash _000271_hash = {
81858 + .next = NULL,
81859 + .name = "request_key_auth_read",
81860 + .file = "security/keys/request_key_auth.c",
81861 + .param3 = 1,
81862 +};
81863 +struct size_overflow_hash _000272_hash = {
81864 + .next = NULL,
81865 + .name = "shash_async_setkey",
81866 + .file = "crypto/shash.c",
81867 + .param3 = 1,
81868 +};
81869 +struct size_overflow_hash _000273_hash = {
81870 + .next = NULL,
81871 + .name = "shash_compat_setkey",
81872 + .file = "crypto/shash.c",
81873 + .param3 = 1,
81874 +};
81875 +struct size_overflow_hash _000274_hash = {
81876 + .next = NULL,
81877 + .name = "simple_read_from_buffer",
81878 + .file = "include/linux/fs.h",
81879 + .param2 = 1,
81880 + .param5 = 1,
81881 +};
81882 +struct size_overflow_hash _000276_hash = {
81883 + .next = NULL,
81884 + .name = "store_ifalias",
81885 + .file = "net/core/net-sysfs.c",
81886 + .param4 = 1,
81887 +};
81888 +struct size_overflow_hash _000277_hash = {
81889 + .next = NULL,
81890 + .name = "subbuf_read_actor",
81891 + .file = "kernel/relay.c",
81892 + .param3 = 1,
81893 +};
81894 +struct size_overflow_hash _000278_hash = {
81895 + .next = NULL,
81896 + .name = "sys_fgetxattr",
81897 + .file = "include/linux/syscalls.h",
81898 + .param4 = 1,
81899 +};
81900 +struct size_overflow_hash _000279_hash = {
81901 + .next = NULL,
81902 + .name = "sys_getxattr",
81903 + .file = "include/linux/syscalls.h",
81904 + .param4 = 1,
81905 +};
81906 +struct size_overflow_hash _000280_hash = {
81907 + .next = NULL,
81908 + .name = "sys_kexec_load",
81909 + .file = "include/linux/syscalls.h",
81910 + .param2 = 1,
81911 +};
81912 +struct size_overflow_hash _000281_hash = {
81913 + .next = NULL,
81914 + .name = "sys_lgetxattr",
81915 + .file = "include/linux/syscalls.h",
81916 + .param4 = 1,
81917 +};
81918 +struct size_overflow_hash _000282_hash = {
81919 + .next = NULL,
81920 + .name = "sys_process_vm_readv",
81921 + .file = "include/linux/syscalls.h",
81922 + .param3 = 1,
81923 + .param5 = 1,
81924 +};
81925 +struct size_overflow_hash _000284_hash = {
81926 + .next = NULL,
81927 + .name = "sys_process_vm_writev",
81928 + .file = "include/linux/syscalls.h",
81929 + .param3 = 1,
81930 + .param5 = 1,
81931 +};
81932 +struct size_overflow_hash _000286_hash = {
81933 + .next = NULL,
81934 + .name = "sys_sched_getaffinity",
81935 + .file = "include/linux/syscalls.h",
81936 + .param2 = 1,
81937 +};
81938 +struct size_overflow_hash _000287_hash = {
81939 + .next = NULL,
81940 + .name = "tomoyo_read_self",
81941 + .file = "security/tomoyo/securityfs_if.c",
81942 + .param3 = 1,
81943 +};
81944 +struct size_overflow_hash _000288_hash = {
81945 + .next = NULL,
81946 + .name = "tpm_read",
81947 + .file = "drivers/char/tpm/tpm.c",
81948 + .param3 = 1,
81949 +};
81950 +struct size_overflow_hash _000289_hash = {
81951 + .next = NULL,
81952 + .name = "user_read",
81953 + .file = "include/keys/user-type.h",
81954 + .param3 = 1,
81955 +};
81956 +struct size_overflow_hash _000290_hash = {
81957 + .next = NULL,
81958 + .name = "vcs_read",
81959 + .file = "drivers/tty/vt/vc_screen.c",
81960 + .param3 = 1,
81961 +};
81962 +struct size_overflow_hash _000291_hash = {
81963 + .next = NULL,
81964 + .name = "vfs_readv",
81965 + .file = "include/linux/fs.h",
81966 + .param3 = 1,
81967 +};
81968 +struct size_overflow_hash _000292_hash = {
81969 + .next = NULL,
81970 + .name = "vfs_writev",
81971 + .file = "include/linux/fs.h",
81972 + .param3 = 1,
81973 +};
81974 +struct size_overflow_hash _000293_hash = {
81975 + .next = NULL,
81976 + .name = "vga_arb_read",
81977 + .file = "drivers/gpu/vga/vgaarb.c",
81978 + .param3 = 1,
81979 +};
81980 +struct size_overflow_hash _000294_hash = {
81981 + .next = NULL,
81982 + .name = "xz_dec_lzma2_create",
81983 + .file = "lib/xz/xz_dec_lzma2.c",
81984 + .param2 = 1,
81985 +};
81986 +struct size_overflow_hash _000295_hash = {
81987 + .next = NULL,
81988 + .name = "aat2870_reg_read_file",
81989 + .file = "drivers/mfd/aat2870-core.c",
81990 + .param3 = 1,
81991 +};
81992 +struct size_overflow_hash _000296_hash = {
81993 + .next = NULL,
81994 + .name = "__alloc_bootmem",
81995 + .file = "include/linux/bootmem.h",
81996 + .param1 = 1,
81997 +};
81998 +struct size_overflow_hash _000297_hash = {
81999 + .next = NULL,
82000 + .name = "__alloc_bootmem_low",
82001 + .file = "include/linux/bootmem.h",
82002 + .param1 = 1,
82003 +};
82004 +struct size_overflow_hash _000298_hash = {
82005 + .next = NULL,
82006 + .name = "__alloc_bootmem_node_nopanic",
82007 + .file = "include/linux/bootmem.h",
82008 + .param2 = 1,
82009 +};
82010 +struct size_overflow_hash _000299_hash = {
82011 + .next = NULL,
82012 + .name = "blk_rq_map_kern",
82013 + .file = "include/linux/blkdev.h",
82014 + .param4 = 1,
82015 +};
82016 +struct size_overflow_hash _000300_hash = {
82017 + .next = NULL,
82018 + .name = "cgroup_read_s64",
82019 + .file = "kernel/cgroup.c",
82020 + .param5 = 1,
82021 +};
82022 +struct size_overflow_hash _000301_hash = {
82023 + .next = NULL,
82024 + .name = "cgroup_read_u64",
82025 + .file = "kernel/cgroup.c",
82026 + .param5 = 1,
82027 +};
82028 +struct size_overflow_hash _000302_hash = {
82029 + .next = NULL,
82030 + .name = "cpuset_common_file_read",
82031 + .file = "kernel/cpuset.c",
82032 + .param5 = 1,
82033 +};
82034 +struct size_overflow_hash _000303_hash = {
82035 + .next = NULL,
82036 + .name = "filter_read",
82037 + .file = "lib/dma-debug.c",
82038 + .param3 = 1,
82039 +};
82040 +struct size_overflow_hash _000304_hash = {
82041 + .next = NULL,
82042 + .name = "ima_show_htable_value",
82043 + .file = "security/integrity/ima/ima_fs.c",
82044 + .param2 = 1,
82045 +};
82046 +struct size_overflow_hash _000305_hash = {
82047 + .next = NULL,
82048 + .name = "kernel_readv",
82049 + .file = "fs/splice.c",
82050 + .param3 = 1,
82051 +};
82052 +struct size_overflow_hash _000306_hash = {
82053 + .next = NULL,
82054 + .name = "__kfifo_to_user",
82055 + .file = "include/linux/kfifo.h",
82056 + .param3 = 1,
82057 +};
82058 +struct size_overflow_hash _000307_hash = {
82059 + .next = NULL,
82060 + .name = "__kfifo_to_user_r",
82061 + .file = "include/linux/kfifo.h",
82062 + .param3 = 1,
82063 +};
82064 +struct size_overflow_hash _000308_hash = {
82065 + .next = NULL,
82066 + .name = "mqueue_read_file",
82067 + .file = "ipc/mqueue.c",
82068 + .param3 = 1,
82069 +};
82070 +struct size_overflow_hash _000309_hash = {
82071 + .next = NULL,
82072 + .name = "oom_adjust_read",
82073 + .file = "fs/proc/base.c",
82074 + .param3 = 1,
82075 +};
82076 +struct size_overflow_hash _000310_hash = {
82077 + .next = NULL,
82078 + .name = "oom_score_adj_read",
82079 + .file = "fs/proc/base.c",
82080 + .param3 = 1,
82081 +};
82082 +struct size_overflow_hash _000311_hash = {
82083 + .next = NULL,
82084 + .name = "pm_qos_power_read",
82085 + .file = "kernel/power/qos.c",
82086 + .param3 = 1,
82087 +};
82088 +struct size_overflow_hash _000312_hash = {
82089 + .next = NULL,
82090 + .name = "proc_coredump_filter_read",
82091 + .file = "fs/proc/base.c",
82092 + .param3 = 1,
82093 +};
82094 +struct size_overflow_hash _000313_hash = {
82095 + .next = NULL,
82096 + .name = "proc_fdinfo_read",
82097 + .file = "fs/proc/base.c",
82098 + .param3 = 1,
82099 +};
82100 +struct size_overflow_hash _000314_hash = {
82101 + .next = NULL,
82102 + .name = "proc_info_read",
82103 + .file = "fs/proc/base.c",
82104 + .param3 = 1,
82105 +};
82106 +struct size_overflow_hash _000315_hash = {
82107 + .next = NULL,
82108 + .name = "proc_loginuid_read",
82109 + .file = "fs/proc/base.c",
82110 + .param3 = 1,
82111 +};
82112 +struct size_overflow_hash _000316_hash = {
82113 + .next = NULL,
82114 + .name = "proc_pid_attr_read",
82115 + .file = "fs/proc/base.c",
82116 + .param3 = 1,
82117 +};
82118 +struct size_overflow_hash _000317_hash = {
82119 + .next = NULL,
82120 + .name = "proc_sessionid_read",
82121 + .file = "fs/proc/base.c",
82122 + .param3 = 1,
82123 +};
82124 +struct size_overflow_hash _000318_hash = {
82125 + .next = NULL,
82126 + .name = "pstore_file_read",
82127 + .file = "fs/pstore/inode.c",
82128 + .param3 = 1,
82129 +};
82130 +struct size_overflow_hash _000319_hash = {
82131 + .next = NULL,
82132 + .name = "read_enabled_file_bool",
82133 + .file = "kernel/kprobes.c",
82134 + .param3 = 1,
82135 +};
82136 +struct size_overflow_hash _000320_hash = {
82137 + .next = NULL,
82138 + .name = "read_file_blob",
82139 + .file = "fs/debugfs/file.c",
82140 + .param3 = 1,
82141 +};
82142 +struct size_overflow_hash _000321_hash = {
82143 + .next = NULL,
82144 + .name = "read_file_bool",
82145 + .file = "fs/debugfs/file.c",
82146 + .param3 = 1,
82147 +};
82148 +struct size_overflow_hash _000322_hash = {
82149 + .next = NULL,
82150 + .name = "read_from_oldmem",
82151 + .file = "fs/proc/vmcore.c",
82152 + .param2 = 1,
82153 +};
82154 +struct size_overflow_hash _000323_hash = {
82155 + .next = NULL,
82156 + .name = "read_oldmem",
82157 + .file = "drivers/char/mem.c",
82158 + .param3 = 1,
82159 +};
82160 +struct size_overflow_hash _000324_hash = {
82161 + .next = NULL,
82162 + .name = "res_counter_read",
82163 + .file = "include/linux/res_counter.h",
82164 + .param4 = 1,
82165 +};
82166 +struct size_overflow_hash _000325_hash = {
82167 + .next = NULL,
82168 + .name = "sel_read_avc_cache_threshold",
82169 + .file = "security/selinux/selinuxfs.c",
82170 + .param3 = 1,
82171 +};
82172 +struct size_overflow_hash _000326_hash = {
82173 + .next = NULL,
82174 + .name = "sel_read_avc_hash_stats",
82175 + .file = "security/selinux/selinuxfs.c",
82176 + .param3 = 1,
82177 +};
82178 +struct size_overflow_hash _000327_hash = {
82179 + .next = NULL,
82180 + .name = "sel_read_bool",
82181 + .file = "security/selinux/selinuxfs.c",
82182 + .param3 = 1,
82183 +};
82184 +struct size_overflow_hash _000328_hash = {
82185 + .next = NULL,
82186 + .name = "sel_read_checkreqprot",
82187 + .file = "security/selinux/selinuxfs.c",
82188 + .param3 = 1,
82189 +};
82190 +struct size_overflow_hash _000329_hash = {
82191 + .next = NULL,
82192 + .name = "sel_read_class",
82193 + .file = "security/selinux/selinuxfs.c",
82194 + .param3 = 1,
82195 +};
82196 +struct size_overflow_hash _000330_hash = {
82197 + .next = NULL,
82198 + .name = "sel_read_enforce",
82199 + .file = "security/selinux/selinuxfs.c",
82200 + .param3 = 1,
82201 +};
82202 +struct size_overflow_hash _000331_hash = {
82203 + .next = NULL,
82204 + .name = "sel_read_handle_status",
82205 + .file = "security/selinux/selinuxfs.c",
82206 + .param3 = 1,
82207 +};
82208 +struct size_overflow_hash _000332_hash = {
82209 + .next = NULL,
82210 + .name = "sel_read_handle_unknown",
82211 + .file = "security/selinux/selinuxfs.c",
82212 + .param3 = 1,
82213 +};
82214 +struct size_overflow_hash _000333_hash = {
82215 + .next = NULL,
82216 + .name = "sel_read_initcon",
82217 + .file = "security/selinux/selinuxfs.c",
82218 + .param3 = 1,
82219 +};
82220 +struct size_overflow_hash _000334_hash = {
82221 + .next = NULL,
82222 + .name = "sel_read_mls",
82223 + .file = "security/selinux/selinuxfs.c",
82224 + .param3 = 1,
82225 +};
82226 +struct size_overflow_hash _000335_hash = {
82227 + .next = NULL,
82228 + .name = "sel_read_perm",
82229 + .file = "security/selinux/selinuxfs.c",
82230 + .param3 = 1,
82231 +};
82232 +struct size_overflow_hash _000336_hash = {
82233 + .next = NULL,
82234 + .name = "sel_read_policy",
82235 + .file = "security/selinux/selinuxfs.c",
82236 + .param3 = 1,
82237 +};
82238 +struct size_overflow_hash _000337_hash = {
82239 + .next = NULL,
82240 + .name = "sel_read_policycap",
82241 + .file = "security/selinux/selinuxfs.c",
82242 + .param3 = 1,
82243 +};
82244 +struct size_overflow_hash _000338_hash = {
82245 + .next = NULL,
82246 + .name = "sel_read_policyvers",
82247 + .file = "security/selinux/selinuxfs.c",
82248 + .param3 = 1,
82249 +};
82250 +struct size_overflow_hash _000339_hash = {
82251 + .next = NULL,
82252 + .name = "simple_attr_read",
82253 + .file = "include/linux/fs.h",
82254 + .param3 = 1,
82255 +};
82256 +struct size_overflow_hash _000340_hash = {
82257 + .next = NULL,
82258 + .name = "simple_transaction_read",
82259 + .file = "include/linux/fs.h",
82260 + .param3 = 1,
82261 +};
82262 +struct size_overflow_hash _000341_hash = {
82263 + .next = NULL,
82264 + .name = "smk_read_ambient",
82265 + .file = "security/smack/smackfs.c",
82266 + .param3 = 1,
82267 +};
82268 +struct size_overflow_hash _000342_hash = {
82269 + .next = NULL,
82270 + .name = "smk_read_direct",
82271 + .file = "security/smack/smackfs.c",
82272 + .param3 = 1,
82273 +};
82274 +struct size_overflow_hash _000343_hash = {
82275 + .next = NULL,
82276 + .name = "smk_read_doi",
82277 + .file = "security/smack/smackfs.c",
82278 + .param3 = 1,
82279 +};
82280 +struct size_overflow_hash _000344_hash = {
82281 + .next = NULL,
82282 + .name = "smk_read_logging",
82283 + .file = "security/smack/smackfs.c",
82284 + .param3 = 1,
82285 +};
82286 +struct size_overflow_hash _000345_hash = {
82287 + .next = NULL,
82288 + .name = "smk_read_onlycap",
82289 + .file = "security/smack/smackfs.c",
82290 + .param3 = 1,
82291 +};
82292 +struct size_overflow_hash _000346_hash = {
82293 + .next = NULL,
82294 + .name = "snapshot_read",
82295 + .file = "kernel/power/user.c",
82296 + .param3 = 1,
82297 +};
82298 +struct size_overflow_hash _000347_hash = {
82299 + .next = NULL,
82300 + .name = "supply_map_read_file",
82301 + .file = "drivers/regulator/core.c",
82302 + .param3 = 1,
82303 +};
82304 +struct size_overflow_hash _000348_hash = {
82305 + .next = NULL,
82306 + .name = "sysfs_read_file",
82307 + .file = "fs/sysfs/file.c",
82308 + .param3 = 1,
82309 +};
82310 +struct size_overflow_hash _000349_hash = {
82311 + .next = NULL,
82312 + .name = "sys_preadv",
82313 + .file = "include/linux/syscalls.h",
82314 + .param3 = 1,
82315 +};
82316 +struct size_overflow_hash _000350_hash = {
82317 + .next = NULL,
82318 + .name = "sys_pwritev",
82319 + .file = "include/linux/syscalls.h",
82320 + .param3 = 1,
82321 +};
82322 +struct size_overflow_hash _000351_hash = {
82323 + .next = NULL,
82324 + .name = "sys_readv",
82325 + .file = "include/linux/syscalls.h",
82326 + .param3 = 1,
82327 +};
82328 +struct size_overflow_hash _000352_hash = {
82329 + .next = NULL,
82330 + .name = "sys_rt_sigpending",
82331 + .file = "include/linux/syscalls.h",
82332 + .param2 = 1,
82333 +};
82334 +struct size_overflow_hash _000353_hash = {
82335 + .next = NULL,
82336 + .name = "sys_writev",
82337 + .file = "include/linux/syscalls.h",
82338 + .param3 = 1,
82339 +};
82340 +struct size_overflow_hash _000354_hash = {
82341 + .next = NULL,
82342 + .name = "ima_show_htable_violations",
82343 + .file = "security/integrity/ima/ima_fs.c",
82344 + .param3 = 1,
82345 +};
82346 +struct size_overflow_hash _000355_hash = {
82347 + .next = NULL,
82348 + .name = "ima_show_measurements_count",
82349 + .file = "security/integrity/ima/ima_fs.c",
82350 + .param3 = 1,
82351 +};
82352 +struct size_overflow_hash _000356_hash = {
82353 + .next = NULL,
82354 + .name = "alloc_cpu_rmap",
82355 + .file = "include/linux/cpu_rmap.h",
82356 + .param1 = 1,
82357 +};
82358 +struct size_overflow_hash _000357_hash = {
82359 + .next = NULL,
82360 + .name = "alloc_page_cgroup",
82361 + .file = "mm/page_cgroup.c",
82362 + .param1 = 1,
82363 +};
82364 +struct size_overflow_hash _000358_hash = {
82365 + .next = NULL,
82366 + .name = "alloc_sched_domains",
82367 + .file = "include/linux/sched.h",
82368 + .param1 = 1,
82369 +};
82370 +struct size_overflow_hash _000359_hash = {
82371 + .next = NULL,
82372 + .name = "compat_rw_copy_check_uvector",
82373 + .file = "include/linux/compat.h",
82374 + .param3 = 1,
82375 +};
82376 +struct size_overflow_hash _000360_hash = {
82377 + .next = NULL,
82378 + .name = "compat_sys_kexec_load",
82379 + .file = "include/linux/kexec.h",
82380 + .param2 = 1,
82381 +};
82382 +struct size_overflow_hash _000361_hash = {
82383 + .next = NULL,
82384 + .name = "compat_sys_semtimedop",
82385 + .file = "include/linux/compat.h",
82386 + .param3 = 1,
82387 +};
82388 +struct size_overflow_hash _000362_hash = {
82389 + .next = NULL,
82390 + .name = "copy_from_user",
82391 + .file = "arch/x86/include/asm/uaccess_64.h",
82392 + .param3 = 1,
82393 +};
82394 +struct size_overflow_hash _000363_hash = {
82395 + .next = NULL,
82396 + .name = "__copy_from_user",
82397 + .file = "arch/x86/include/asm/uaccess_64.h",
82398 + .param3 = 1,
82399 +};
82400 +struct size_overflow_hash _000364_hash = {
82401 + .next = NULL,
82402 + .name = "__copy_from_user_inatomic",
82403 + .file = "arch/x86/include/asm/uaccess_64.h",
82404 + .param3 = 1,
82405 +};
82406 +struct size_overflow_hash _000365_hash = {
82407 + .next = NULL,
82408 + .name = "__copy_from_user_nocache",
82409 + .file = "arch/x86/include/asm/uaccess_64.h",
82410 + .param3 = 1,
82411 +};
82412 +struct size_overflow_hash _000366_hash = {
82413 + .next = NULL,
82414 + .name = "__copy_in_user",
82415 + .file = "arch/x86/include/asm/uaccess_64.h",
82416 + .param3 = 1,
82417 +};
82418 +struct size_overflow_hash _000367_hash = {
82419 + .next = NULL,
82420 + .name = "copy_in_user",
82421 + .file = "arch/x86/include/asm/uaccess_64.h",
82422 + .param3 = 1,
82423 +};
82424 +struct size_overflow_hash _000368_hash = {
82425 + .next = NULL,
82426 + .name = "__copy_to_user",
82427 + .file = "arch/x86/include/asm/uaccess_64.h",
82428 + .param3 = 1,
82429 +};
82430 +struct size_overflow_hash _000369_hash = {
82431 + .next = NULL,
82432 + .name = "copy_to_user",
82433 + .file = "arch/x86/include/asm/uaccess_64.h",
82434 + .param3 = 1,
82435 +};
82436 +struct size_overflow_hash _000370_hash = {
82437 + .next = NULL,
82438 + .name = "__copy_to_user_inatomic",
82439 + .file = "arch/x86/include/asm/uaccess_64.h",
82440 + .param3 = 1,
82441 +};
82442 +struct size_overflow_hash _000371_hash = {
82443 + .next = NULL,
82444 + .name = "kmalloc_node",
82445 + .file = "include/linux/slub_def.h",
82446 + .param1 = 1,
82447 +};
82448 +struct size_overflow_hash _000372_hash = {
82449 + .next = NULL,
82450 + .name = "pcpu_alloc_bootmem",
82451 + .file = "arch/x86/kernel/setup_percpu.c",
82452 + .param2 = 1,
82453 +};
82454 +struct size_overflow_hash _000373_hash = {
82455 + .next = NULL,
82456 + .name = "sys32_rt_sigpending",
82457 + .file = "arch/x86/include/asm/sys_ia32.h",
82458 + .param2 = 1,
82459 +};
82460 +struct size_overflow_hash _000374_hash = {
82461 + .next = NULL,
82462 + .name = "tunables_read",
82463 + .file = "arch/x86/platform/uv/tlb_uv.c",
82464 + .param3 = 1,
82465 +};
82466 +struct size_overflow_hash _000375_hash = {
82467 + .next = NULL,
82468 + .name = "compat_do_readv_writev",
82469 + .file = "fs/compat.c",
82470 + .param4 = 1,
82471 +};
82472 +struct size_overflow_hash _000376_hash = {
82473 + .next = NULL,
82474 + .name = "compat_keyctl_instantiate_key_iov",
82475 + .file = "security/keys/compat.c",
82476 + .param3 = 1,
82477 +};
82478 +struct size_overflow_hash _000377_hash = {
82479 + .next = NULL,
82480 + .name = "compat_process_vm_rw",
82481 + .file = "mm/process_vm_access.c",
82482 + .param3 = 1,
82483 + .param5 = 1,
82484 +};
82485 +struct size_overflow_hash _000379_hash = {
82486 + .next = NULL,
82487 + .name = "do_pages_stat",
82488 + .file = "mm/migrate.c",
82489 + .param2 = 1,
82490 +};
82491 +struct size_overflow_hash _000380_hash = {
82492 + .next = NULL,
82493 + .name = "kzalloc_node",
82494 + .file = "include/linux/slab.h",
82495 + .param1 = 1,
82496 +};
82497 +struct size_overflow_hash _000381_hash = {
82498 + .next = NULL,
82499 + .name = "pcpu_fc_alloc",
82500 + .file = "arch/x86/kernel/setup_percpu.c",
82501 + .param2 = 1,
82502 +};
82503 +struct size_overflow_hash _000382_hash = {
82504 + .next = NULL,
82505 + .name = "ptc_proc_write",
82506 + .file = "arch/x86/platform/uv/tlb_uv.c",
82507 + .param3 = 1,
82508 +};
82509 +struct size_overflow_hash _000383_hash = {
82510 + .next = NULL,
82511 + .name = "tunables_write",
82512 + .file = "arch/x86/platform/uv/tlb_uv.c",
82513 + .param3 = 1,
82514 +};
82515 +struct size_overflow_hash _000384_hash = {
82516 + .next = NULL,
82517 + .name = "__alloc_bootmem_low_node",
82518 + .file = "include/linux/bootmem.h",
82519 + .param2 = 1,
82520 +};
82521 +struct size_overflow_hash _000385_hash = {
82522 + .next = NULL,
82523 + .name = "__alloc_bootmem_node",
82524 + .file = "include/linux/bootmem.h",
82525 + .param2 = 1,
82526 +};
82527 +struct size_overflow_hash _000386_hash = {
82528 + .next = NULL,
82529 + .name = "compat_readv",
82530 + .file = "fs/compat.c",
82531 + .param3 = 1,
82532 +};
82533 +struct size_overflow_hash _000387_hash = {
82534 + .next = NULL,
82535 + .name = "compat_sys_keyctl",
82536 + .file = "include/linux/compat.h",
82537 + .param4 = 1,
82538 +};
82539 +struct size_overflow_hash _000388_hash = {
82540 + .next = NULL,
82541 + .name = "compat_sys_process_vm_readv",
82542 + .file = "include/linux/compat.h",
82543 + .param3 = 1,
82544 + .param5 = 1,
82545 +};
82546 +struct size_overflow_hash _000390_hash = {
82547 + .next = NULL,
82548 + .name = "compat_sys_process_vm_writev",
82549 + .file = "include/linux/compat.h",
82550 + .param3 = 1,
82551 + .param5 = 1,
82552 +};
82553 +struct size_overflow_hash _000392_hash = {
82554 + .next = NULL,
82555 + .name = "compat_writev",
82556 + .file = "fs/compat.c",
82557 + .param3 = 1,
82558 +};
82559 +struct size_overflow_hash _000393_hash = {
82560 + .next = NULL,
82561 + .name = "sys_move_pages",
82562 + .file = "include/linux/syscalls.h",
82563 + .param2 = 1,
82564 +};
82565 +struct size_overflow_hash _000394_hash = {
82566 + .next = NULL,
82567 + .name = "__alloc_bootmem_node_high",
82568 + .file = "include/linux/bootmem.h",
82569 + .param2 = 1,
82570 +};
82571 +struct size_overflow_hash _000395_hash = {
82572 + .next = NULL,
82573 + .name = "compat_sys_move_pages",
82574 + .file = "include/linux/compat.h",
82575 + .param2 = 1,
82576 +};
82577 +struct size_overflow_hash _000396_hash = {
82578 + .next = NULL,
82579 + .name = "compat_sys_preadv",
82580 + .file = "include/linux/compat.h",
82581 + .param3 = 1,
82582 +};
82583 +struct size_overflow_hash _000397_hash = {
82584 + .next = NULL,
82585 + .name = "compat_sys_pwritev",
82586 + .file = "include/linux/compat.h",
82587 + .param3 = 1,
82588 +};
82589 +struct size_overflow_hash _000398_hash = {
82590 + .next = NULL,
82591 + .name = "compat_sys_readv",
82592 + .file = "include/linux/compat.h",
82593 + .param3 = 1,
82594 +};
82595 +struct size_overflow_hash _000399_hash = {
82596 + .next = NULL,
82597 + .name = "compat_sys_writev",
82598 + .file = "include/linux/compat.h",
82599 + .param3 = 1,
82600 +};
82601 +struct size_overflow_hash _000400_hash = {
82602 + .next = NULL,
82603 + .name = "sparse_early_usemaps_alloc_node",
82604 + .file = "mm/sparse.c",
82605 + .param4 = 1,
82606 +};
82607 +struct size_overflow_hash _000401_hash = {
82608 + .next = NULL,
82609 + .name = "__earlyonly_bootmem_alloc",
82610 + .file = "mm/sparse-vmemmap.c",
82611 + .param2 = 1,
82612 +};
82613 +struct size_overflow_hash _000402_hash = {
82614 + .next = NULL,
82615 + .name = "sparse_mem_maps_populate_node",
82616 + .file = "include/linux/mm.h",
82617 + .param4 = 1,
82618 +};
82619 +struct size_overflow_hash _000403_hash = {
82620 + .next = NULL,
82621 + .name = "vmemmap_alloc_block",
82622 + .file = "include/linux/mm.h",
82623 + .param1 = 1,
82624 +};
82625 +struct size_overflow_hash _000404_hash = {
82626 + .next = NULL,
82627 + .name = "sparse_early_mem_maps_alloc_node",
82628 + .file = "mm/sparse.c",
82629 + .param4 = 1,
82630 +};
82631 +struct size_overflow_hash _000405_hash = {
82632 + .next = NULL,
82633 + .name = "vmemmap_alloc_block_buf",
82634 + .file = "include/linux/mm.h",
82635 + .param1 = 1,
82636 +};
82637 +struct size_overflow_hash _000406_hash = {
82638 + .next = NULL,
82639 + .name = "acpi_battery_write_alarm",
82640 + .file = "drivers/acpi/battery.c",
82641 + .param3 = 1,
82642 +};
82643 +struct size_overflow_hash _000407_hash = {
82644 + .next = NULL,
82645 + .name = "acpi_battery_write_alarm",
82646 + .file = "drivers/acpi/sbs.c",
82647 + .param3 = 1,
82648 +};
82649 +struct size_overflow_hash _000408_hash = {
82650 + .next = NULL,
82651 + .name = "ad7879_spi_xfer",
82652 + .file = "drivers/input/touchscreen/ad7879-spi.c",
82653 + .param3 = 1,
82654 +};
82655 +struct size_overflow_hash _000409_hash = {
82656 + .next = NULL,
82657 + .name = "add_port",
82658 + .file = "drivers/char/virtio_console.c",
82659 + .param2 = 1,
82660 +};
82661 +struct size_overflow_hash _000410_hash = {
82662 + .next = NULL,
82663 + .name = "addtgt",
82664 + .file = "drivers/block/aoe/aoecmd.c",
82665 + .param3 = 1,
82666 +};
82667 +struct size_overflow_hash _000411_hash = {
82668 + .next = NULL,
82669 + .name = "adu_read",
82670 + .file = "drivers/usb/misc/adutux.c",
82671 + .param3 = 1,
82672 +};
82673 +struct size_overflow_hash _000412_hash = {
82674 + .next = NULL,
82675 + .name = "adu_write",
82676 + .file = "drivers/usb/misc/adutux.c",
82677 + .param3 = 1,
82678 +};
82679 +struct size_overflow_hash _000413_hash = {
82680 + .next = NULL,
82681 + .name = "aer_inject_write",
82682 + .file = "drivers/pci/pcie/aer/aer_inject.c",
82683 + .param3 = 1,
82684 +};
82685 +struct size_overflow_hash _000414_hash = {
82686 + .next = NULL,
82687 + .name = "aes_decrypt_fail_read",
82688 + .file = "drivers/net/wireless/wl1251/debugfs.c",
82689 + .param3 = 1,
82690 +};
82691 +struct size_overflow_hash _000415_hash = {
82692 + .next = NULL,
82693 + .name = "aes_decrypt_interrupt_read",
82694 + .file = "drivers/net/wireless/wl1251/debugfs.c",
82695 + .param3 = 1,
82696 +};
82697 +struct size_overflow_hash _000416_hash = {
82698 + .next = NULL,
82699 + .name = "aes_decrypt_packets_read",
82700 + .file = "drivers/net/wireless/wl1251/debugfs.c",
82701 + .param3 = 1,
82702 +};
82703 +struct size_overflow_hash _000417_hash = {
82704 + .next = NULL,
82705 + .name = "aes_encrypt_fail_read",
82706 + .file = "drivers/net/wireless/wl1251/debugfs.c",
82707 + .param3 = 1,
82708 +};
82709 +struct size_overflow_hash _000418_hash = {
82710 + .next = NULL,
82711 + .name = "aes_encrypt_interrupt_read",
82712 + .file = "drivers/net/wireless/wl1251/debugfs.c",
82713 + .param3 = 1,
82714 +};
82715 +struct size_overflow_hash _000419_hash = {
82716 + .next = NULL,
82717 + .name = "aes_encrypt_packets_read",
82718 + .file = "drivers/net/wireless/wl1251/debugfs.c",
82719 + .param3 = 1,
82720 +};
82721 +struct size_overflow_hash _000420_hash = {
82722 + .next = NULL,
82723 + .name = "afs_alloc_flat_call",
82724 + .file = "fs/afs/rxrpc.c",
82725 + .param2 = 1,
82726 + .param3 = 1,
82727 +};
82728 +struct size_overflow_hash _000422_hash = {
82729 + .next = NULL,
82730 + .name = "afs_cell_alloc",
82731 + .file = "fs/afs/cell.c",
82732 + .param2 = 1,
82733 +};
82734 +struct size_overflow_hash _000423_hash = {
82735 + .next = NULL,
82736 + .name = "afs_proc_cells_write",
82737 + .file = "fs/afs/proc.c",
82738 + .param3 = 1,
82739 +};
82740 +struct size_overflow_hash _000424_hash = {
82741 + .next = NULL,
82742 + .name = "afs_proc_rootcell_write",
82743 + .file = "fs/afs/proc.c",
82744 + .param3 = 1,
82745 +};
82746 +struct size_overflow_hash _000425_hash = {
82747 + .next = NULL,
82748 + .name = "aggr_recv_addba_req_evt",
82749 + .file = "drivers/net/wireless/ath/ath6kl/txrx.c",
82750 + .param4 = 1,
82751 +};
82752 +struct size_overflow_hash _000426_hash = {
82753 + .next = NULL,
82754 + .name = "agp_3_5_isochronous_node_enable",
82755 + .file = "drivers/char/agp/isoch.c",
82756 + .param3 = 1,
82757 +};
82758 +struct size_overflow_hash _000427_hash = {
82759 + .next = NULL,
82760 + .name = "agp_alloc_page_array",
82761 + .file = "drivers/char/agp/generic.c",
82762 + .param1 = 1,
82763 +};
82764 +struct size_overflow_hash _000428_hash = {
82765 + .next = NULL,
82766 + .name = "alg_setkey",
82767 + .file = "crypto/af_alg.c",
82768 + .param3 = 1,
82769 +};
82770 +struct size_overflow_hash _000429_hash = {
82771 + .next = NULL,
82772 + .name = "alloc_buf",
82773 + .file = "drivers/char/virtio_console.c",
82774 + .param1 = 1,
82775 +};
82776 +struct size_overflow_hash _000430_hash = {
82777 + .next = NULL,
82778 + .name = "alloc_context",
82779 + .file = "drivers/md/dm-raid1.c",
82780 + .param1 = 1,
82781 +};
82782 +struct size_overflow_hash _000431_hash = {
82783 + .next = NULL,
82784 + .name = "alloc_context",
82785 + .file = "drivers/md/dm-stripe.c",
82786 + .param1 = 1,
82787 +};
82788 +struct size_overflow_hash _000432_hash = {
82789 + .next = NULL,
82790 + .name = "__alloc_dev_table",
82791 + .file = "fs/exofs/super.c",
82792 + .param2 = 1,
82793 +};
82794 +struct size_overflow_hash _000433_hash = {
82795 + .next = NULL,
82796 + .name = "alloc_ep_req",
82797 + .file = "drivers/usb/gadget/f_midi.c",
82798 + .param2 = 1,
82799 +};
82800 +struct size_overflow_hash _000434_hash = {
82801 + .next = NULL,
82802 + .name = "alloc_flex_gd",
82803 + .file = "fs/ext4/resize.c",
82804 + .param1 = 1,
82805 +};
82806 +struct size_overflow_hash _000435_hash = {
82807 + .next = NULL,
82808 + .name = "__alloc_objio_seg",
82809 + .file = "fs/nfs/objlayout/objio_osd.c",
82810 + .param1 = 1,
82811 +};
82812 +struct size_overflow_hash _000436_hash = {
82813 + .next = NULL,
82814 + .name = "alloc_one_pg_vec_page",
82815 + .file = "net/packet/af_packet.c",
82816 + .param1 = 1,
82817 +};
82818 +struct size_overflow_hash _000437_hash = {
82819 + .next = NULL,
82820 + .name = "alloc_ring",
82821 + .file = "drivers/net/ethernet/chelsio/cxgb3/sge.c",
82822 + .param2 = 1,
82823 + .param4 = 1,
82824 +};
82825 +struct size_overflow_hash _000438_hash = {
82826 + .next = NULL,
82827 + .name = "alloc_ring",
82828 + .file = "drivers/net/ethernet/chelsio/cxgb4vf/sge.c",
82829 + .param2 = 1,
82830 + .param4 = 1,
82831 +};
82832 +struct size_overflow_hash _000441_hash = {
82833 + .next = NULL,
82834 + .name = "alloc_ts_config",
82835 + .file = "include/linux/textsearch.h",
82836 + .param1 = 1,
82837 +};
82838 +struct size_overflow_hash _000442_hash = {
82839 + .next = NULL,
82840 + .name = "altera_drscan",
82841 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
82842 + .param2 = 1,
82843 +};
82844 +struct size_overflow_hash _000443_hash = {
82845 + .next = NULL,
82846 + .name = "altera_irscan",
82847 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
82848 + .param2 = 1,
82849 +};
82850 +struct size_overflow_hash _000444_hash = {
82851 + .next = &_000066_hash,
82852 + .name = "altera_set_dr_post",
82853 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
82854 + .param2 = 1,
82855 +};
82856 +struct size_overflow_hash _000445_hash = {
82857 + .next = NULL,
82858 + .name = "altera_set_dr_pre",
82859 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
82860 + .param2 = 1,
82861 +};
82862 +struct size_overflow_hash _000446_hash = {
82863 + .next = NULL,
82864 + .name = "altera_set_ir_post",
82865 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
82866 + .param2 = 1,
82867 +};
82868 +struct size_overflow_hash _000447_hash = {
82869 + .next = NULL,
82870 + .name = "altera_set_ir_pre",
82871 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
82872 + .param2 = 1,
82873 +};
82874 +struct size_overflow_hash _000448_hash = {
82875 + .next = NULL,
82876 + .name = "altera_swap_dr",
82877 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
82878 + .param2 = 1,
82879 +};
82880 +struct size_overflow_hash _000449_hash = {
82881 + .next = NULL,
82882 + .name = "altera_swap_ir",
82883 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
82884 + .param2 = 1,
82885 +};
82886 +struct size_overflow_hash _000450_hash = {
82887 + .next = NULL,
82888 + .name = "aoedev_flush",
82889 + .file = "drivers/block/aoe/aoedev.c",
82890 + .param2 = 1,
82891 +};
82892 +struct size_overflow_hash _000451_hash = {
82893 + .next = NULL,
82894 + .name = "asd_store_update_bios",
82895 + .file = "drivers/scsi/aic94xx/aic94xx_init.c",
82896 + .param4 = 1,
82897 +};
82898 +struct size_overflow_hash _000452_hash = {
82899 + .next = NULL,
82900 + .name = "asix_read_cmd",
82901 + .file = "drivers/net/usb/asix.c",
82902 + .param5 = 1,
82903 +};
82904 +struct size_overflow_hash _000453_hash = {
82905 + .next = NULL,
82906 + .name = "asix_write_cmd",
82907 + .file = "drivers/net/usb/asix.c",
82908 + .param5 = 1,
82909 +};
82910 +struct size_overflow_hash _000454_hash = {
82911 + .next = NULL,
82912 + .name = "asn1_octets_decode",
82913 + .file = "net/ipv4/netfilter/nf_nat_snmp_basic.c",
82914 + .param2 = 1,
82915 +};
82916 +struct size_overflow_hash _000455_hash = {
82917 + .next = NULL,
82918 + .name = "asn1_oid_decode",
82919 + .file = "net/ipv4/netfilter/nf_nat_snmp_basic.c",
82920 + .param2 = 1,
82921 +};
82922 +struct size_overflow_hash _000456_hash = {
82923 + .next = NULL,
82924 + .name = "asn1_oid_decode",
82925 + .file = "fs/cifs/asn1.c",
82926 + .param2 = 1,
82927 +};
82928 +struct size_overflow_hash _000457_hash = {
82929 + .next = NULL,
82930 + .name = "ath6kl_add_bss_if_needed",
82931 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
82932 + .param6 = 1,
82933 +};
82934 +struct size_overflow_hash _000458_hash = {
82935 + .next = NULL,
82936 + .name = "ath6kl_debug_roam_tbl_event",
82937 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82938 + .param3 = 1,
82939 +};
82940 +struct size_overflow_hash _000459_hash = {
82941 + .next = NULL,
82942 + .name = "ath6kl_disconnect_timeout_read",
82943 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82944 + .param3 = 1,
82945 +};
82946 +struct size_overflow_hash _000460_hash = {
82947 + .next = NULL,
82948 + .name = "ath6kl_endpoint_stats_read",
82949 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82950 + .param3 = 1,
82951 +};
82952 +struct size_overflow_hash _000461_hash = {
82953 + .next = NULL,
82954 + .name = "ath6kl_fwlog_mask_read",
82955 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82956 + .param3 = 1,
82957 +};
82958 +struct size_overflow_hash _000462_hash = {
82959 + .next = NULL,
82960 + .name = "ath6kl_fwlog_read",
82961 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82962 + .param3 = 1,
82963 +};
82964 +struct size_overflow_hash _000463_hash = {
82965 + .next = NULL,
82966 + .name = "ath6kl_keepalive_read",
82967 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82968 + .param3 = 1,
82969 +};
82970 +struct size_overflow_hash _000464_hash = {
82971 + .next = NULL,
82972 + .name = "ath6kl_lrssi_roam_read",
82973 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82974 + .param3 = 1,
82975 +};
82976 +struct size_overflow_hash _000465_hash = {
82977 + .next = NULL,
82978 + .name = "ath6kl_regdump_read",
82979 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82980 + .param3 = 1,
82981 +};
82982 +struct size_overflow_hash _000466_hash = {
82983 + .next = NULL,
82984 + .name = "ath6kl_regread_read",
82985 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82986 + .param3 = 1,
82987 +};
82988 +struct size_overflow_hash _000467_hash = {
82989 + .next = NULL,
82990 + .name = "ath6kl_regwrite_read",
82991 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82992 + .param3 = 1,
82993 +};
82994 +struct size_overflow_hash _000468_hash = {
82995 + .next = NULL,
82996 + .name = "ath6kl_roam_table_read",
82997 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82998 + .param3 = 1,
82999 +};
83000 +struct size_overflow_hash _000469_hash = {
83001 + .next = NULL,
83002 + .name = "ath6kl_send_go_probe_resp",
83003 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
83004 + .param3 = 1,
83005 +};
83006 +struct size_overflow_hash _000470_hash = {
83007 + .next = NULL,
83008 + .name = "ath6kl_set_ap_probe_resp_ies",
83009 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
83010 + .param3 = 1,
83011 +};
83012 +struct size_overflow_hash _000471_hash = {
83013 + .next = NULL,
83014 + .name = "ath6kl_set_assoc_req_ies",
83015 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
83016 + .param3 = 1,
83017 +};
83018 +struct size_overflow_hash _000472_hash = {
83019 + .next = NULL,
83020 + .name = "ath6kl_tm_rx_report_event",
83021 + .file = "drivers/net/wireless/ath/ath6kl/testmode.c",
83022 + .param3 = 1,
83023 +};
83024 +struct size_overflow_hash _000473_hash = {
83025 + .next = NULL,
83026 + .name = "ath6kl_wmi_send_action_cmd",
83027 + .file = "drivers/net/wireless/ath/ath6kl/wmi.c",
83028 + .param7 = 1,
83029 +};
83030 +struct size_overflow_hash _000474_hash = {
83031 + .next = NULL,
83032 + .name = "ath6kl_wmi_send_mgmt_cmd",
83033 + .file = "drivers/net/wireless/ath/ath6kl/wmi.c",
83034 + .param7 = 1,
83035 +};
83036 +struct size_overflow_hash _000475_hash = {
83037 + .next = NULL,
83038 + .name = "ath9k_debugfs_read_buf",
83039 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
83040 + .param3 = 1,
83041 +};
83042 +struct size_overflow_hash _000476_hash = {
83043 + .next = NULL,
83044 + .name = "atk_debugfs_ggrp_read",
83045 + .file = "drivers/hwmon/asus_atk0110.c",
83046 + .param3 = 1,
83047 +};
83048 +struct size_overflow_hash _000477_hash = {
83049 + .next = NULL,
83050 + .name = "atm_get_addr",
83051 + .file = "net/atm/addr.c",
83052 + .param3 = 1,
83053 +};
83054 +struct size_overflow_hash _000478_hash = {
83055 + .next = NULL,
83056 + .name = "attach_hdlc_protocol",
83057 + .file = "include/linux/hdlc.h",
83058 + .param3 = 1,
83059 +};
83060 +struct size_overflow_hash _000479_hash = {
83061 + .next = NULL,
83062 + .name = "av7110_vbi_write",
83063 + .file = "drivers/media/dvb/ttpci/av7110_v4l.c",
83064 + .param3 = 1,
83065 +};
83066 +struct size_overflow_hash _000480_hash = {
83067 + .next = NULL,
83068 + .name = "ax25_setsockopt",
83069 + .file = "net/ax25/af_ax25.c",
83070 + .param5 = 1,
83071 +};
83072 +struct size_overflow_hash _000481_hash = {
83073 + .next = NULL,
83074 + .name = "b43_debugfs_read",
83075 + .file = "drivers/net/wireless/b43/debugfs.c",
83076 + .param3 = 1,
83077 +};
83078 +struct size_overflow_hash _000482_hash = {
83079 + .next = NULL,
83080 + .name = "b43_debugfs_write",
83081 + .file = "drivers/net/wireless/b43/debugfs.c",
83082 + .param3 = 1,
83083 +};
83084 +struct size_overflow_hash _000483_hash = {
83085 + .next = NULL,
83086 + .name = "b43legacy_debugfs_read",
83087 + .file = "drivers/net/wireless/b43legacy/debugfs.c",
83088 + .param3 = 1,
83089 +};
83090 +struct size_overflow_hash _000484_hash = {
83091 + .next = NULL,
83092 + .name = "b43legacy_debugfs_write",
83093 + .file = "drivers/net/wireless/b43legacy/debugfs.c",
83094 + .param3 = 1,
83095 +};
83096 +struct size_overflow_hash _000485_hash = {
83097 + .next = NULL,
83098 + .name = "b43_nphy_load_samples",
83099 + .file = "drivers/net/wireless/b43/phy_n.c",
83100 + .param3 = 1,
83101 +};
83102 +struct size_overflow_hash _000486_hash = {
83103 + .next = NULL,
83104 + .name = "bch_alloc",
83105 + .file = "lib/bch.c",
83106 + .param1 = 1,
83107 +};
83108 +struct size_overflow_hash _000487_hash = {
83109 + .next = NULL,
83110 + .name = "bfad_debugfs_read",
83111 + .file = "drivers/scsi/bfa/bfad_debugfs.c",
83112 + .param3 = 1,
83113 +};
83114 +struct size_overflow_hash _000488_hash = {
83115 + .next = NULL,
83116 + .name = "bfad_debugfs_read_regrd",
83117 + .file = "drivers/scsi/bfa/bfad_debugfs.c",
83118 + .param3 = 1,
83119 +};
83120 +struct size_overflow_hash _000489_hash = {
83121 + .next = NULL,
83122 + .name = "bfad_debugfs_write_regrd",
83123 + .file = "drivers/scsi/bfa/bfad_debugfs.c",
83124 + .param3 = 1,
83125 +};
83126 +struct size_overflow_hash _000490_hash = {
83127 + .next = NULL,
83128 + .name = "bfad_debugfs_write_regwr",
83129 + .file = "drivers/scsi/bfa/bfad_debugfs.c",
83130 + .param3 = 1,
83131 +};
83132 +struct size_overflow_hash _000491_hash = {
83133 + .next = NULL,
83134 + .name = "bits_to_user",
83135 + .file = "drivers/input/evdev.c",
83136 + .param3 = 1,
83137 +};
83138 +struct size_overflow_hash _000492_hash = {
83139 + .next = NULL,
83140 + .name = "bl_pipe_downcall",
83141 + .file = "fs/nfs/blocklayout/blocklayoutdev.c",
83142 + .param3 = 1,
83143 +};
83144 +struct size_overflow_hash _000493_hash = {
83145 + .next = NULL,
83146 + .name = "bm_entry_read",
83147 + .file = "fs/binfmt_misc.c",
83148 + .param3 = 1,
83149 +};
83150 +struct size_overflow_hash _000494_hash = {
83151 + .next = NULL,
83152 + .name = "bm_realloc_pages",
83153 + .file = "drivers/block/drbd/drbd_bitmap.c",
83154 + .param2 = 1,
83155 +};
83156 +struct size_overflow_hash _000495_hash = {
83157 + .next = NULL,
83158 + .name = "bm_status_read",
83159 + .file = "fs/binfmt_misc.c",
83160 + .param3 = 1,
83161 +};
83162 +struct size_overflow_hash _000496_hash = {
83163 + .next = NULL,
83164 + .name = "bnad_debugfs_read",
83165 + .file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
83166 + .param3 = 1,
83167 +};
83168 +struct size_overflow_hash _000497_hash = {
83169 + .next = NULL,
83170 + .name = "bnad_debugfs_read_regrd",
83171 + .file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
83172 + .param3 = 1,
83173 +};
83174 +struct size_overflow_hash _000498_hash = {
83175 + .next = NULL,
83176 + .name = "bnad_debugfs_write_regrd",
83177 + .file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
83178 + .param3 = 1,
83179 +};
83180 +struct size_overflow_hash _000499_hash = {
83181 + .next = NULL,
83182 + .name = "bnad_debugfs_write_regwr",
83183 + .file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
83184 + .param3 = 1,
83185 +};
83186 +struct size_overflow_hash _000500_hash = {
83187 + .next = NULL,
83188 + .name = "bnx2fc_cmd_mgr_alloc",
83189 + .file = "drivers/scsi/bnx2fc/bnx2fc_io.c",
83190 + .param2 = 1,
83191 + .param3 = 1,
83192 +};
83193 +struct size_overflow_hash _000502_hash = {
83194 + .next = NULL,
83195 + .name = "btmrvl_curpsmode_read",
83196 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83197 + .param3 = 1,
83198 +};
83199 +struct size_overflow_hash _000503_hash = {
83200 + .next = NULL,
83201 + .name = "btmrvl_gpiogap_read",
83202 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83203 + .param3 = 1,
83204 +};
83205 +struct size_overflow_hash _000504_hash = {
83206 + .next = NULL,
83207 + .name = "btmrvl_gpiogap_write",
83208 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83209 + .param3 = 1,
83210 +};
83211 +struct size_overflow_hash _000505_hash = {
83212 + .next = NULL,
83213 + .name = "btmrvl_hscfgcmd_read",
83214 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83215 + .param3 = 1,
83216 +};
83217 +struct size_overflow_hash _000506_hash = {
83218 + .next = NULL,
83219 + .name = "btmrvl_hscfgcmd_write",
83220 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83221 + .param3 = 1,
83222 +};
83223 +struct size_overflow_hash _000507_hash = {
83224 + .next = &_000006_hash,
83225 + .name = "btmrvl_hscmd_read",
83226 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83227 + .param3 = 1,
83228 +};
83229 +struct size_overflow_hash _000508_hash = {
83230 + .next = NULL,
83231 + .name = "btmrvl_hscmd_write",
83232 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83233 + .param3 = 1,
83234 +};
83235 +struct size_overflow_hash _000509_hash = {
83236 + .next = NULL,
83237 + .name = "btmrvl_hsmode_read",
83238 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83239 + .param3 = 1,
83240 +};
83241 +struct size_overflow_hash _000510_hash = {
83242 + .next = NULL,
83243 + .name = "btmrvl_hsmode_write",
83244 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83245 + .param3 = 1,
83246 +};
83247 +struct size_overflow_hash _000511_hash = {
83248 + .next = NULL,
83249 + .name = "btmrvl_hsstate_read",
83250 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83251 + .param3 = 1,
83252 +};
83253 +struct size_overflow_hash _000512_hash = {
83254 + .next = NULL,
83255 + .name = "btmrvl_pscmd_read",
83256 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83257 + .param3 = 1,
83258 +};
83259 +struct size_overflow_hash _000513_hash = {
83260 + .next = NULL,
83261 + .name = "btmrvl_pscmd_write",
83262 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83263 + .param3 = 1,
83264 +};
83265 +struct size_overflow_hash _000514_hash = {
83266 + .next = NULL,
83267 + .name = "btmrvl_psmode_read",
83268 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83269 + .param3 = 1,
83270 +};
83271 +struct size_overflow_hash _000515_hash = {
83272 + .next = NULL,
83273 + .name = "btmrvl_psmode_write",
83274 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83275 + .param3 = 1,
83276 +};
83277 +struct size_overflow_hash _000516_hash = {
83278 + .next = NULL,
83279 + .name = "btmrvl_psstate_read",
83280 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83281 + .param3 = 1,
83282 +};
83283 +struct size_overflow_hash _000517_hash = {
83284 + .next = NULL,
83285 + .name = "btmrvl_txdnldready_read",
83286 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83287 + .param3 = 1,
83288 +};
83289 +struct size_overflow_hash _000518_hash = {
83290 + .next = NULL,
83291 + .name = "btrfs_alloc_delayed_item",
83292 + .file = "fs/btrfs/delayed-inode.c",
83293 + .param1 = 1,
83294 +};
83295 +struct size_overflow_hash _000519_hash = {
83296 + .next = NULL,
83297 + .name = "btrfs_copy_from_user",
83298 + .file = "fs/btrfs/file.c",
83299 + .param3 = 1,
83300 +};
83301 +struct size_overflow_hash _000520_hash = {
83302 + .next = NULL,
83303 + .name = "__btrfs_map_block",
83304 + .file = "fs/btrfs/volumes.c",
83305 + .param3 = 1,
83306 +};
83307 +struct size_overflow_hash _000521_hash = {
83308 + .next = NULL,
83309 + .name = "__c4iw_init_resource_fifo",
83310 + .file = "drivers/infiniband/hw/cxgb4/resource.c",
83311 + .param3 = 1,
83312 +};
83313 +struct size_overflow_hash _000522_hash = {
83314 + .next = NULL,
83315 + .name = "cache_do_downcall",
83316 + .file = "net/sunrpc/cache.c",
83317 + .param3 = 1,
83318 +};
83319 +struct size_overflow_hash _000523_hash = {
83320 + .next = NULL,
83321 + .name = "cachefiles_daemon_write",
83322 + .file = "fs/cachefiles/daemon.c",
83323 + .param3 = 1,
83324 +};
83325 +struct size_overflow_hash _000524_hash = {
83326 + .next = NULL,
83327 + .name = "cache_read",
83328 + .file = "net/sunrpc/cache.c",
83329 + .param3 = 1,
83330 +};
83331 +struct size_overflow_hash _000525_hash = {
83332 + .next = NULL,
83333 + .name = "ca_extend",
83334 + .file = "drivers/md/persistent-data/dm-space-map-checker.c",
83335 + .param2 = 1,
83336 +};
83337 +struct size_overflow_hash _000526_hash = {
83338 + .next = NULL,
83339 + .name = "calc_hmac",
83340 + .file = "security/keys/encrypted-keys/encrypted.c",
83341 + .param3 = 1,
83342 +};
83343 +struct size_overflow_hash _000527_hash = {
83344 + .next = NULL,
83345 + .name = "capi_write",
83346 + .file = "drivers/isdn/capi/capi.c",
83347 + .param3 = 1,
83348 +};
83349 +struct size_overflow_hash _000528_hash = {
83350 + .next = NULL,
83351 + .name = "carl9170_cmd_buf",
83352 + .file = "drivers/net/wireless/ath/carl9170/cmd.c",
83353 + .param3 = 1,
83354 +};
83355 +struct size_overflow_hash _000529_hash = {
83356 + .next = NULL,
83357 + .name = "carl9170_debugfs_read",
83358 + .file = "drivers/net/wireless/ath/carl9170/debug.c",
83359 + .param3 = 1,
83360 +};
83361 +struct size_overflow_hash _000530_hash = {
83362 + .next = NULL,
83363 + .name = "carl9170_debugfs_write",
83364 + .file = "drivers/net/wireless/ath/carl9170/debug.c",
83365 + .param3 = 1,
83366 +};
83367 +struct size_overflow_hash _000531_hash = {
83368 + .next = NULL,
83369 + .name = "cciss_proc_write",
83370 + .file = "drivers/block/cciss.c",
83371 + .param3 = 1,
83372 +};
83373 +struct size_overflow_hash _000532_hash = {
83374 + .next = NULL,
83375 + .name = "ceph_buffer_new",
83376 + .file = "include/linux/ceph/buffer.h",
83377 + .param1 = 1,
83378 +};
83379 +struct size_overflow_hash _000533_hash = {
83380 + .next = NULL,
83381 + .name = "ceph_copy_page_vector_to_user",
83382 + .file = "include/linux/ceph/libceph.h",
83383 + .param4 = 1,
83384 +};
83385 +struct size_overflow_hash _000534_hash = {
83386 + .next = NULL,
83387 + .name = "ceph_copy_user_to_page_vector",
83388 + .file = "include/linux/ceph/libceph.h",
83389 + .param4 = 1,
83390 +};
83391 +struct size_overflow_hash _000535_hash = {
83392 + .next = NULL,
83393 + .name = "ceph_read_dir",
83394 + .file = "fs/ceph/dir.c",
83395 + .param3 = 1,
83396 +};
83397 +struct size_overflow_hash _000536_hash = {
83398 + .next = NULL,
83399 + .name = "ceph_setxattr",
83400 + .file = "fs/ceph/xattr.c",
83401 + .param4 = 1,
83402 +};
83403 +struct size_overflow_hash _000537_hash = {
83404 + .next = NULL,
83405 + .name = "cfg80211_connect_result",
83406 + .file = "include/net/cfg80211.h",
83407 + .param4 = 1,
83408 + .param6 = 1,
83409 +};
83410 +struct size_overflow_hash _000539_hash = {
83411 + .next = NULL,
83412 + .name = "cfg80211_disconnected",
83413 + .file = "include/net/cfg80211.h",
83414 + .param4 = 1,
83415 +};
83416 +struct size_overflow_hash _000540_hash = {
83417 + .next = NULL,
83418 + .name = "cfg80211_inform_bss",
83419 + .file = "include/net/cfg80211.h",
83420 + .param8 = 1,
83421 +};
83422 +struct size_overflow_hash _000541_hash = {
83423 + .next = NULL,
83424 + .name = "cfg80211_inform_bss_frame",
83425 + .file = "include/net/cfg80211.h",
83426 + .param4 = 1,
83427 +};
83428 +struct size_overflow_hash _000542_hash = {
83429 + .next = NULL,
83430 + .name = "cfg80211_roamed_bss",
83431 + .file = "include/net/cfg80211.h",
83432 + .param4 = 1,
83433 + .param6 = 1,
83434 +};
83435 +struct size_overflow_hash _000544_hash = {
83436 + .next = NULL,
83437 + .name = "cfi_read_pri",
83438 + .file = "include/linux/mtd/cfi.h",
83439 + .param3 = 1,
83440 +};
83441 +struct size_overflow_hash _000545_hash = {
83442 + .next = NULL,
83443 + .name = "channel_type_read",
83444 + .file = "net/mac80211/debugfs.c",
83445 + .param3 = 1,
83446 +};
83447 +struct size_overflow_hash _000546_hash = {
83448 + .next = NULL,
83449 + .name = "cifs_idmap_key_instantiate",
83450 + .file = "fs/cifs/cifsacl.c",
83451 + .param3 = 1,
83452 +};
83453 +struct size_overflow_hash _000547_hash = {
83454 + .next = NULL,
83455 + .name = "cifs_readdata_alloc",
83456 + .file = "fs/cifs/cifssmb.c",
83457 + .param1 = 1,
83458 +};
83459 +struct size_overflow_hash _000548_hash = {
83460 + .next = NULL,
83461 + .name = "cifs_security_flags_proc_write",
83462 + .file = "fs/cifs/cifs_debug.c",
83463 + .param3 = 1,
83464 +};
83465 +struct size_overflow_hash _000549_hash = {
83466 + .next = NULL,
83467 + .name = "cifs_setxattr",
83468 + .file = "fs/cifs/xattr.c",
83469 + .param4 = 1,
83470 +};
83471 +struct size_overflow_hash _000550_hash = {
83472 + .next = NULL,
83473 + .name = "cifs_spnego_key_instantiate",
83474 + .file = "fs/cifs/cifs_spnego.c",
83475 + .param3 = 1,
83476 +};
83477 +struct size_overflow_hash _000551_hash = {
83478 + .next = NULL,
83479 + .name = "cifs_writedata_alloc",
83480 + .file = "fs/cifs/cifssmb.c",
83481 + .param1 = 1,
83482 +};
83483 +struct size_overflow_hash _000552_hash = {
83484 + .next = NULL,
83485 + .name = "ci_ll_write",
83486 + .file = "drivers/media/dvb/ttpci/av7110_ca.c",
83487 + .param4 = 1,
83488 +};
83489 +struct size_overflow_hash _000553_hash = {
83490 + .next = NULL,
83491 + .name = "clusterip_proc_write",
83492 + .file = "net/ipv4/netfilter/ipt_CLUSTERIP.c",
83493 + .param3 = 1,
83494 +};
83495 +struct size_overflow_hash _000554_hash = {
83496 + .next = &_000108_hash,
83497 + .name = "cm4040_write",
83498 + .file = "drivers/char/pcmcia/cm4040_cs.c",
83499 + .param3 = 1,
83500 +};
83501 +struct size_overflow_hash _000555_hash = {
83502 + .next = NULL,
83503 + .name = "cm_copy_private_data",
83504 + .file = "drivers/infiniband/core/cm.c",
83505 + .param2 = 1,
83506 +};
83507 +struct size_overflow_hash _000556_hash = {
83508 + .next = NULL,
83509 + .name = "cmm_write",
83510 + .file = "drivers/char/pcmcia/cm4000_cs.c",
83511 + .param3 = 1,
83512 +};
83513 +struct size_overflow_hash _000557_hash = {
83514 + .next = NULL,
83515 + .name = "cm_write",
83516 + .file = "drivers/acpi/custom_method.c",
83517 + .param3 = 1,
83518 +};
83519 +struct size_overflow_hash _000558_hash = {
83520 + .next = NULL,
83521 + .name = "coda_psdev_read",
83522 + .file = "fs/coda/psdev.c",
83523 + .param3 = 1,
83524 +};
83525 +struct size_overflow_hash _000559_hash = {
83526 + .next = NULL,
83527 + .name = "coda_psdev_write",
83528 + .file = "fs/coda/psdev.c",
83529 + .param3 = 1,
83530 +};
83531 +struct size_overflow_hash _000560_hash = {
83532 + .next = NULL,
83533 + .name = "codec_list_read_file",
83534 + .file = "sound/soc/soc-core.c",
83535 + .param3 = 1,
83536 +};
83537 +struct size_overflow_hash _000561_hash = {
83538 + .next = NULL,
83539 + .name = "codec_reg_read_file",
83540 + .file = "sound/soc/soc-core.c",
83541 + .param3 = 1,
83542 +};
83543 +struct size_overflow_hash _000562_hash = {
83544 + .next = NULL,
83545 + .name = "command_file_write",
83546 + .file = "drivers/misc/ibmasm/ibmasmfs.c",
83547 + .param3 = 1,
83548 +};
83549 +struct size_overflow_hash _000563_hash = {
83550 + .next = NULL,
83551 + .name = "command_write",
83552 + .file = "drivers/uwb/uwb-debug.c",
83553 + .param3 = 1,
83554 +};
83555 +struct size_overflow_hash _000564_hash = {
83556 + .next = NULL,
83557 + .name = "concat_writev",
83558 + .file = "drivers/mtd/mtdconcat.c",
83559 + .param3 = 1,
83560 +};
83561 +struct size_overflow_hash _000565_hash = {
83562 + .next = NULL,
83563 + .name = "configfs_read_file",
83564 + .file = "fs/configfs/file.c",
83565 + .param3 = 1,
83566 +};
83567 +struct size_overflow_hash _000566_hash = {
83568 + .next = NULL,
83569 + .name = "context_alloc",
83570 + .file = "drivers/md/dm-raid.c",
83571 + .param3 = 1,
83572 +};
83573 +struct size_overflow_hash _000567_hash = {
83574 + .next = NULL,
83575 + .name = "copy_counters_to_user",
83576 + .file = "net/bridge/netfilter/ebtables.c",
83577 + .param5 = 1,
83578 +};
83579 +struct size_overflow_hash _000568_hash = {
83580 + .next = NULL,
83581 + .name = "copy_entries_to_user",
83582 + .file = "net/ipv6/netfilter/ip6_tables.c",
83583 + .param1 = 1,
83584 +};
83585 +struct size_overflow_hash _000569_hash = {
83586 + .next = NULL,
83587 + .name = "copy_entries_to_user",
83588 + .file = "net/ipv4/netfilter/arp_tables.c",
83589 + .param1 = 1,
83590 +};
83591 +struct size_overflow_hash _000570_hash = {
83592 + .next = NULL,
83593 + .name = "copy_entries_to_user",
83594 + .file = "net/ipv4/netfilter/ip_tables.c",
83595 + .param1 = 1,
83596 +};
83597 +struct size_overflow_hash _000571_hash = {
83598 + .next = NULL,
83599 + .name = "copy_from_user_toio",
83600 + .file = "include/sound/core.h",
83601 + .param3 = 1,
83602 +};
83603 +struct size_overflow_hash _000572_hash = {
83604 + .next = NULL,
83605 + .name = "copy_macs",
83606 + .file = "net/atm/mpc.c",
83607 + .param4 = 1,
83608 +};
83609 +struct size_overflow_hash _000573_hash = {
83610 + .next = NULL,
83611 + .name = "copy_to_user_fromio",
83612 + .file = "include/sound/core.h",
83613 + .param3 = 1,
83614 +};
83615 +struct size_overflow_hash _000574_hash = {
83616 + .next = NULL,
83617 + .name = "cosa_write",
83618 + .file = "drivers/net/wan/cosa.c",
83619 + .param3 = 1,
83620 +};
83621 +struct size_overflow_hash _000575_hash = {
83622 + .next = NULL,
83623 + .name = "create_attr_set",
83624 + .file = "drivers/platform/x86/thinkpad_acpi.c",
83625 + .param1 = 1,
83626 +};
83627 +struct size_overflow_hash _000576_hash = {
83628 + .next = NULL,
83629 + .name = "create_entry",
83630 + .file = "fs/binfmt_misc.c",
83631 + .param2 = 1,
83632 +};
83633 +struct size_overflow_hash _000577_hash = {
83634 + .next = NULL,
83635 + .name = "create_gpadl_header",
83636 + .file = "drivers/hv/channel.c",
83637 + .param2 = 1,
83638 +};
83639 +struct size_overflow_hash _000578_hash = {
83640 + .next = NULL,
83641 + .name = "create_queues",
83642 + .file = "drivers/atm/ambassador.c",
83643 + .param2 = 1,
83644 + .param3 = 1,
83645 +};
83646 +struct size_overflow_hash _000580_hash = {
83647 + .next = NULL,
83648 + .name = "_create_sg_bios",
83649 + .file = "drivers/scsi/osd/osd_initiator.c",
83650 + .param4 = 1,
83651 +};
83652 +struct size_overflow_hash _000581_hash = {
83653 + .next = NULL,
83654 + .name = "cryptd_alloc_instance",
83655 + .file = "crypto/cryptd.c",
83656 + .param2 = 1,
83657 + .param3 = 1,
83658 +};
83659 +struct size_overflow_hash _000583_hash = {
83660 + .next = NULL,
83661 + .name = "cryptd_hash_setkey",
83662 + .file = "crypto/cryptd.c",
83663 + .param3 = 1,
83664 +};
83665 +struct size_overflow_hash _000584_hash = {
83666 + .next = NULL,
83667 + .name = "crypto_authenc_esn_setkey",
83668 + .file = "crypto/authencesn.c",
83669 + .param3 = 1,
83670 +};
83671 +struct size_overflow_hash _000585_hash = {
83672 + .next = NULL,
83673 + .name = "crypto_authenc_setkey",
83674 + .file = "crypto/authenc.c",
83675 + .param3 = 1,
83676 +};
83677 +struct size_overflow_hash _000586_hash = {
83678 + .next = NULL,
83679 + .name = "ctrl_out",
83680 + .file = "drivers/usb/misc/usbtest.c",
83681 + .param3 = 1,
83682 + .param5 = 1,
83683 +};
83684 +struct size_overflow_hash _000588_hash = {
83685 + .next = NULL,
83686 + .name = "cx18_copy_buf_to_user",
83687 + .file = "drivers/media/video/cx18/cx18-fileops.c",
83688 + .param4 = 1,
83689 +};
83690 +struct size_overflow_hash _000589_hash = {
83691 + .next = NULL,
83692 + .name = "cx24116_writeregN",
83693 + .file = "drivers/media/dvb/frontends/cx24116.c",
83694 + .param4 = 1,
83695 +};
83696 +struct size_overflow_hash _000590_hash = {
83697 + .next = NULL,
83698 + .name = "cxgb_alloc_mem",
83699 + .file = "drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c",
83700 + .param1 = 1,
83701 +};
83702 +struct size_overflow_hash _000591_hash = {
83703 + .next = NULL,
83704 + .name = "cxgbi_alloc_big_mem",
83705 + .file = "drivers/scsi/cxgbi/libcxgbi.h",
83706 + .param1 = 1,
83707 +};
83708 +struct size_overflow_hash _000592_hash = {
83709 + .next = NULL,
83710 + .name = "cxgbi_device_register",
83711 + .file = "drivers/scsi/cxgbi/libcxgbi.c",
83712 + .param1 = 1,
83713 + .param2 = 1,
83714 +};
83715 +struct size_overflow_hash _000594_hash = {
83716 + .next = NULL,
83717 + .name = "__cxio_init_resource_fifo",
83718 + .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
83719 + .param3 = 1,
83720 +};
83721 +struct size_overflow_hash _000595_hash = {
83722 + .next = NULL,
83723 + .name = "dac960_user_command_proc_write",
83724 + .file = "drivers/block/DAC960.c",
83725 + .param3 = 1,
83726 +};
83727 +struct size_overflow_hash _000596_hash = {
83728 + .next = NULL,
83729 + .name = "dai_list_read_file",
83730 + .file = "sound/soc/soc-core.c",
83731 + .param3 = 1,
83732 +};
83733 +struct size_overflow_hash _000597_hash = {
83734 + .next = NULL,
83735 + .name = "dapm_bias_read_file",
83736 + .file = "sound/soc/soc-dapm.c",
83737 + .param3 = 1,
83738 +};
83739 +struct size_overflow_hash _000598_hash = {
83740 + .next = NULL,
83741 + .name = "dapm_widget_power_read_file",
83742 + .file = "sound/soc/soc-dapm.c",
83743 + .param3 = 1,
83744 +};
83745 +struct size_overflow_hash _000599_hash = {
83746 + .next = NULL,
83747 + .name = "datablob_format",
83748 + .file = "security/keys/encrypted-keys/encrypted.c",
83749 + .param2 = 1,
83750 +};
83751 +struct size_overflow_hash _000600_hash = {
83752 + .next = NULL,
83753 + .name = "dbgfs_frame",
83754 + .file = "drivers/net/caif/caif_spi.c",
83755 + .param3 = 1,
83756 +};
83757 +struct size_overflow_hash _000601_hash = {
83758 + .next = NULL,
83759 + .name = "dbgfs_state",
83760 + .file = "drivers/net/caif/caif_spi.c",
83761 + .param3 = 1,
83762 +};
83763 +struct size_overflow_hash _000602_hash = {
83764 + .next = NULL,
83765 + .name = "dccp_feat_clone_sp_val",
83766 + .file = "net/dccp/feat.c",
83767 + .param3 = 1,
83768 +};
83769 +struct size_overflow_hash _000603_hash = {
83770 + .next = NULL,
83771 + .name = "dccp_setsockopt_ccid",
83772 + .file = "net/dccp/proto.c",
83773 + .param4 = 1,
83774 +};
83775 +struct size_overflow_hash _000604_hash = {
83776 + .next = NULL,
83777 + .name = "dccp_setsockopt_service",
83778 + .file = "net/dccp/proto.c",
83779 + .param4 = 1,
83780 +};
83781 +struct size_overflow_hash _000605_hash = {
83782 + .next = NULL,
83783 + .name = "ddb_input_read",
83784 + .file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
83785 + .param3 = 1,
83786 +};
83787 +struct size_overflow_hash _000606_hash = {
83788 + .next = NULL,
83789 + .name = "ddb_output_write",
83790 + .file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
83791 + .param3 = 1,
83792 +};
83793 +struct size_overflow_hash _000607_hash = {
83794 + .next = NULL,
83795 + .name = "ddp_make_gl",
83796 + .file = "drivers/scsi/cxgbi/libcxgbi.c",
83797 + .param1 = 1,
83798 +};
83799 +struct size_overflow_hash _000608_hash = {
83800 + .next = NULL,
83801 + .name = "debugfs_read",
83802 + .file = "drivers/infiniband/hw/cxgb4/device.c",
83803 + .param3 = 1,
83804 +};
83805 +struct size_overflow_hash _000609_hash = {
83806 + .next = NULL,
83807 + .name = "debugfs_read",
83808 + .file = "drivers/char/virtio_console.c",
83809 + .param3 = 1,
83810 +};
83811 +struct size_overflow_hash _000610_hash = {
83812 + .next = NULL,
83813 + .name = "debug_output",
83814 + .file = "drivers/usb/host/ohci-dbg.c",
83815 + .param3 = 1,
83816 +};
83817 +struct size_overflow_hash _000611_hash = {
83818 + .next = NULL,
83819 + .name = "debug_output",
83820 + .file = "drivers/usb/host/ehci-dbg.c",
83821 + .param3 = 1,
83822 +};
83823 +struct size_overflow_hash _000612_hash = {
83824 + .next = NULL,
83825 + .name = "debug_read",
83826 + .file = "fs/ocfs2/dlm/dlmdebug.c",
83827 + .param3 = 1,
83828 +};
83829 +struct size_overflow_hash _000613_hash = {
83830 + .next = NULL,
83831 + .name = "dev_config",
83832 + .file = "drivers/usb/gadget/inode.c",
83833 + .param3 = 1,
83834 +};
83835 +struct size_overflow_hash _000614_hash = {
83836 + .next = NULL,
83837 + .name = "device_write",
83838 + .file = "fs/dlm/user.c",
83839 + .param3 = 1,
83840 +};
83841 +struct size_overflow_hash _000615_hash = {
83842 + .next = NULL,
83843 + .name = "dev_read",
83844 + .file = "drivers/media/video/gspca/gspca.c",
83845 + .param3 = 1,
83846 +};
83847 +struct size_overflow_hash _000616_hash = {
83848 + .next = NULL,
83849 + .name = "dfs_file_read",
83850 + .file = "drivers/mtd/ubi/debug.c",
83851 + .param3 = 1,
83852 +};
83853 +struct size_overflow_hash _000617_hash = {
83854 + .next = NULL,
83855 + .name = "dfs_file_write",
83856 + .file = "drivers/mtd/ubi/debug.c",
83857 + .param3 = 1,
83858 +};
83859 +struct size_overflow_hash _000618_hash = {
83860 + .next = NULL,
83861 + .name = "direct_entry",
83862 + .file = "drivers/misc/lkdtm.c",
83863 + .param3 = 1,
83864 +};
83865 +struct size_overflow_hash _000619_hash = {
83866 + .next = NULL,
83867 + .name = "dispatch_proc_write",
83868 + .file = "drivers/platform/x86/thinkpad_acpi.c",
83869 + .param3 = 1,
83870 +};
83871 +struct size_overflow_hash _000620_hash = {
83872 + .next = NULL,
83873 + .name = "diva_os_malloc",
83874 + .file = "drivers/isdn/hardware/eicon/platform.h",
83875 + .param2 = 1,
83876 +};
83877 +struct size_overflow_hash _000621_hash = {
83878 + .next = NULL,
83879 + .name = "dlmfs_file_read",
83880 + .file = "fs/ocfs2/dlmfs/dlmfs.c",
83881 + .param3 = 1,
83882 +};
83883 +struct size_overflow_hash _000622_hash = {
83884 + .next = NULL,
83885 + .name = "dlmfs_file_write",
83886 + .file = "fs/ocfs2/dlmfs/dlmfs.c",
83887 + .param3 = 1,
83888 +};
83889 +struct size_overflow_hash _000623_hash = {
83890 + .next = NULL,
83891 + .name = "dma_attach",
83892 + .file = "drivers/net/wireless/brcm80211/brcmsmac/dma.c",
83893 + .param6 = 1,
83894 + .param7 = 1,
83895 +};
83896 +struct size_overflow_hash _000625_hash = {
83897 + .next = NULL,
83898 + .name = "dma_rx_errors_read",
83899 + .file = "drivers/net/wireless/wl1251/debugfs.c",
83900 + .param3 = 1,
83901 +};
83902 +struct size_overflow_hash _000626_hash = {
83903 + .next = NULL,
83904 + .name = "dma_rx_requested_read",
83905 + .file = "drivers/net/wireless/wl1251/debugfs.c",
83906 + .param3 = 1,
83907 +};
83908 +struct size_overflow_hash _000627_hash = {
83909 + .next = NULL,
83910 + .name = "dma_show_regs",
83911 + .file = "drivers/tty/serial/mfd.c",
83912 + .param3 = 1,
83913 +};
83914 +struct size_overflow_hash _000628_hash = {
83915 + .next = NULL,
83916 + .name = "dma_tx_errors_read",
83917 + .file = "drivers/net/wireless/wl1251/debugfs.c",
83918 + .param3 = 1,
83919 +};
83920 +struct size_overflow_hash _000629_hash = {
83921 + .next = NULL,
83922 + .name = "dma_tx_requested_read",
83923 + .file = "drivers/net/wireless/wl1251/debugfs.c",
83924 + .param3 = 1,
83925 +};
83926 +struct size_overflow_hash _000630_hash = {
83927 + .next = NULL,
83928 + .name = "dm_read",
83929 + .file = "drivers/net/usb/dm9601.c",
83930 + .param3 = 1,
83931 +};
83932 +struct size_overflow_hash _000631_hash = {
83933 + .next = NULL,
83934 + .name = "dm_vcalloc",
83935 + .file = "include/linux/device-mapper.h",
83936 + .param1 = 1,
83937 + .param2 = 1,
83938 +};
83939 +struct size_overflow_hash _000633_hash = {
83940 + .next = NULL,
83941 + .name = "dm_write",
83942 + .file = "drivers/net/usb/dm9601.c",
83943 + .param3 = 1,
83944 +};
83945 +struct size_overflow_hash _000634_hash = {
83946 + .next = NULL,
83947 + .name = "__dn_setsockopt",
83948 + .file = "net/decnet/af_decnet.c",
83949 + .param5 = 1,
83950 +};
83951 +struct size_overflow_hash _000635_hash = {
83952 + .next = NULL,
83953 + .name = "dns_query",
83954 + .file = "include/linux/dns_resolver.h",
83955 + .param3 = 1,
83956 +};
83957 +struct size_overflow_hash _000636_hash = {
83958 + .next = NULL,
83959 + .name = "dns_resolver_instantiate",
83960 + .file = "net/dns_resolver/dns_key.c",
83961 + .param3 = 1,
83962 +};
83963 +struct size_overflow_hash _000637_hash = {
83964 + .next = NULL,
83965 + .name = "dns_resolver_read",
83966 + .file = "net/dns_resolver/dns_key.c",
83967 + .param3 = 1,
83968 +};
83969 +struct size_overflow_hash _000638_hash = {
83970 + .next = NULL,
83971 + .name = "do_add_counters",
83972 + .file = "net/ipv6/netfilter/ip6_tables.c",
83973 + .param3 = 1,
83974 +};
83975 +struct size_overflow_hash _000639_hash = {
83976 + .next = NULL,
83977 + .name = "do_add_counters",
83978 + .file = "net/ipv4/netfilter/ip_tables.c",
83979 + .param3 = 1,
83980 +};
83981 +struct size_overflow_hash _000640_hash = {
83982 + .next = NULL,
83983 + .name = "do_add_counters",
83984 + .file = "net/ipv4/netfilter/arp_tables.c",
83985 + .param3 = 1,
83986 +};
83987 +struct size_overflow_hash _000641_hash = {
83988 + .next = NULL,
83989 + .name = "__do_config_autodelink",
83990 + .file = "drivers/usb/storage/realtek_cr.c",
83991 + .param3 = 1,
83992 +};
83993 +struct size_overflow_hash _000642_hash = {
83994 + .next = NULL,
83995 + .name = "do_ipv6_setsockopt",
83996 + .file = "net/ipv6/ipv6_sockglue.c",
83997 + .param5 = 1,
83998 +};
83999 +struct size_overflow_hash _000643_hash = {
84000 + .next = NULL,
84001 + .name = "do_ip_vs_set_ctl",
84002 + .file = "net/netfilter/ipvs/ip_vs_ctl.c",
84003 + .param4 = 1,
84004 +};
84005 +struct size_overflow_hash _000644_hash = {
84006 + .next = NULL,
84007 + .name = "do_register_entry",
84008 + .file = "drivers/misc/lkdtm.c",
84009 + .param4 = 1,
84010 +};
84011 +struct size_overflow_hash _000645_hash = {
84012 + .next = NULL,
84013 + .name = "__do_replace",
84014 + .file = "net/ipv6/netfilter/ip6_tables.c",
84015 + .param5 = 1,
84016 +};
84017 +struct size_overflow_hash _000646_hash = {
84018 + .next = NULL,
84019 + .name = "__do_replace",
84020 + .file = "net/ipv4/netfilter/ip_tables.c",
84021 + .param5 = 1,
84022 +};
84023 +struct size_overflow_hash _000647_hash = {
84024 + .next = NULL,
84025 + .name = "__do_replace",
84026 + .file = "net/ipv4/netfilter/arp_tables.c",
84027 + .param5 = 1,
84028 +};
84029 +struct size_overflow_hash _000648_hash = {
84030 + .next = NULL,
84031 + .name = "do_sync",
84032 + .file = "fs/gfs2/quota.c",
84033 + .param1 = 1,
84034 +};
84035 +struct size_overflow_hash _000649_hash = {
84036 + .next = NULL,
84037 + .name = "do_update_counters",
84038 + .file = "net/bridge/netfilter/ebtables.c",
84039 + .param4 = 1,
84040 +};
84041 +struct size_overflow_hash _000650_hash = {
84042 + .next = NULL,
84043 + .name = "driver_state_read",
84044 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
84045 + .param3 = 1,
84046 +};
84047 +struct size_overflow_hash _000651_hash = {
84048 + .next = NULL,
84049 + .name = "dsp_write",
84050 + .file = "sound/oss/msnd_pinnacle.c",
84051 + .param2 = 1,
84052 +};
84053 +struct size_overflow_hash _000652_hash = {
84054 + .next = NULL,
84055 + .name = "dvb_aplay",
84056 + .file = "drivers/media/dvb/ttpci/av7110_av.c",
84057 + .param3 = 1,
84058 +};
84059 +struct size_overflow_hash _000653_hash = {
84060 + .next = NULL,
84061 + .name = "dvb_ca_en50221_io_write",
84062 + .file = "drivers/media/dvb/dvb-core/dvb_ca_en50221.c",
84063 + .param3 = 1,
84064 +};
84065 +struct size_overflow_hash _000654_hash = {
84066 + .next = NULL,
84067 + .name = "dvb_dmxdev_set_buffer_size",
84068 + .file = "drivers/media/dvb/dvb-core/dmxdev.c",
84069 + .param2 = 1,
84070 +};
84071 +struct size_overflow_hash _000655_hash = {
84072 + .next = NULL,
84073 + .name = "dvbdmx_write",
84074 + .file = "drivers/media/dvb/dvb-core/dvb_demux.c",
84075 + .param3 = 1,
84076 +};
84077 +struct size_overflow_hash _000656_hash = {
84078 + .next = NULL,
84079 + .name = "dvb_dvr_set_buffer_size",
84080 + .file = "drivers/media/dvb/dvb-core/dmxdev.c",
84081 + .param2 = 1,
84082 +};
84083 +struct size_overflow_hash _000657_hash = {
84084 + .next = NULL,
84085 + .name = "dvb_play",
84086 + .file = "drivers/media/dvb/ttpci/av7110_av.c",
84087 + .param3 = 1,
84088 +};
84089 +struct size_overflow_hash _000658_hash = {
84090 + .next = NULL,
84091 + .name = "dvb_ringbuffer_pkt_read_user",
84092 + .file = "drivers/media/dvb/dvb-core/dvb_ringbuffer.c",
84093 + .param5 = 1,
84094 +};
84095 +struct size_overflow_hash _000659_hash = {
84096 + .next = NULL,
84097 + .name = "dvb_ringbuffer_read_user",
84098 + .file = "drivers/media/dvb/dvb-core/dvb_ringbuffer.c",
84099 + .param3 = 1,
84100 +};
84101 +struct size_overflow_hash _000660_hash = {
84102 + .next = NULL,
84103 + .name = "dw210x_op_rw",
84104 + .file = "drivers/media/dvb/dvb-usb/dw2102.c",
84105 + .param6 = 1,
84106 +};
84107 +struct size_overflow_hash _000661_hash = {
84108 + .next = NULL,
84109 + .name = "dwc3_mode_write",
84110 + .file = "drivers/usb/dwc3/debugfs.c",
84111 + .param3 = 1,
84112 +};
84113 +struct size_overflow_hash _000662_hash = {
84114 + .next = NULL,
84115 + .name = "econet_sendmsg",
84116 + .file = "net/econet/af_econet.c",
84117 + .param4 = 1,
84118 +};
84119 +struct size_overflow_hash _000663_hash = {
84120 + .next = NULL,
84121 + .name = "ecryptfs_copy_filename",
84122 + .file = "fs/ecryptfs/crypto.c",
84123 + .param4 = 1,
84124 +};
84125 +struct size_overflow_hash _000664_hash = {
84126 + .next = NULL,
84127 + .name = "ecryptfs_miscdev_write",
84128 + .file = "fs/ecryptfs/miscdev.c",
84129 + .param3 = 1,
84130 +};
84131 +struct size_overflow_hash _000665_hash = {
84132 + .next = NULL,
84133 + .name = "ecryptfs_send_miscdev",
84134 + .file = "fs/ecryptfs/miscdev.c",
84135 + .param2 = 1,
84136 +};
84137 +struct size_overflow_hash _000666_hash = {
84138 + .next = NULL,
84139 + .name = "edac_device_alloc_ctl_info",
84140 + .file = "drivers/edac/edac_device.c",
84141 + .param1 = 1,
84142 +};
84143 +struct size_overflow_hash _000667_hash = {
84144 + .next = NULL,
84145 + .name = "edac_mc_alloc",
84146 + .file = "drivers/edac/edac_mc.c",
84147 + .param1 = 1,
84148 +};
84149 +struct size_overflow_hash _000668_hash = {
84150 + .next = NULL,
84151 + .name = "edac_pci_alloc_ctl_info",
84152 + .file = "drivers/edac/edac_pci.c",
84153 + .param1 = 1,
84154 +};
84155 +struct size_overflow_hash _000669_hash = {
84156 + .next = NULL,
84157 + .name = "efivar_create_sysfs_entry",
84158 + .file = "drivers/firmware/efivars.c",
84159 + .param2 = 1,
84160 +};
84161 +struct size_overflow_hash _000670_hash = {
84162 + .next = NULL,
84163 + .name = "efx_tsoh_heap_alloc",
84164 + .file = "drivers/net/ethernet/sfc/tx.c",
84165 + .param2 = 1,
84166 +};
84167 +struct size_overflow_hash _000671_hash = {
84168 + .next = NULL,
84169 + .name = "encrypted_instantiate",
84170 + .file = "security/keys/encrypted-keys/encrypted.c",
84171 + .param3 = 1,
84172 +};
84173 +struct size_overflow_hash _000672_hash = {
84174 + .next = NULL,
84175 + .name = "encrypted_update",
84176 + .file = "security/keys/encrypted-keys/encrypted.c",
84177 + .param3 = 1,
84178 +};
84179 +struct size_overflow_hash _000673_hash = {
84180 + .next = NULL,
84181 + .name = "ep0_write",
84182 + .file = "drivers/usb/gadget/inode.c",
84183 + .param3 = 1,
84184 +};
84185 +struct size_overflow_hash _000674_hash = {
84186 + .next = NULL,
84187 + .name = "ep_read",
84188 + .file = "drivers/usb/gadget/inode.c",
84189 + .param3 = 1,
84190 +};
84191 +struct size_overflow_hash _000675_hash = {
84192 + .next = NULL,
84193 + .name = "ep_write",
84194 + .file = "drivers/usb/gadget/inode.c",
84195 + .param3 = 1,
84196 +};
84197 +struct size_overflow_hash _000676_hash = {
84198 + .next = NULL,
84199 + .name = "erst_dbg_write",
84200 + .file = "drivers/acpi/apei/erst-dbg.c",
84201 + .param3 = 1,
84202 +};
84203 +struct size_overflow_hash _000677_hash = {
84204 + .next = NULL,
84205 + .name = "et61x251_read",
84206 + .file = "drivers/media/video/et61x251/et61x251_core.c",
84207 + .param3 = 1,
84208 +};
84209 +struct size_overflow_hash _000678_hash = {
84210 + .next = NULL,
84211 + .name = "event_calibration_read",
84212 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84213 + .param3 = 1,
84214 +};
84215 +struct size_overflow_hash _000679_hash = {
84216 + .next = NULL,
84217 + .name = "event_heart_beat_read",
84218 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84219 + .param3 = 1,
84220 +};
84221 +struct size_overflow_hash _000680_hash = {
84222 + .next = NULL,
84223 + .name = "event_oom_late_read",
84224 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84225 + .param3 = 1,
84226 +};
84227 +struct size_overflow_hash _000681_hash = {
84228 + .next = NULL,
84229 + .name = "event_phy_transmit_error_read",
84230 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84231 + .param3 = 1,
84232 +};
84233 +struct size_overflow_hash _000682_hash = {
84234 + .next = NULL,
84235 + .name = "event_rx_mem_empty_read",
84236 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84237 + .param3 = 1,
84238 +};
84239 +struct size_overflow_hash _000683_hash = {
84240 + .next = NULL,
84241 + .name = "event_rx_mismatch_read",
84242 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84243 + .param3 = 1,
84244 +};
84245 +struct size_overflow_hash _000684_hash = {
84246 + .next = NULL,
84247 + .name = "event_rx_pool_read",
84248 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84249 + .param3 = 1,
84250 +};
84251 +struct size_overflow_hash _000685_hash = {
84252 + .next = NULL,
84253 + .name = "event_tx_stuck_read",
84254 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84255 + .param3 = 1,
84256 +};
84257 +struct size_overflow_hash _000686_hash = {
84258 + .next = NULL,
84259 + .name = "excessive_retries_read",
84260 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84261 + .param3 = 1,
84262 +};
84263 +struct size_overflow_hash _000687_hash = {
84264 + .next = NULL,
84265 + .name = "exofs_read_lookup_dev_table",
84266 + .file = "fs/exofs/super.c",
84267 + .param3 = 1,
84268 +};
84269 +struct size_overflow_hash _000688_hash = {
84270 + .next = NULL,
84271 + .name = "ext4_kvmalloc",
84272 + .file = "fs/ext4/super.c",
84273 + .param1 = 1,
84274 +};
84275 +struct size_overflow_hash _000689_hash = {
84276 + .next = NULL,
84277 + .name = "ext4_kvzalloc",
84278 + .file = "fs/ext4/super.c",
84279 + .param1 = 1,
84280 +};
84281 +struct size_overflow_hash _000690_hash = {
84282 + .next = NULL,
84283 + .name = "extend_netdev_table",
84284 + .file = "net/core/netprio_cgroup.c",
84285 + .param2 = 1,
84286 +};
84287 +struct size_overflow_hash _000691_hash = {
84288 + .next = NULL,
84289 + .name = "fd_copyin",
84290 + .file = "drivers/block/floppy.c",
84291 + .param3 = 1,
84292 +};
84293 +struct size_overflow_hash _000692_hash = {
84294 + .next = NULL,
84295 + .name = "fd_copyout",
84296 + .file = "drivers/block/floppy.c",
84297 + .param3 = 1,
84298 +};
84299 +struct size_overflow_hash _000693_hash = {
84300 + .next = NULL,
84301 + .name = "__ffs_ep0_read_events",
84302 + .file = "drivers/usb/gadget/f_fs.c",
84303 + .param3 = 1,
84304 +};
84305 +struct size_overflow_hash _000694_hash = {
84306 + .next = NULL,
84307 + .name = "ffs_epfile_io",
84308 + .file = "drivers/usb/gadget/f_fs.c",
84309 + .param3 = 1,
84310 +};
84311 +struct size_overflow_hash _000695_hash = {
84312 + .next = NULL,
84313 + .name = "ffs_prepare_buffer",
84314 + .file = "drivers/usb/gadget/f_fs.c",
84315 + .param2 = 1,
84316 +};
84317 +struct size_overflow_hash _000696_hash = {
84318 + .next = NULL,
84319 + .name = "f_hidg_read",
84320 + .file = "drivers/usb/gadget/f_hid.c",
84321 + .param3 = 1,
84322 +};
84323 +struct size_overflow_hash _000697_hash = {
84324 + .next = NULL,
84325 + .name = "f_hidg_write",
84326 + .file = "drivers/usb/gadget/f_hid.c",
84327 + .param3 = 1,
84328 +};
84329 +struct size_overflow_hash _000698_hash = {
84330 + .next = NULL,
84331 + .name = "fill_write_buffer",
84332 + .file = "fs/configfs/file.c",
84333 + .param3 = 1,
84334 +};
84335 +struct size_overflow_hash _000699_hash = {
84336 + .next = NULL,
84337 + .name = "flexcop_device_kmalloc",
84338 + .file = "drivers/media/dvb/b2c2/flexcop.c",
84339 + .param1 = 1,
84340 +};
84341 +struct size_overflow_hash _000700_hash = {
84342 + .next = NULL,
84343 + .name = "fops_read",
84344 + .file = "drivers/media/video/saa7164/saa7164-encoder.c",
84345 + .param3 = 1,
84346 +};
84347 +struct size_overflow_hash _000701_hash = {
84348 + .next = NULL,
84349 + .name = "fops_read",
84350 + .file = "drivers/media/video/saa7164/saa7164-vbi.c",
84351 + .param3 = 1,
84352 +};
84353 +struct size_overflow_hash _000702_hash = {
84354 + .next = NULL,
84355 + .name = "format_devstat_counter",
84356 + .file = "net/mac80211/debugfs.c",
84357 + .param3 = 1,
84358 +};
84359 +struct size_overflow_hash _000703_hash = {
84360 + .next = NULL,
84361 + .name = "fragmentation_threshold_read",
84362 + .file = "net/wireless/debugfs.c",
84363 + .param3 = 1,
84364 +};
84365 +struct size_overflow_hash _000704_hash = {
84366 + .next = NULL,
84367 + .name = "frame_alloc",
84368 + .file = "drivers/media/video/gspca/gspca.c",
84369 + .param4 = 1,
84370 +};
84371 +struct size_overflow_hash _000705_hash = {
84372 + .next = NULL,
84373 + .name = "ftdi_elan_write",
84374 + .file = "drivers/usb/misc/ftdi-elan.c",
84375 + .param3 = 1,
84376 +};
84377 +struct size_overflow_hash _000706_hash = {
84378 + .next = NULL,
84379 + .name = "fuse_conn_limit_read",
84380 + .file = "fs/fuse/control.c",
84381 + .param3 = 1,
84382 +};
84383 +struct size_overflow_hash _000707_hash = {
84384 + .next = NULL,
84385 + .name = "fuse_conn_limit_write",
84386 + .file = "fs/fuse/control.c",
84387 + .param3 = 1,
84388 +};
84389 +struct size_overflow_hash _000708_hash = {
84390 + .next = &_000531_hash,
84391 + .name = "fuse_conn_waiting_read",
84392 + .file = "fs/fuse/control.c",
84393 + .param3 = 1,
84394 +};
84395 +struct size_overflow_hash _000709_hash = {
84396 + .next = NULL,
84397 + .name = "garp_attr_create",
84398 + .file = "net/802/garp.c",
84399 + .param3 = 1,
84400 +};
84401 +struct size_overflow_hash _000710_hash = {
84402 + .next = NULL,
84403 + .name = "get_alua_req",
84404 + .file = "drivers/scsi/device_handler/scsi_dh_alua.c",
84405 + .param3 = 1,
84406 +};
84407 +struct size_overflow_hash _000711_hash = {
84408 + .next = NULL,
84409 + .name = "get_derived_key",
84410 + .file = "security/keys/encrypted-keys/encrypted.c",
84411 + .param4 = 1,
84412 +};
84413 +struct size_overflow_hash _000712_hash = {
84414 + .next = NULL,
84415 + .name = "getdqbuf",
84416 + .file = "fs/quota/quota_tree.c",
84417 + .param1 = 1,
84418 +};
84419 +struct size_overflow_hash _000713_hash = {
84420 + .next = NULL,
84421 + .name = "get_fdb_entries",
84422 + .file = "net/bridge/br_ioctl.c",
84423 + .param3 = 1,
84424 +};
84425 +struct size_overflow_hash _000714_hash = {
84426 + .next = NULL,
84427 + .name = "get_rdac_req",
84428 + .file = "drivers/scsi/device_handler/scsi_dh_rdac.c",
84429 + .param3 = 1,
84430 +};
84431 +struct size_overflow_hash _000715_hash = {
84432 + .next = NULL,
84433 + .name = "get_registers",
84434 + .file = "drivers/net/usb/pegasus.c",
84435 + .param3 = 1,
84436 +};
84437 +struct size_overflow_hash _000716_hash = {
84438 + .next = NULL,
84439 + .name = "get_server_iovec",
84440 + .file = "fs/cifs/connect.c",
84441 + .param2 = 1,
84442 +};
84443 +struct size_overflow_hash _000717_hash = {
84444 + .next = NULL,
84445 + .name = "get_ucode_user",
84446 + .file = "arch/x86/kernel/microcode_intel.c",
84447 + .param3 = 1,
84448 +};
84449 +struct size_overflow_hash _000718_hash = {
84450 + .next = NULL,
84451 + .name = "gfs2_alloc_sort_buffer",
84452 + .file = "fs/gfs2/dir.c",
84453 + .param1 = 1,
84454 +};
84455 +struct size_overflow_hash _000719_hash = {
84456 + .next = NULL,
84457 + .name = "gfs2_glock_nq_m",
84458 + .file = "fs/gfs2/glock.c",
84459 + .param1 = 1,
84460 +};
84461 +struct size_overflow_hash _000720_hash = {
84462 + .next = NULL,
84463 + .name = "gigaset_initdriver",
84464 + .file = "drivers/isdn/gigaset/common.c",
84465 + .param2 = 1,
84466 +};
84467 +struct size_overflow_hash _000721_hash = {
84468 + .next = NULL,
84469 + .name = "gpio_power_read",
84470 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
84471 + .param3 = 1,
84472 +};
84473 +struct size_overflow_hash _000722_hash = {
84474 + .next = NULL,
84475 + .name = "gs_alloc_req",
84476 + .file = "drivers/usb/gadget/u_serial.c",
84477 + .param2 = 1,
84478 +};
84479 +struct size_overflow_hash _000723_hash = {
84480 + .next = NULL,
84481 + .name = "gs_buf_alloc",
84482 + .file = "drivers/usb/gadget/u_serial.c",
84483 + .param2 = 1,
84484 +};
84485 +struct size_overflow_hash _000724_hash = {
84486 + .next = NULL,
84487 + .name = "gss_pipe_downcall",
84488 + .file = "net/sunrpc/auth_gss/auth_gss.c",
84489 + .param3 = 1,
84490 +};
84491 +struct size_overflow_hash _000725_hash = {
84492 + .next = NULL,
84493 + .name = "handle_request",
84494 + .file = "drivers/firewire/core-cdev.c",
84495 + .param9 = 1,
84496 +};
84497 +struct size_overflow_hash _000726_hash = {
84498 + .next = NULL,
84499 + .name = "hash_new",
84500 + .file = "net/batman-adv/hash.c",
84501 + .param1 = 1,
84502 +};
84503 +struct size_overflow_hash _000727_hash = {
84504 + .next = NULL,
84505 + .name = "hash_setkey",
84506 + .file = "crypto/algif_hash.c",
84507 + .param3 = 1,
84508 +};
84509 +struct size_overflow_hash _000728_hash = {
84510 + .next = NULL,
84511 + .name = "hcd_buffer_alloc",
84512 + .file = "include/linux/usb/hcd.h",
84513 + .param2 = 1,
84514 +};
84515 +struct size_overflow_hash _000729_hash = {
84516 + .next = NULL,
84517 + .name = "hci_sock_setsockopt",
84518 + .file = "net/bluetooth/hci_sock.c",
84519 + .param5 = 1,
84520 +};
84521 +struct size_overflow_hash _000730_hash = {
84522 + .next = NULL,
84523 + .name = "hdpvr_read",
84524 + .file = "drivers/media/video/hdpvr/hdpvr-video.c",
84525 + .param3 = 1,
84526 +};
84527 +struct size_overflow_hash _000731_hash = {
84528 + .next = NULL,
84529 + .name = "hidraw_get_report",
84530 + .file = "drivers/hid/hidraw.c",
84531 + .param3 = 1,
84532 +};
84533 +struct size_overflow_hash _000732_hash = {
84534 + .next = NULL,
84535 + .name = "hidraw_read",
84536 + .file = "drivers/hid/hidraw.c",
84537 + .param3 = 1,
84538 +};
84539 +struct size_overflow_hash _000733_hash = {
84540 + .next = NULL,
84541 + .name = "hidraw_send_report",
84542 + .file = "drivers/hid/hidraw.c",
84543 + .param3 = 1,
84544 +};
84545 +struct size_overflow_hash _000734_hash = {
84546 + .next = NULL,
84547 + .name = "hid_register_field",
84548 + .file = "drivers/hid/hid-core.c",
84549 + .param2 = 1,
84550 + .param3 = 1,
84551 +};
84552 +struct size_overflow_hash _000736_hash = {
84553 + .next = NULL,
84554 + .name = "hpfs_translate_name",
84555 + .file = "fs/hpfs/name.c",
84556 + .param3 = 1,
84557 +};
84558 +struct size_overflow_hash _000737_hash = {
84559 + .next = NULL,
84560 + .name = "hpi_alloc_control_cache",
84561 + .file = "sound/pci/asihpi/hpicmn.c",
84562 + .param1 = 1,
84563 +};
84564 +struct size_overflow_hash _000738_hash = {
84565 + .next = NULL,
84566 + .name = "ht40allow_map_read",
84567 + .file = "net/wireless/debugfs.c",
84568 + .param3 = 1,
84569 +};
84570 +struct size_overflow_hash _000739_hash = {
84571 + .next = NULL,
84572 + .name = "__hwahc_dev_set_key",
84573 + .file = "drivers/usb/host/hwa-hc.c",
84574 + .param5 = 1,
84575 +};
84576 +struct size_overflow_hash _000740_hash = {
84577 + .next = NULL,
84578 + .name = "hwflags_read",
84579 + .file = "net/mac80211/debugfs.c",
84580 + .param3 = 1,
84581 +};
84582 +struct size_overflow_hash _000741_hash = {
84583 + .next = NULL,
84584 + .name = "hysdn_conf_read",
84585 + .file = "drivers/isdn/hysdn/hysdn_procconf.c",
84586 + .param3 = 1,
84587 +};
84588 +struct size_overflow_hash _000742_hash = {
84589 + .next = NULL,
84590 + .name = "hysdn_conf_write",
84591 + .file = "drivers/isdn/hysdn/hysdn_procconf.c",
84592 + .param3 = 1,
84593 +};
84594 +struct size_overflow_hash _000743_hash = {
84595 + .next = NULL,
84596 + .name = "hysdn_log_write",
84597 + .file = "drivers/isdn/hysdn/hysdn_proclog.c",
84598 + .param3 = 1,
84599 +};
84600 +struct size_overflow_hash _000744_hash = {
84601 + .next = NULL,
84602 + .name = "i2400m_rx_stats_read",
84603 + .file = "drivers/net/wimax/i2400m/debugfs.c",
84604 + .param3 = 1,
84605 +};
84606 +struct size_overflow_hash _000745_hash = {
84607 + .next = NULL,
84608 + .name = "i2400m_tx_stats_read",
84609 + .file = "drivers/net/wimax/i2400m/debugfs.c",
84610 + .param3 = 1,
84611 +};
84612 +struct size_overflow_hash _000746_hash = {
84613 + .next = NULL,
84614 + .name = "__i2400mu_send_barker",
84615 + .file = "drivers/net/wimax/i2400m/usb.c",
84616 + .param3 = 1,
84617 +};
84618 +struct size_overflow_hash _000747_hash = {
84619 + .next = NULL,
84620 + .name = "i2400m_zrealloc_2x",
84621 + .file = "drivers/net/wimax/i2400m/fw.c",
84622 + .param3 = 1,
84623 +};
84624 +struct size_overflow_hash _000748_hash = {
84625 + .next = NULL,
84626 + .name = "i2cdev_read",
84627 + .file = "drivers/i2c/i2c-dev.c",
84628 + .param3 = 1,
84629 +};
84630 +struct size_overflow_hash _000749_hash = {
84631 + .next = &_000459_hash,
84632 + .name = "i2cdev_write",
84633 + .file = "drivers/i2c/i2c-dev.c",
84634 + .param3 = 1,
84635 +};
84636 +struct size_overflow_hash _000750_hash = {
84637 + .next = NULL,
84638 + .name = "ib_alloc_device",
84639 + .file = "include/rdma/ib_verbs.h",
84640 + .param1 = 1,
84641 +};
84642 +struct size_overflow_hash _000751_hash = {
84643 + .next = NULL,
84644 + .name = "ib_copy_from_udata",
84645 + .file = "include/rdma/ib_verbs.h",
84646 + .param3 = 1,
84647 +};
84648 +struct size_overflow_hash _000752_hash = {
84649 + .next = NULL,
84650 + .name = "ib_copy_to_udata",
84651 + .file = "include/rdma/ib_verbs.h",
84652 + .param3 = 1,
84653 +};
84654 +struct size_overflow_hash _000753_hash = {
84655 + .next = NULL,
84656 + .name = "ibmasm_new_command",
84657 + .file = "drivers/misc/ibmasm/command.c",
84658 + .param2 = 1,
84659 +};
84660 +struct size_overflow_hash _000754_hash = {
84661 + .next = NULL,
84662 + .name = "ib_ucm_alloc_data",
84663 + .file = "drivers/infiniband/core/ucm.c",
84664 + .param3 = 1,
84665 +};
84666 +struct size_overflow_hash _000755_hash = {
84667 + .next = NULL,
84668 + .name = "ib_umad_write",
84669 + .file = "drivers/infiniband/core/user_mad.c",
84670 + .param3 = 1,
84671 +};
84672 +struct size_overflow_hash _000756_hash = {
84673 + .next = NULL,
84674 + .name = "ib_uverbs_unmarshall_recv",
84675 + .file = "drivers/infiniband/core/uverbs_cmd.c",
84676 + .param5 = 1,
84677 +};
84678 +struct size_overflow_hash _000757_hash = {
84679 + .next = NULL,
84680 + .name = "ide_driver_proc_write",
84681 + .file = "drivers/ide/ide-proc.c",
84682 + .param3 = 1,
84683 +};
84684 +struct size_overflow_hash _000758_hash = {
84685 + .next = NULL,
84686 + .name = "ide_queue_pc_tail",
84687 + .file = "include/linux/ide.h",
84688 + .param5 = 1,
84689 +};
84690 +struct size_overflow_hash _000759_hash = {
84691 + .next = NULL,
84692 + .name = "ide_raw_taskfile",
84693 + .file = "include/linux/ide.h",
84694 + .param4 = 1,
84695 +};
84696 +struct size_overflow_hash _000760_hash = {
84697 + .next = NULL,
84698 + .name = "ide_settings_proc_write",
84699 + .file = "drivers/ide/ide-proc.c",
84700 + .param3 = 1,
84701 +};
84702 +struct size_overflow_hash _000761_hash = {
84703 + .next = NULL,
84704 + .name = "idetape_chrdev_read",
84705 + .file = "drivers/ide/ide-tape.c",
84706 + .param3 = 1,
84707 +};
84708 +struct size_overflow_hash _000762_hash = {
84709 + .next = NULL,
84710 + .name = "idetape_chrdev_write",
84711 + .file = "drivers/ide/ide-tape.c",
84712 + .param3 = 1,
84713 +};
84714 +struct size_overflow_hash _000763_hash = {
84715 + .next = NULL,
84716 + .name = "idmouse_read",
84717 + .file = "drivers/usb/misc/idmouse.c",
84718 + .param3 = 1,
84719 +};
84720 +struct size_overflow_hash _000764_hash = {
84721 + .next = NULL,
84722 + .name = "ieee80211_build_probe_req",
84723 + .file = "net/mac80211/util.c",
84724 + .param7 = 1,
84725 +};
84726 +struct size_overflow_hash _000765_hash = {
84727 + .next = NULL,
84728 + .name = "ieee80211_if_read",
84729 + .file = "net/mac80211/debugfs_netdev.c",
84730 + .param3 = 1,
84731 +};
84732 +struct size_overflow_hash _000766_hash = {
84733 + .next = NULL,
84734 + .name = "ieee80211_if_write",
84735 + .file = "net/mac80211/debugfs_netdev.c",
84736 + .param3 = 1,
84737 +};
84738 +struct size_overflow_hash _000767_hash = {
84739 + .next = NULL,
84740 + .name = "ieee80211_key_alloc",
84741 + .file = "net/mac80211/key.c",
84742 + .param3 = 1,
84743 +};
84744 +struct size_overflow_hash _000768_hash = {
84745 + .next = NULL,
84746 + .name = "ieee80211_mgmt_tx",
84747 + .file = "net/mac80211/cfg.c",
84748 + .param9 = 1,
84749 +};
84750 +struct size_overflow_hash _000769_hash = {
84751 + .next = NULL,
84752 + .name = "ikconfig_read_current",
84753 + .file = "kernel/configs.c",
84754 + .param3 = 1,
84755 +};
84756 +struct size_overflow_hash _000770_hash = {
84757 + .next = NULL,
84758 + .name = "il3945_sta_dbgfs_stats_table_read",
84759 + .file = "drivers/net/wireless/iwlegacy/3945-rs.c",
84760 + .param3 = 1,
84761 +};
84762 +struct size_overflow_hash _000771_hash = {
84763 + .next = NULL,
84764 + .name = "il3945_ucode_general_stats_read",
84765 + .file = "drivers/net/wireless/iwlegacy/3945-debug.c",
84766 + .param3 = 1,
84767 +};
84768 +struct size_overflow_hash _000772_hash = {
84769 + .next = NULL,
84770 + .name = "il3945_ucode_rx_stats_read",
84771 + .file = "drivers/net/wireless/iwlegacy/3945-debug.c",
84772 + .param3 = 1,
84773 +};
84774 +struct size_overflow_hash _000773_hash = {
84775 + .next = NULL,
84776 + .name = "il3945_ucode_tx_stats_read",
84777 + .file = "drivers/net/wireless/iwlegacy/3945-debug.c",
84778 + .param3 = 1,
84779 +};
84780 +struct size_overflow_hash _000774_hash = {
84781 + .next = NULL,
84782 + .name = "il4965_rs_sta_dbgfs_rate_scale_data_read",
84783 + .file = "drivers/net/wireless/iwlegacy/4965-rs.c",
84784 + .param3 = 1,
84785 +};
84786 +struct size_overflow_hash _000775_hash = {
84787 + .next = NULL,
84788 + .name = "il4965_rs_sta_dbgfs_scale_table_read",
84789 + .file = "drivers/net/wireless/iwlegacy/4965-rs.c",
84790 + .param3 = 1,
84791 +};
84792 +struct size_overflow_hash _000776_hash = {
84793 + .next = NULL,
84794 + .name = "il4965_rs_sta_dbgfs_stats_table_read",
84795 + .file = "drivers/net/wireless/iwlegacy/4965-rs.c",
84796 + .param3 = 1,
84797 +};
84798 +struct size_overflow_hash _000777_hash = {
84799 + .next = NULL,
84800 + .name = "il4965_ucode_general_stats_read",
84801 + .file = "drivers/net/wireless/iwlegacy/4965-debug.c",
84802 + .param3 = 1,
84803 +};
84804 +struct size_overflow_hash _000778_hash = {
84805 + .next = NULL,
84806 + .name = "il4965_ucode_rx_stats_read",
84807 + .file = "drivers/net/wireless/iwlegacy/4965-debug.c",
84808 + .param3 = 1,
84809 +};
84810 +struct size_overflow_hash _000779_hash = {
84811 + .next = NULL,
84812 + .name = "il4965_ucode_tx_stats_read",
84813 + .file = "drivers/net/wireless/iwlegacy/4965-debug.c",
84814 + .param3 = 1,
84815 +};
84816 +struct size_overflow_hash _000780_hash = {
84817 + .next = NULL,
84818 + .name = "il_dbgfs_chain_noise_read",
84819 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84820 + .param3 = 1,
84821 +};
84822 +struct size_overflow_hash _000781_hash = {
84823 + .next = NULL,
84824 + .name = "il_dbgfs_channels_read",
84825 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84826 + .param3 = 1,
84827 +};
84828 +struct size_overflow_hash _000782_hash = {
84829 + .next = NULL,
84830 + .name = "il_dbgfs_disable_ht40_read",
84831 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84832 + .param3 = 1,
84833 +};
84834 +struct size_overflow_hash _000783_hash = {
84835 + .next = NULL,
84836 + .name = "il_dbgfs_fh_reg_read",
84837 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84838 + .param3 = 1,
84839 +};
84840 +struct size_overflow_hash _000784_hash = {
84841 + .next = NULL,
84842 + .name = "il_dbgfs_force_reset_read",
84843 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84844 + .param3 = 1,
84845 +};
84846 +struct size_overflow_hash _000785_hash = {
84847 + .next = NULL,
84848 + .name = "il_dbgfs_interrupt_read",
84849 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84850 + .param3 = 1,
84851 +};
84852 +struct size_overflow_hash _000786_hash = {
84853 + .next = NULL,
84854 + .name = "il_dbgfs_missed_beacon_read",
84855 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84856 + .param3 = 1,
84857 +};
84858 +struct size_overflow_hash _000787_hash = {
84859 + .next = NULL,
84860 + .name = "il_dbgfs_nvm_read",
84861 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84862 + .param3 = 1,
84863 +};
84864 +struct size_overflow_hash _000788_hash = {
84865 + .next = NULL,
84866 + .name = "il_dbgfs_power_save_status_read",
84867 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84868 + .param3 = 1,
84869 +};
84870 +struct size_overflow_hash _000789_hash = {
84871 + .next = NULL,
84872 + .name = "il_dbgfs_qos_read",
84873 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84874 + .param3 = 1,
84875 +};
84876 +struct size_overflow_hash _000790_hash = {
84877 + .next = &_000221_hash,
84878 + .name = "il_dbgfs_rxon_filter_flags_read",
84879 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84880 + .param3 = 1,
84881 +};
84882 +struct size_overflow_hash _000791_hash = {
84883 + .next = NULL,
84884 + .name = "il_dbgfs_rxon_flags_read",
84885 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84886 + .param3 = 1,
84887 +};
84888 +struct size_overflow_hash _000792_hash = {
84889 + .next = NULL,
84890 + .name = "il_dbgfs_rx_queue_read",
84891 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84892 + .param3 = 1,
84893 +};
84894 +struct size_overflow_hash _000793_hash = {
84895 + .next = NULL,
84896 + .name = "il_dbgfs_rx_stats_read",
84897 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84898 + .param3 = 1,
84899 +};
84900 +struct size_overflow_hash _000794_hash = {
84901 + .next = NULL,
84902 + .name = "il_dbgfs_sensitivity_read",
84903 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84904 + .param3 = 1,
84905 +};
84906 +struct size_overflow_hash _000795_hash = {
84907 + .next = NULL,
84908 + .name = "il_dbgfs_sram_read",
84909 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84910 + .param3 = 1,
84911 +};
84912 +struct size_overflow_hash _000796_hash = {
84913 + .next = NULL,
84914 + .name = "il_dbgfs_stations_read",
84915 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84916 + .param3 = 1,
84917 +};
84918 +struct size_overflow_hash _000797_hash = {
84919 + .next = NULL,
84920 + .name = "il_dbgfs_status_read",
84921 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84922 + .param3 = 1,
84923 +};
84924 +struct size_overflow_hash _000798_hash = {
84925 + .next = NULL,
84926 + .name = "il_dbgfs_traffic_log_read",
84927 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84928 + .param3 = 1,
84929 +};
84930 +struct size_overflow_hash _000799_hash = {
84931 + .next = NULL,
84932 + .name = "il_dbgfs_tx_queue_read",
84933 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84934 + .param3 = 1,
84935 +};
84936 +struct size_overflow_hash _000800_hash = {
84937 + .next = NULL,
84938 + .name = "il_dbgfs_tx_stats_read",
84939 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84940 + .param3 = 1,
84941 +};
84942 +struct size_overflow_hash _000801_hash = {
84943 + .next = NULL,
84944 + .name = "ilo_read",
84945 + .file = "drivers/misc/hpilo.c",
84946 + .param3 = 1,
84947 +};
84948 +struct size_overflow_hash _000802_hash = {
84949 + .next = NULL,
84950 + .name = "ilo_write",
84951 + .file = "drivers/misc/hpilo.c",
84952 + .param3 = 1,
84953 +};
84954 +struct size_overflow_hash _000803_hash = {
84955 + .next = NULL,
84956 + .name = "init_data_container",
84957 + .file = "fs/btrfs/backref.c",
84958 + .param1 = 1,
84959 +};
84960 +struct size_overflow_hash _000804_hash = {
84961 + .next = NULL,
84962 + .name = "init_list_set",
84963 + .file = "net/netfilter/ipset/ip_set_list_set.c",
84964 + .param2 = 1,
84965 + .param3 = 1,
84966 +};
84967 +struct size_overflow_hash _000806_hash = {
84968 + .next = NULL,
84969 + .name = "interpret_user_input",
84970 + .file = "fs/ubifs/debug.c",
84971 + .param2 = 1,
84972 +};
84973 +struct size_overflow_hash _000807_hash = {
84974 + .next = NULL,
84975 + .name = "int_proc_write",
84976 + .file = "drivers/net/wireless/ray_cs.c",
84977 + .param3 = 1,
84978 +};
84979 +struct size_overflow_hash _000808_hash = {
84980 + .next = NULL,
84981 + .name = "iowarrior_read",
84982 + .file = "drivers/usb/misc/iowarrior.c",
84983 + .param3 = 1,
84984 +};
84985 +struct size_overflow_hash _000809_hash = {
84986 + .next = NULL,
84987 + .name = "iowarrior_write",
84988 + .file = "drivers/usb/misc/iowarrior.c",
84989 + .param3 = 1,
84990 +};
84991 +struct size_overflow_hash _000810_hash = {
84992 + .next = NULL,
84993 + .name = "ip_set_alloc",
84994 + .file = "include/linux/netfilter/ipset/ip_set.h",
84995 + .param1 = 1,
84996 +};
84997 +struct size_overflow_hash _000811_hash = {
84998 + .next = NULL,
84999 + .name = "ip_vs_conn_fill_param_sync",
85000 + .file = "net/netfilter/ipvs/ip_vs_sync.c",
85001 + .param6 = 1,
85002 +};
85003 +struct size_overflow_hash _000812_hash = {
85004 + .next = NULL,
85005 + .name = "irda_setsockopt",
85006 + .file = "net/irda/af_irda.c",
85007 + .param5 = 1,
85008 +};
85009 +struct size_overflow_hash _000813_hash = {
85010 + .next = NULL,
85011 + .name = "ir_lirc_transmit_ir",
85012 + .file = "drivers/media/rc/ir-lirc-codec.c",
85013 + .param3 = 1,
85014 +};
85015 +struct size_overflow_hash _000814_hash = {
85016 + .next = NULL,
85017 + .name = "irnet_ctrl_write",
85018 + .file = "net/irda/irnet/irnet_ppp.c",
85019 + .param3 = 1,
85020 +};
85021 +struct size_overflow_hash _000815_hash = {
85022 + .next = NULL,
85023 + .name = "iscsi_decode_text_input",
85024 + .file = "drivers/target/iscsi/iscsi_target_parameters.c",
85025 + .param4 = 1,
85026 +};
85027 +struct size_overflow_hash _000816_hash = {
85028 + .next = NULL,
85029 + .name = "iscsit_dump_data_payload",
85030 + .file = "drivers/target/iscsi/iscsi_target_erl1.c",
85031 + .param2 = 1,
85032 +};
85033 +struct size_overflow_hash _000817_hash = {
85034 + .next = NULL,
85035 + .name = "isdn_read",
85036 + .file = "drivers/isdn/i4l/isdn_common.c",
85037 + .param3 = 1,
85038 +};
85039 +struct size_overflow_hash _000818_hash = {
85040 + .next = NULL,
85041 + .name = "iso_callback",
85042 + .file = "drivers/firewire/core-cdev.c",
85043 + .param3 = 1,
85044 +};
85045 +struct size_overflow_hash _000819_hash = {
85046 + .next = NULL,
85047 + .name = "iso_packets_buffer_init",
85048 + .file = "sound/firewire/packets-buffer.c",
85049 + .param3 = 1,
85050 +};
85051 +struct size_overflow_hash _000820_hash = {
85052 + .next = NULL,
85053 + .name = "iso_sched_alloc",
85054 + .file = "drivers/usb/host/ehci-sched.c",
85055 + .param1 = 1,
85056 +};
85057 +struct size_overflow_hash _000821_hash = {
85058 + .next = NULL,
85059 + .name = "isr_cmd_cmplt_read",
85060 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85061 + .param3 = 1,
85062 +};
85063 +struct size_overflow_hash _000822_hash = {
85064 + .next = NULL,
85065 + .name = "isr_commands_read",
85066 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85067 + .param3 = 1,
85068 +};
85069 +struct size_overflow_hash _000823_hash = {
85070 + .next = NULL,
85071 + .name = "isr_decrypt_done_read",
85072 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85073 + .param3 = 1,
85074 +};
85075 +struct size_overflow_hash _000824_hash = {
85076 + .next = NULL,
85077 + .name = "isr_dma0_done_read",
85078 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85079 + .param3 = 1,
85080 +};
85081 +struct size_overflow_hash _000825_hash = {
85082 + .next = NULL,
85083 + .name = "isr_dma1_done_read",
85084 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85085 + .param3 = 1,
85086 +};
85087 +struct size_overflow_hash _000826_hash = {
85088 + .next = NULL,
85089 + .name = "isr_fiqs_read",
85090 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85091 + .param3 = 1,
85092 +};
85093 +struct size_overflow_hash _000827_hash = {
85094 + .next = NULL,
85095 + .name = "isr_host_acknowledges_read",
85096 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85097 + .param3 = 1,
85098 +};
85099 +struct size_overflow_hash _000828_hash = {
85100 + .next = &_000629_hash,
85101 + .name = "isr_hw_pm_mode_changes_read",
85102 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85103 + .param3 = 1,
85104 +};
85105 +struct size_overflow_hash _000829_hash = {
85106 + .next = &_000329_hash,
85107 + .name = "isr_irqs_read",
85108 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85109 + .param3 = 1,
85110 +};
85111 +struct size_overflow_hash _000830_hash = {
85112 + .next = NULL,
85113 + .name = "isr_low_rssi_read",
85114 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85115 + .param3 = 1,
85116 +};
85117 +struct size_overflow_hash _000831_hash = {
85118 + .next = NULL,
85119 + .name = "isr_pci_pm_read",
85120 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85121 + .param3 = 1,
85122 +};
85123 +struct size_overflow_hash _000832_hash = {
85124 + .next = NULL,
85125 + .name = "isr_rx_headers_read",
85126 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85127 + .param3 = 1,
85128 +};
85129 +struct size_overflow_hash _000833_hash = {
85130 + .next = NULL,
85131 + .name = "isr_rx_mem_overflow_read",
85132 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85133 + .param3 = 1,
85134 +};
85135 +struct size_overflow_hash _000834_hash = {
85136 + .next = NULL,
85137 + .name = "isr_rx_procs_read",
85138 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85139 + .param3 = 1,
85140 +};
85141 +struct size_overflow_hash _000835_hash = {
85142 + .next = NULL,
85143 + .name = "isr_rx_rdys_read",
85144 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85145 + .param3 = 1,
85146 +};
85147 +struct size_overflow_hash _000836_hash = {
85148 + .next = NULL,
85149 + .name = "isr_tx_exch_complete_read",
85150 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85151 + .param3 = 1,
85152 +};
85153 +struct size_overflow_hash _000837_hash = {
85154 + .next = NULL,
85155 + .name = "isr_tx_procs_read",
85156 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85157 + .param3 = 1,
85158 +};
85159 +struct size_overflow_hash _000838_hash = {
85160 + .next = NULL,
85161 + .name = "isr_wakeups_read",
85162 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85163 + .param3 = 1,
85164 +};
85165 +struct size_overflow_hash _000839_hash = {
85166 + .next = NULL,
85167 + .name = "ivtv_copy_buf_to_user",
85168 + .file = "drivers/media/video/ivtv/ivtv-fileops.c",
85169 + .param4 = 1,
85170 +};
85171 +struct size_overflow_hash _000840_hash = {
85172 + .next = NULL,
85173 + .name = "iwl_dbgfs_bt_traffic_read",
85174 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85175 + .param3 = 1,
85176 +};
85177 +struct size_overflow_hash _000841_hash = {
85178 + .next = NULL,
85179 + .name = "iwl_dbgfs_chain_noise_read",
85180 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85181 + .param3 = 1,
85182 +};
85183 +struct size_overflow_hash _000842_hash = {
85184 + .next = NULL,
85185 + .name = "iwl_dbgfs_channels_read",
85186 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85187 + .param3 = 1,
85188 +};
85189 +struct size_overflow_hash _000843_hash = {
85190 + .next = NULL,
85191 + .name = "iwl_dbgfs_current_sleep_command_read",
85192 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85193 + .param3 = 1,
85194 +};
85195 +struct size_overflow_hash _000844_hash = {
85196 + .next = NULL,
85197 + .name = "iwl_dbgfs_debug_level_read",
85198 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85199 + .param3 = 1,
85200 +};
85201 +struct size_overflow_hash _000845_hash = {
85202 + .next = NULL,
85203 + .name = "iwl_dbgfs_debug_level_write",
85204 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85205 + .param3 = 1,
85206 +};
85207 +struct size_overflow_hash _000846_hash = {
85208 + .next = NULL,
85209 + .name = "iwl_dbgfs_disable_ht40_read",
85210 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85211 + .param3 = 1,
85212 +};
85213 +struct size_overflow_hash _000847_hash = {
85214 + .next = NULL,
85215 + .name = "iwl_dbgfs_fh_reg_read",
85216 + .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
85217 + .param3 = 1,
85218 +};
85219 +struct size_overflow_hash _000848_hash = {
85220 + .next = NULL,
85221 + .name = "iwl_dbgfs_force_reset_read",
85222 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85223 + .param3 = 1,
85224 +};
85225 +struct size_overflow_hash _000849_hash = {
85226 + .next = NULL,
85227 + .name = "iwl_dbgfs_interrupt_read",
85228 + .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
85229 + .param3 = 1,
85230 +};
85231 +struct size_overflow_hash _000850_hash = {
85232 + .next = NULL,
85233 + .name = "iwl_dbgfs_log_event_read",
85234 + .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
85235 + .param3 = 1,
85236 +};
85237 +struct size_overflow_hash _000851_hash = {
85238 + .next = NULL,
85239 + .name = "iwl_dbgfs_missed_beacon_read",
85240 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85241 + .param3 = 1,
85242 +};
85243 +struct size_overflow_hash _000852_hash = {
85244 + .next = NULL,
85245 + .name = "iwl_dbgfs_nvm_read",
85246 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85247 + .param3 = 1,
85248 +};
85249 +struct size_overflow_hash _000853_hash = {
85250 + .next = NULL,
85251 + .name = "iwl_dbgfs_plcp_delta_read",
85252 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85253 + .param3 = 1,
85254 +};
85255 +struct size_overflow_hash _000854_hash = {
85256 + .next = NULL,
85257 + .name = "iwl_dbgfs_power_save_status_read",
85258 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85259 + .param3 = 1,
85260 +};
85261 +struct size_overflow_hash _000855_hash = {
85262 + .next = NULL,
85263 + .name = "iwl_dbgfs_protection_mode_read",
85264 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85265 + .param3 = 1,
85266 +};
85267 +struct size_overflow_hash _000856_hash = {
85268 + .next = NULL,
85269 + .name = "iwl_dbgfs_qos_read",
85270 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85271 + .param3 = 1,
85272 +};
85273 +struct size_overflow_hash _000857_hash = {
85274 + .next = NULL,
85275 + .name = "iwl_dbgfs_reply_tx_error_read",
85276 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85277 + .param3 = 1,
85278 +};
85279 +struct size_overflow_hash _000858_hash = {
85280 + .next = NULL,
85281 + .name = "iwl_dbgfs_rx_handlers_read",
85282 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85283 + .param3 = 1,
85284 +};
85285 +struct size_overflow_hash _000859_hash = {
85286 + .next = NULL,
85287 + .name = "iwl_dbgfs_rxon_filter_flags_read",
85288 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85289 + .param3 = 1,
85290 +};
85291 +struct size_overflow_hash _000860_hash = {
85292 + .next = NULL,
85293 + .name = "iwl_dbgfs_rxon_flags_read",
85294 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85295 + .param3 = 1,
85296 +};
85297 +struct size_overflow_hash _000861_hash = {
85298 + .next = NULL,
85299 + .name = "iwl_dbgfs_rx_queue_read",
85300 + .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
85301 + .param3 = 1,
85302 +};
85303 +struct size_overflow_hash _000862_hash = {
85304 + .next = NULL,
85305 + .name = "iwl_dbgfs_rx_statistics_read",
85306 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85307 + .param3 = 1,
85308 +};
85309 +struct size_overflow_hash _000863_hash = {
85310 + .next = NULL,
85311 + .name = "iwl_dbgfs_sensitivity_read",
85312 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85313 + .param3 = 1,
85314 +};
85315 +struct size_overflow_hash _000864_hash = {
85316 + .next = NULL,
85317 + .name = "iwl_dbgfs_sleep_level_override_read",
85318 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85319 + .param3 = 1,
85320 +};
85321 +struct size_overflow_hash _000865_hash = {
85322 + .next = NULL,
85323 + .name = "iwl_dbgfs_sram_read",
85324 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85325 + .param3 = 1,
85326 +};
85327 +struct size_overflow_hash _000866_hash = {
85328 + .next = NULL,
85329 + .name = "iwl_dbgfs_stations_read",
85330 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85331 + .param3 = 1,
85332 +};
85333 +struct size_overflow_hash _000867_hash = {
85334 + .next = NULL,
85335 + .name = "iwl_dbgfs_status_read",
85336 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85337 + .param3 = 1,
85338 +};
85339 +struct size_overflow_hash _000868_hash = {
85340 + .next = NULL,
85341 + .name = "iwl_dbgfs_temperature_read",
85342 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85343 + .param3 = 1,
85344 +};
85345 +struct size_overflow_hash _000869_hash = {
85346 + .next = NULL,
85347 + .name = "iwl_dbgfs_thermal_throttling_read",
85348 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85349 + .param3 = 1,
85350 +};
85351 +struct size_overflow_hash _000870_hash = {
85352 + .next = NULL,
85353 + .name = "iwl_dbgfs_traffic_log_read",
85354 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85355 + .param3 = 1,
85356 +};
85357 +struct size_overflow_hash _000871_hash = {
85358 + .next = NULL,
85359 + .name = "iwl_dbgfs_tx_queue_read",
85360 + .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
85361 + .param3 = 1,
85362 +};
85363 +struct size_overflow_hash _000872_hash = {
85364 + .next = NULL,
85365 + .name = "iwl_dbgfs_tx_statistics_read",
85366 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85367 + .param3 = 1,
85368 +};
85369 +struct size_overflow_hash _000873_hash = {
85370 + .next = NULL,
85371 + .name = "iwl_dbgfs_ucode_bt_stats_read",
85372 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85373 + .param3 = 1,
85374 +};
85375 +struct size_overflow_hash _000874_hash = {
85376 + .next = NULL,
85377 + .name = "iwl_dbgfs_ucode_general_stats_read",
85378 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85379 + .param3 = 1,
85380 +};
85381 +struct size_overflow_hash _000875_hash = {
85382 + .next = NULL,
85383 + .name = "iwl_dbgfs_ucode_rx_stats_read",
85384 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85385 + .param3 = 1,
85386 +};
85387 +struct size_overflow_hash _000876_hash = {
85388 + .next = NULL,
85389 + .name = "iwl_dbgfs_ucode_tracing_read",
85390 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85391 + .param3 = 1,
85392 +};
85393 +struct size_overflow_hash _000877_hash = {
85394 + .next = NULL,
85395 + .name = "iwl_dbgfs_ucode_tx_stats_read",
85396 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85397 + .param3 = 1,
85398 +};
85399 +struct size_overflow_hash _000878_hash = {
85400 + .next = NULL,
85401 + .name = "iwl_dbgfs_wowlan_sram_read",
85402 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85403 + .param3 = 1,
85404 +};
85405 +struct size_overflow_hash _000879_hash = {
85406 + .next = NULL,
85407 + .name = "iwmct_fw_parser_init",
85408 + .file = "drivers/misc/iwmc3200top/fw-download.c",
85409 + .param4 = 1,
85410 +};
85411 +struct size_overflow_hash _000880_hash = {
85412 + .next = NULL,
85413 + .name = "iwm_notif_send",
85414 + .file = "drivers/net/wireless/iwmc3200wifi/main.c",
85415 + .param6 = 1,
85416 +};
85417 +struct size_overflow_hash _000881_hash = {
85418 + .next = NULL,
85419 + .name = "iwm_ntf_calib_res",
85420 + .file = "drivers/net/wireless/iwmc3200wifi/rx.c",
85421 + .param3 = 1,
85422 +};
85423 +struct size_overflow_hash _000882_hash = {
85424 + .next = NULL,
85425 + .name = "iwm_umac_set_config_var",
85426 + .file = "drivers/net/wireless/iwmc3200wifi/commands.c",
85427 + .param4 = 1,
85428 +};
85429 +struct size_overflow_hash _000883_hash = {
85430 + .next = NULL,
85431 + .name = "jbd2_alloc",
85432 + .file = "include/linux/jbd2.h",
85433 + .param1 = 1,
85434 +};
85435 +struct size_overflow_hash _000884_hash = {
85436 + .next = NULL,
85437 + .name = "key_algorithm_read",
85438 + .file = "net/mac80211/debugfs_key.c",
85439 + .param3 = 1,
85440 +};
85441 +struct size_overflow_hash _000885_hash = {
85442 + .next = NULL,
85443 + .name = "key_icverrors_read",
85444 + .file = "net/mac80211/debugfs_key.c",
85445 + .param3 = 1,
85446 +};
85447 +struct size_overflow_hash _000886_hash = {
85448 + .next = NULL,
85449 + .name = "key_key_read",
85450 + .file = "net/mac80211/debugfs_key.c",
85451 + .param3 = 1,
85452 +};
85453 +struct size_overflow_hash _000887_hash = {
85454 + .next = NULL,
85455 + .name = "key_replays_read",
85456 + .file = "net/mac80211/debugfs_key.c",
85457 + .param3 = 1,
85458 +};
85459 +struct size_overflow_hash _000888_hash = {
85460 + .next = NULL,
85461 + .name = "key_rx_spec_read",
85462 + .file = "net/mac80211/debugfs_key.c",
85463 + .param3 = 1,
85464 +};
85465 +struct size_overflow_hash _000889_hash = {
85466 + .next = NULL,
85467 + .name = "key_tx_spec_read",
85468 + .file = "net/mac80211/debugfs_key.c",
85469 + .param3 = 1,
85470 +};
85471 +struct size_overflow_hash _000890_hash = {
85472 + .next = NULL,
85473 + .name = "kmem_alloc",
85474 + .file = "fs/xfs/kmem.c",
85475 + .param1 = 1,
85476 +};
85477 +struct size_overflow_hash _000891_hash = {
85478 + .next = NULL,
85479 + .name = "kmem_zalloc_large",
85480 + .file = "fs/xfs/kmem.h",
85481 + .param1 = 1,
85482 +};
85483 +struct size_overflow_hash _000892_hash = {
85484 + .next = NULL,
85485 + .name = "kone_receive",
85486 + .file = "drivers/hid/hid-roccat-kone.c",
85487 + .param4 = 1,
85488 +};
85489 +struct size_overflow_hash _000893_hash = {
85490 + .next = NULL,
85491 + .name = "kone_send",
85492 + .file = "drivers/hid/hid-roccat-kone.c",
85493 + .param4 = 1,
85494 +};
85495 +struct size_overflow_hash _000894_hash = {
85496 + .next = NULL,
85497 + .name = "kvm_read_guest_atomic",
85498 + .file = "include/linux/kvm_host.h",
85499 + .param4 = 1,
85500 +};
85501 +struct size_overflow_hash _000895_hash = {
85502 + .next = NULL,
85503 + .name = "kvm_read_guest_cached",
85504 + .file = "include/linux/kvm_host.h",
85505 + .param4 = 1,
85506 +};
85507 +struct size_overflow_hash _000896_hash = {
85508 + .next = NULL,
85509 + .name = "kvm_set_irq_routing",
85510 + .file = "include/linux/kvm_host.h",
85511 + .param3 = 1,
85512 +};
85513 +struct size_overflow_hash _000897_hash = {
85514 + .next = NULL,
85515 + .name = "kvm_write_guest_cached",
85516 + .file = "include/linux/kvm_host.h",
85517 + .param4 = 1,
85518 +};
85519 +struct size_overflow_hash _000898_hash = {
85520 + .next = NULL,
85521 + .name = "l2cap_sock_setsockopt",
85522 + .file = "net/bluetooth/l2cap_sock.c",
85523 + .param5 = 1,
85524 +};
85525 +struct size_overflow_hash _000899_hash = {
85526 + .next = NULL,
85527 + .name = "l2cap_sock_setsockopt_old",
85528 + .file = "net/bluetooth/l2cap_sock.c",
85529 + .param4 = 1,
85530 +};
85531 +struct size_overflow_hash _000900_hash = {
85532 + .next = NULL,
85533 + .name = "lane2_associate_req",
85534 + .file = "net/atm/lec.c",
85535 + .param4 = 1,
85536 +};
85537 +struct size_overflow_hash _000901_hash = {
85538 + .next = NULL,
85539 + .name = "lbs_debugfs_read",
85540 + .file = "drivers/net/wireless/libertas/debugfs.c",
85541 + .param3 = 1,
85542 +};
85543 +struct size_overflow_hash _000902_hash = {
85544 + .next = NULL,
85545 + .name = "lbs_debugfs_write",
85546 + .file = "drivers/net/wireless/libertas/debugfs.c",
85547 + .param3 = 1,
85548 +};
85549 +struct size_overflow_hash _000903_hash = {
85550 + .next = NULL,
85551 + .name = "lbs_dev_info",
85552 + .file = "drivers/net/wireless/libertas/debugfs.c",
85553 + .param3 = 1,
85554 +};
85555 +struct size_overflow_hash _000904_hash = {
85556 + .next = NULL,
85557 + .name = "lbs_host_sleep_read",
85558 + .file = "drivers/net/wireless/libertas/debugfs.c",
85559 + .param3 = 1,
85560 +};
85561 +struct size_overflow_hash _000905_hash = {
85562 + .next = NULL,
85563 + .name = "lbs_rdbbp_read",
85564 + .file = "drivers/net/wireless/libertas/debugfs.c",
85565 + .param3 = 1,
85566 +};
85567 +struct size_overflow_hash _000906_hash = {
85568 + .next = NULL,
85569 + .name = "lbs_rdmac_read",
85570 + .file = "drivers/net/wireless/libertas/debugfs.c",
85571 + .param3 = 1,
85572 +};
85573 +struct size_overflow_hash _000907_hash = {
85574 + .next = NULL,
85575 + .name = "lbs_rdrf_read",
85576 + .file = "drivers/net/wireless/libertas/debugfs.c",
85577 + .param3 = 1,
85578 +};
85579 +struct size_overflow_hash _000908_hash = {
85580 + .next = NULL,
85581 + .name = "lbs_sleepparams_read",
85582 + .file = "drivers/net/wireless/libertas/debugfs.c",
85583 + .param3 = 1,
85584 +};
85585 +struct size_overflow_hash _000909_hash = {
85586 + .next = NULL,
85587 + .name = "lbs_threshold_read",
85588 + .file = "drivers/net/wireless/libertas/debugfs.c",
85589 + .param5 = 1,
85590 +};
85591 +struct size_overflow_hash _000910_hash = {
85592 + .next = NULL,
85593 + .name = "lc_create",
85594 + .file = "include/linux/lru_cache.h",
85595 + .param3 = 1,
85596 +};
85597 +struct size_overflow_hash _000911_hash = {
85598 + .next = NULL,
85599 + .name = "lcd_write",
85600 + .file = "drivers/usb/misc/usblcd.c",
85601 + .param3 = 1,
85602 +};
85603 +struct size_overflow_hash _000912_hash = {
85604 + .next = NULL,
85605 + .name = "leaf_dealloc",
85606 + .file = "fs/gfs2/dir.c",
85607 + .param3 = 1,
85608 +};
85609 +struct size_overflow_hash _000913_hash = {
85610 + .next = NULL,
85611 + .name = "__lgread",
85612 + .file = "drivers/lguest/core.c",
85613 + .param4 = 1,
85614 +};
85615 +struct size_overflow_hash _000914_hash = {
85616 + .next = NULL,
85617 + .name = "__lgwrite",
85618 + .file = "drivers/lguest/core.c",
85619 + .param4 = 1,
85620 +};
85621 +struct size_overflow_hash _000915_hash = {
85622 + .next = NULL,
85623 + .name = "link_send_sections_long",
85624 + .file = "net/tipc/link.c",
85625 + .param4 = 1,
85626 +};
85627 +struct size_overflow_hash _000916_hash = {
85628 + .next = NULL,
85629 + .name = "lirc_buffer_init",
85630 + .file = "include/media/lirc_dev.h",
85631 + .param2 = 1,
85632 + .param3 = 1,
85633 +};
85634 +struct size_overflow_hash _000918_hash = {
85635 + .next = NULL,
85636 + .name = "lkdtm_debugfs_read",
85637 + .file = "drivers/misc/lkdtm.c",
85638 + .param3 = 1,
85639 +};
85640 +struct size_overflow_hash _000919_hash = {
85641 + .next = NULL,
85642 + .name = "LoadBitmap",
85643 + .file = "drivers/media/dvb/ttpci/av7110_hw.c",
85644 + .param2 = 1,
85645 +};
85646 +struct size_overflow_hash _000920_hash = {
85647 + .next = NULL,
85648 + .name = "long_retry_limit_read",
85649 + .file = "net/wireless/debugfs.c",
85650 + .param3 = 1,
85651 +};
85652 +struct size_overflow_hash _000921_hash = {
85653 + .next = NULL,
85654 + .name = "lpfc_debugfs_dif_err_read",
85655 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85656 + .param3 = 1,
85657 +};
85658 +struct size_overflow_hash _000922_hash = {
85659 + .next = NULL,
85660 + .name = "lpfc_debugfs_dif_err_write",
85661 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85662 + .param3 = 1,
85663 +};
85664 +struct size_overflow_hash _000923_hash = {
85665 + .next = NULL,
85666 + .name = "lpfc_debugfs_read",
85667 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85668 + .param3 = 1,
85669 +};
85670 +struct size_overflow_hash _000924_hash = {
85671 + .next = NULL,
85672 + .name = "lpfc_idiag_baracc_read",
85673 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85674 + .param3 = 1,
85675 +};
85676 +struct size_overflow_hash _000925_hash = {
85677 + .next = NULL,
85678 + .name = "lpfc_idiag_ctlacc_read",
85679 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85680 + .param3 = 1,
85681 +};
85682 +struct size_overflow_hash _000926_hash = {
85683 + .next = NULL,
85684 + .name = "lpfc_idiag_drbacc_read",
85685 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85686 + .param3 = 1,
85687 +};
85688 +struct size_overflow_hash _000927_hash = {
85689 + .next = NULL,
85690 + .name = "lpfc_idiag_extacc_read",
85691 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85692 + .param3 = 1,
85693 +};
85694 +struct size_overflow_hash _000928_hash = {
85695 + .next = NULL,
85696 + .name = "lpfc_idiag_mbxacc_read",
85697 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85698 + .param3 = 1,
85699 +};
85700 +struct size_overflow_hash _000929_hash = {
85701 + .next = NULL,
85702 + .name = "lpfc_idiag_pcicfg_read",
85703 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85704 + .param3 = 1,
85705 +};
85706 +struct size_overflow_hash _000930_hash = {
85707 + .next = NULL,
85708 + .name = "lpfc_idiag_queacc_read",
85709 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85710 + .param3 = 1,
85711 +};
85712 +struct size_overflow_hash _000931_hash = {
85713 + .next = NULL,
85714 + .name = "lpfc_idiag_queinfo_read",
85715 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85716 + .param3 = 1,
85717 +};
85718 +struct size_overflow_hash _000932_hash = {
85719 + .next = NULL,
85720 + .name = "lpfc_sli4_queue_alloc",
85721 + .file = "drivers/scsi/lpfc/lpfc_sli.c",
85722 + .param3 = 1,
85723 +};
85724 +struct size_overflow_hash _000933_hash = {
85725 + .next = NULL,
85726 + .name = "lp_write",
85727 + .file = "drivers/char/lp.c",
85728 + .param3 = 1,
85729 +};
85730 +struct size_overflow_hash _000934_hash = {
85731 + .next = NULL,
85732 + .name = "mac80211_format_buffer",
85733 + .file = "net/mac80211/debugfs.c",
85734 + .param2 = 1,
85735 +};
85736 +struct size_overflow_hash _000935_hash = {
85737 + .next = NULL,
85738 + .name = "mce_write",
85739 + .file = "arch/x86/kernel/cpu/mcheck/mce-inject.c",
85740 + .param3 = 1,
85741 +};
85742 +struct size_overflow_hash _000936_hash = {
85743 + .next = NULL,
85744 + .name = "mcs7830_get_reg",
85745 + .file = "drivers/net/usb/mcs7830.c",
85746 + .param3 = 1,
85747 +};
85748 +struct size_overflow_hash _000937_hash = {
85749 + .next = NULL,
85750 + .name = "mcs7830_set_reg",
85751 + .file = "drivers/net/usb/mcs7830.c",
85752 + .param3 = 1,
85753 +};
85754 +struct size_overflow_hash _000938_hash = {
85755 + .next = NULL,
85756 + .name = "mdc800_device_read",
85757 + .file = "drivers/usb/image/mdc800.c",
85758 + .param3 = 1,
85759 +};
85760 +struct size_overflow_hash _000939_hash = {
85761 + .next = NULL,
85762 + .name = "mdiobus_alloc_size",
85763 + .file = "include/linux/phy.h",
85764 + .param1 = 1,
85765 +};
85766 +struct size_overflow_hash _000940_hash = {
85767 + .next = NULL,
85768 + .name = "media_entity_init",
85769 + .file = "include/media/media-entity.h",
85770 + .param2 = 1,
85771 + .param4 = 1,
85772 +};
85773 +struct size_overflow_hash _000942_hash = {
85774 + .next = NULL,
85775 + .name = "memstick_alloc_host",
85776 + .file = "include/linux/memstick.h",
85777 + .param1 = 1,
85778 +};
85779 +struct size_overflow_hash _000943_hash = {
85780 + .next = NULL,
85781 + .name = "mgmt_control",
85782 + .file = "include/net/bluetooth/hci_core.h",
85783 + .param3 = 1,
85784 +};
85785 +struct size_overflow_hash _000944_hash = {
85786 + .next = NULL,
85787 + .name = "mgmt_pending_add",
85788 + .file = "net/bluetooth/mgmt.c",
85789 + .param5 = 1,
85790 +};
85791 +struct size_overflow_hash _000945_hash = {
85792 + .next = &_000321_hash,
85793 + .name = "mic_calc_failure_read",
85794 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85795 + .param3 = 1,
85796 +};
85797 +struct size_overflow_hash _000946_hash = {
85798 + .next = NULL,
85799 + .name = "mic_rx_pkts_read",
85800 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85801 + .param3 = 1,
85802 +};
85803 +struct size_overflow_hash _000947_hash = {
85804 + .next = NULL,
85805 + .name = "minstrel_stats_read",
85806 + .file = "net/mac80211/rc80211_minstrel_debugfs.c",
85807 + .param3 = 1,
85808 +};
85809 +struct size_overflow_hash _000948_hash = {
85810 + .next = NULL,
85811 + .name = "mlx4_en_create_rx_ring",
85812 + .file = "drivers/net/ethernet/mellanox/mlx4/en_rx.c",
85813 + .param3 = 1,
85814 +};
85815 +struct size_overflow_hash _000949_hash = {
85816 + .next = NULL,
85817 + .name = "mlx4_en_create_tx_ring",
85818 + .file = "drivers/net/ethernet/mellanox/mlx4/en_tx.c",
85819 + .param4 = 1,
85820 +};
85821 +struct size_overflow_hash _000950_hash = {
85822 + .next = NULL,
85823 + .name = "mmc_ext_csd_read",
85824 + .file = "drivers/mmc/core/debugfs.c",
85825 + .param3 = 1,
85826 +};
85827 +struct size_overflow_hash _000951_hash = {
85828 + .next = NULL,
85829 + .name = "mmc_send_bus_test",
85830 + .file = "drivers/mmc/core/mmc_ops.c",
85831 + .param4 = 1,
85832 +};
85833 +struct size_overflow_hash _000952_hash = {
85834 + .next = NULL,
85835 + .name = "mmc_send_cxd_data",
85836 + .file = "drivers/mmc/core/mmc_ops.c",
85837 + .param5 = 1,
85838 +};
85839 +struct size_overflow_hash _000953_hash = {
85840 + .next = NULL,
85841 + .name = "mmc_test_alloc_mem",
85842 + .file = "drivers/mmc/card/mmc_test.c",
85843 + .param3 = 1,
85844 +};
85845 +struct size_overflow_hash _000954_hash = {
85846 + .next = NULL,
85847 + .name = "mon_bin_get_event",
85848 + .file = "drivers/usb/mon/mon_bin.c",
85849 + .param4 = 1,
85850 +};
85851 +struct size_overflow_hash _000955_hash = {
85852 + .next = NULL,
85853 + .name = "mon_stat_read",
85854 + .file = "drivers/usb/mon/mon_stat.c",
85855 + .param3 = 1,
85856 +};
85857 +struct size_overflow_hash _000956_hash = {
85858 + .next = NULL,
85859 + .name = "mptctl_getiocinfo",
85860 + .file = "drivers/message/fusion/mptctl.c",
85861 + .param2 = 1,
85862 +};
85863 +struct size_overflow_hash _000957_hash = {
85864 + .next = NULL,
85865 + .name = "msnd_fifo_alloc",
85866 + .file = "sound/oss/msnd.c",
85867 + .param2 = 1,
85868 +};
85869 +struct size_overflow_hash _000958_hash = {
85870 + .next = NULL,
85871 + .name = "mtdchar_readoob",
85872 + .file = "drivers/mtd/mtdchar.c",
85873 + .param4 = 1,
85874 +};
85875 +struct size_overflow_hash _000959_hash = {
85876 + .next = NULL,
85877 + .name = "mtdchar_write",
85878 + .file = "drivers/mtd/mtdchar.c",
85879 + .param3 = 1,
85880 +};
85881 +struct size_overflow_hash _000960_hash = {
85882 + .next = NULL,
85883 + .name = "mtdchar_writeoob",
85884 + .file = "drivers/mtd/mtdchar.c",
85885 + .param4 = 1,
85886 +};
85887 +struct size_overflow_hash _000961_hash = {
85888 + .next = NULL,
85889 + .name = "mtdswap_init",
85890 + .file = "drivers/mtd/mtdswap.c",
85891 + .param2 = 1,
85892 +};
85893 +struct size_overflow_hash _000962_hash = {
85894 + .next = NULL,
85895 + .name = "mtf_test_write",
85896 + .file = "drivers/mmc/card/mmc_test.c",
85897 + .param3 = 1,
85898 +};
85899 +struct size_overflow_hash _000963_hash = {
85900 + .next = NULL,
85901 + .name = "musb_test_mode_write",
85902 + .file = "drivers/usb/musb/musb_debugfs.c",
85903 + .param3 = 1,
85904 +};
85905 +struct size_overflow_hash _000964_hash = {
85906 + .next = NULL,
85907 + .name = "mvumi_alloc_mem_resource",
85908 + .file = "drivers/scsi/mvumi.c",
85909 + .param3 = 1,
85910 +};
85911 +struct size_overflow_hash _000965_hash = {
85912 + .next = NULL,
85913 + .name = "mwifiex_alloc_sdio_mpa_buffers",
85914 + .file = "drivers/net/wireless/mwifiex/sdio.c",
85915 + .param2 = 1,
85916 + .param3 = 1,
85917 +};
85918 +struct size_overflow_hash _000967_hash = {
85919 + .next = NULL,
85920 + .name = "mwifiex_debug_read",
85921 + .file = "drivers/net/wireless/mwifiex/debugfs.c",
85922 + .param3 = 1,
85923 +};
85924 +struct size_overflow_hash _000968_hash = {
85925 + .next = NULL,
85926 + .name = "mwifiex_get_common_rates",
85927 + .file = "drivers/net/wireless/mwifiex/join.c",
85928 + .param3 = 1,
85929 +};
85930 +struct size_overflow_hash _000969_hash = {
85931 + .next = NULL,
85932 + .name = "mwifiex_getlog_read",
85933 + .file = "drivers/net/wireless/mwifiex/debugfs.c",
85934 + .param3 = 1,
85935 +};
85936 +struct size_overflow_hash _000970_hash = {
85937 + .next = NULL,
85938 + .name = "mwifiex_info_read",
85939 + .file = "drivers/net/wireless/mwifiex/debugfs.c",
85940 + .param3 = 1,
85941 +};
85942 +struct size_overflow_hash _000971_hash = {
85943 + .next = NULL,
85944 + .name = "mwifiex_rdeeprom_read",
85945 + .file = "drivers/net/wireless/mwifiex/debugfs.c",
85946 + .param3 = 1,
85947 +};
85948 +struct size_overflow_hash _000972_hash = {
85949 + .next = NULL,
85950 + .name = "mwifiex_regrdwr_read",
85951 + .file = "drivers/net/wireless/mwifiex/debugfs.c",
85952 + .param3 = 1,
85953 +};
85954 +struct size_overflow_hash _000973_hash = {
85955 + .next = NULL,
85956 + .name = "mwifiex_update_curr_bss_params",
85957 + .file = "drivers/net/wireless/mwifiex/scan.c",
85958 + .param5 = 1,
85959 +};
85960 +struct size_overflow_hash _000974_hash = {
85961 + .next = NULL,
85962 + .name = "nand_bch_init",
85963 + .file = "include/linux/mtd/nand_bch.h",
85964 + .param2 = 1,
85965 + .param3 = 1,
85966 +};
85967 +struct size_overflow_hash _000976_hash = {
85968 + .next = NULL,
85969 + .name = "ncp_file_write",
85970 + .file = "fs/ncpfs/file.c",
85971 + .param3 = 1,
85972 +};
85973 +struct size_overflow_hash _000977_hash = {
85974 + .next = NULL,
85975 + .name = "ncp__vol2io",
85976 + .file = "fs/ncpfs/ncplib_kernel.c",
85977 + .param5 = 1,
85978 +};
85979 +struct size_overflow_hash _000978_hash = {
85980 + .next = NULL,
85981 + .name = "new_bind_ctl",
85982 + .file = "sound/pci/hda/patch_realtek.c",
85983 + .param2 = 1,
85984 +};
85985 +struct size_overflow_hash _000979_hash = {
85986 + .next = NULL,
85987 + .name = "nfc_llcp_build_tlv",
85988 + .file = "net/nfc/llcp/commands.c",
85989 + .param3 = 1,
85990 +};
85991 +struct size_overflow_hash _000980_hash = {
85992 + .next = NULL,
85993 + .name = "nfs4_alloc_slots",
85994 + .file = "fs/nfs/nfs4proc.c",
85995 + .param1 = 1,
85996 +};
85997 +struct size_overflow_hash _000981_hash = {
85998 + .next = NULL,
85999 + .name = "nfs4_write_cached_acl",
86000 + .file = "fs/nfs/nfs4proc.c",
86001 + .param3 = 1,
86002 + .param4 = 1,
86003 +};
86004 +struct size_overflow_hash _000982_hash = {
86005 + .next = NULL,
86006 + .name = "nfsctl_transaction_read",
86007 + .file = "fs/nfsd/nfsctl.c",
86008 + .param3 = 1,
86009 +};
86010 +struct size_overflow_hash _000983_hash = {
86011 + .next = NULL,
86012 + .name = "nfsctl_transaction_write",
86013 + .file = "fs/nfsd/nfsctl.c",
86014 + .param3 = 1,
86015 +};
86016 +struct size_overflow_hash _000984_hash = {
86017 + .next = NULL,
86018 + .name = "nfsd_cache_update",
86019 + .file = "fs/nfsd/nfscache.c",
86020 + .param3 = 1,
86021 +};
86022 +struct size_overflow_hash _000985_hash = {
86023 + .next = NULL,
86024 + .name = "nfs_idmap_get_desc",
86025 + .file = "fs/nfs/idmap.c",
86026 + .param2 = 1,
86027 + .param4 = 1,
86028 +};
86029 +struct size_overflow_hash _000987_hash = {
86030 + .next = NULL,
86031 + .name = "nfs_readdata_alloc",
86032 + .file = "include/linux/nfs_fs.h",
86033 + .param1 = 1,
86034 +};
86035 +struct size_overflow_hash _000988_hash = {
86036 + .next = NULL,
86037 + .name = "nfs_readdir_make_qstr",
86038 + .file = "fs/nfs/dir.c",
86039 + .param3 = 1,
86040 +};
86041 +struct size_overflow_hash _000989_hash = {
86042 + .next = NULL,
86043 + .name = "nfs_writedata_alloc",
86044 + .file = "include/linux/nfs_fs.h",
86045 + .param1 = 1,
86046 +};
86047 +struct size_overflow_hash _000990_hash = {
86048 + .next = NULL,
86049 + .name = "nsm_create_handle",
86050 + .file = "fs/lockd/mon.c",
86051 + .param4 = 1,
86052 +};
86053 +struct size_overflow_hash _000991_hash = {
86054 + .next = NULL,
86055 + .name = "ntfs_copy_from_user",
86056 + .file = "fs/ntfs/file.c",
86057 + .param3 = 1,
86058 + .param5 = 1,
86059 +};
86060 +struct size_overflow_hash _000993_hash = {
86061 + .next = NULL,
86062 + .name = "__ntfs_copy_from_user_iovec_inatomic",
86063 + .file = "fs/ntfs/file.c",
86064 + .param3 = 1,
86065 + .param4 = 1,
86066 +};
86067 +struct size_overflow_hash _000995_hash = {
86068 + .next = NULL,
86069 + .name = "__ntfs_malloc",
86070 + .file = "fs/ntfs/malloc.h",
86071 + .param1 = 1,
86072 +};
86073 +struct size_overflow_hash _000996_hash = {
86074 + .next = NULL,
86075 + .name = "nvme_alloc_iod",
86076 + .file = "drivers/block/nvme.c",
86077 + .param1 = 1,
86078 +};
86079 +struct size_overflow_hash _000997_hash = {
86080 + .next = NULL,
86081 + .name = "nvram_write",
86082 + .file = "drivers/char/nvram.c",
86083 + .param3 = 1,
86084 +};
86085 +struct size_overflow_hash _000998_hash = {
86086 + .next = NULL,
86087 + .name = "o2hb_debug_read",
86088 + .file = "fs/ocfs2/cluster/heartbeat.c",
86089 + .param3 = 1,
86090 +};
86091 +struct size_overflow_hash _000999_hash = {
86092 + .next = NULL,
86093 + .name = "o2net_debug_read",
86094 + .file = "fs/ocfs2/cluster/netdebug.c",
86095 + .param3 = 1,
86096 +};
86097 +struct size_overflow_hash _001000_hash = {
86098 + .next = NULL,
86099 + .name = "o2net_send_message_vec",
86100 + .file = "fs/ocfs2/cluster/tcp.c",
86101 + .param4 = 1,
86102 +};
86103 +struct size_overflow_hash _001001_hash = {
86104 + .next = NULL,
86105 + .name = "ocfs2_control_cfu",
86106 + .file = "fs/ocfs2/stack_user.c",
86107 + .param2 = 1,
86108 +};
86109 +struct size_overflow_hash _001002_hash = {
86110 + .next = NULL,
86111 + .name = "ocfs2_control_read",
86112 + .file = "fs/ocfs2/stack_user.c",
86113 + .param3 = 1,
86114 +};
86115 +struct size_overflow_hash _001003_hash = {
86116 + .next = NULL,
86117 + .name = "ocfs2_debug_read",
86118 + .file = "fs/ocfs2/super.c",
86119 + .param3 = 1,
86120 +};
86121 +struct size_overflow_hash _001004_hash = {
86122 + .next = NULL,
86123 + .name = "opera1_xilinx_rw",
86124 + .file = "drivers/media/dvb/dvb-usb/opera1.c",
86125 + .param5 = 1,
86126 +};
86127 +struct size_overflow_hash _001005_hash = {
86128 + .next = NULL,
86129 + .name = "oprofilefs_str_to_user",
86130 + .file = "include/linux/oprofile.h",
86131 + .param3 = 1,
86132 +};
86133 +struct size_overflow_hash _001006_hash = {
86134 + .next = NULL,
86135 + .name = "oprofilefs_ulong_from_user",
86136 + .file = "include/linux/oprofile.h",
86137 + .param3 = 1,
86138 +};
86139 +struct size_overflow_hash _001007_hash = {
86140 + .next = &_000626_hash,
86141 + .name = "oprofilefs_ulong_to_user",
86142 + .file = "include/linux/oprofile.h",
86143 + .param3 = 1,
86144 +};
86145 +struct size_overflow_hash _001008_hash = {
86146 + .next = NULL,
86147 + .name = "_ore_get_io_state",
86148 + .file = "fs/exofs/ore.c",
86149 + .param3 = 1,
86150 +};
86151 +struct size_overflow_hash _001009_hash = {
86152 + .next = NULL,
86153 + .name = "_osd_realloc_seg",
86154 + .file = "drivers/scsi/osd/osd_initiator.c",
86155 + .param3 = 1,
86156 +};
86157 +struct size_overflow_hash _001010_hash = {
86158 + .next = NULL,
86159 + .name = "_osd_req_list_objects",
86160 + .file = "drivers/scsi/osd/osd_initiator.c",
86161 + .param6 = 1,
86162 +};
86163 +struct size_overflow_hash _001011_hash = {
86164 + .next = NULL,
86165 + .name = "osd_req_read_kern",
86166 + .file = "include/scsi/osd_initiator.h",
86167 + .param5 = 1,
86168 +};
86169 +struct size_overflow_hash _001012_hash = {
86170 + .next = NULL,
86171 + .name = "osd_req_write_kern",
86172 + .file = "include/scsi/osd_initiator.h",
86173 + .param5 = 1,
86174 +};
86175 +struct size_overflow_hash _001013_hash = {
86176 + .next = NULL,
86177 + .name = "osst_execute",
86178 + .file = "drivers/scsi/osst.c",
86179 + .param6 = 1,
86180 +};
86181 +struct size_overflow_hash _001014_hash = {
86182 + .next = NULL,
86183 + .name = "otp_read",
86184 + .file = "drivers/mtd/devices/mtd_dataflash.c",
86185 + .param2 = 1,
86186 + .param5 = 1,
86187 +};
86188 +struct size_overflow_hash _001016_hash = {
86189 + .next = NULL,
86190 + .name = "packet_buffer_init",
86191 + .file = "drivers/firewire/nosy.c",
86192 + .param2 = 1,
86193 +};
86194 +struct size_overflow_hash _001017_hash = {
86195 + .next = NULL,
86196 + .name = "packet_setsockopt",
86197 + .file = "net/packet/af_packet.c",
86198 + .param5 = 1,
86199 +};
86200 +struct size_overflow_hash _001018_hash = {
86201 + .next = NULL,
86202 + .name = "parse_arg",
86203 + .file = "drivers/platform/x86/asus_acpi.c",
86204 + .param2 = 1,
86205 +};
86206 +struct size_overflow_hash _001019_hash = {
86207 + .next = NULL,
86208 + .name = "parse_command",
86209 + .file = "fs/binfmt_misc.c",
86210 + .param2 = 1,
86211 +};
86212 +struct size_overflow_hash _001020_hash = {
86213 + .next = NULL,
86214 + .name = "pcmcia_replace_cis",
86215 + .file = "drivers/pcmcia/cistpl.c",
86216 + .param3 = 1,
86217 +};
86218 +struct size_overflow_hash _001021_hash = {
86219 + .next = NULL,
86220 + .name = "pcnet32_realloc_rx_ring",
86221 + .file = "drivers/net/ethernet/amd/pcnet32.c",
86222 + .param3 = 1,
86223 +};
86224 +struct size_overflow_hash _001022_hash = {
86225 + .next = NULL,
86226 + .name = "pcnet32_realloc_tx_ring",
86227 + .file = "drivers/net/ethernet/amd/pcnet32.c",
86228 + .param3 = 1,
86229 +};
86230 +struct size_overflow_hash _001023_hash = {
86231 + .next = NULL,
86232 + .name = "pgctrl_write",
86233 + .file = "net/core/pktgen.c",
86234 + .param3 = 1,
86235 +};
86236 +struct size_overflow_hash _001024_hash = {
86237 + .next = NULL,
86238 + .name = "pg_read",
86239 + .file = "drivers/block/paride/pg.c",
86240 + .param3 = 1,
86241 +};
86242 +struct size_overflow_hash _001025_hash = {
86243 + .next = NULL,
86244 + .name = "pg_write",
86245 + .file = "drivers/block/paride/pg.c",
86246 + .param3 = 1,
86247 +};
86248 +struct size_overflow_hash _001026_hash = {
86249 + .next = NULL,
86250 + .name = "picolcd_debug_eeprom_read",
86251 + .file = "drivers/hid/hid-picolcd.c",
86252 + .param3 = 1,
86253 +};
86254 +struct size_overflow_hash _001027_hash = {
86255 + .next = NULL,
86256 + .name = "pkt_add",
86257 + .file = "drivers/usb/serial/garmin_gps.c",
86258 + .param3 = 1,
86259 +};
86260 +struct size_overflow_hash _001028_hash = {
86261 + .next = NULL,
86262 + .name = "pktgen_if_write",
86263 + .file = "net/core/pktgen.c",
86264 + .param3 = 1,
86265 +};
86266 +struct size_overflow_hash _001029_hash = {
86267 + .next = NULL,
86268 + .name = "platform_list_read_file",
86269 + .file = "sound/soc/soc-core.c",
86270 + .param3 = 1,
86271 +};
86272 +struct size_overflow_hash _001030_hash = {
86273 + .next = NULL,
86274 + .name = "pm8001_store_update_fw",
86275 + .file = "drivers/scsi/pm8001/pm8001_ctl.c",
86276 + .param4 = 1,
86277 +};
86278 +struct size_overflow_hash _001031_hash = {
86279 + .next = NULL,
86280 + .name = "port_show_regs",
86281 + .file = "drivers/tty/serial/mfd.c",
86282 + .param3 = 1,
86283 +};
86284 +struct size_overflow_hash _001032_hash = {
86285 + .next = NULL,
86286 + .name = "ppp_cp_parse_cr",
86287 + .file = "drivers/net/wan/hdlc_ppp.c",
86288 + .param4 = 1,
86289 +};
86290 +struct size_overflow_hash _001033_hash = {
86291 + .next = NULL,
86292 + .name = "ppp_write",
86293 + .file = "drivers/net/ppp/ppp_generic.c",
86294 + .param3 = 1,
86295 +};
86296 +struct size_overflow_hash _001034_hash = {
86297 + .next = NULL,
86298 + .name = "pp_read",
86299 + .file = "drivers/char/ppdev.c",
86300 + .param3 = 1,
86301 +};
86302 +struct size_overflow_hash _001035_hash = {
86303 + .next = NULL,
86304 + .name = "pp_write",
86305 + .file = "drivers/char/ppdev.c",
86306 + .param3 = 1,
86307 +};
86308 +struct size_overflow_hash _001036_hash = {
86309 + .next = NULL,
86310 + .name = "printer_read",
86311 + .file = "drivers/usb/gadget/printer.c",
86312 + .param3 = 1,
86313 +};
86314 +struct size_overflow_hash _001037_hash = {
86315 + .next = NULL,
86316 + .name = "printer_req_alloc",
86317 + .file = "drivers/usb/gadget/printer.c",
86318 + .param2 = 1,
86319 +};
86320 +struct size_overflow_hash _001038_hash = {
86321 + .next = NULL,
86322 + .name = "printer_write",
86323 + .file = "drivers/usb/gadget/printer.c",
86324 + .param3 = 1,
86325 +};
86326 +struct size_overflow_hash _001039_hash = {
86327 + .next = NULL,
86328 + .name = "prism2_set_genericelement",
86329 + .file = "drivers/net/wireless/hostap/hostap_ioctl.c",
86330 + .param3 = 1,
86331 +};
86332 +struct size_overflow_hash _001040_hash = {
86333 + .next = NULL,
86334 + .name = "proc_read",
86335 + .file = "drivers/net/wireless/airo.c",
86336 + .param3 = 1,
86337 +};
86338 +struct size_overflow_hash _001041_hash = {
86339 + .next = NULL,
86340 + .name = "proc_scsi_devinfo_write",
86341 + .file = "drivers/scsi/scsi_devinfo.c",
86342 + .param3 = 1,
86343 +};
86344 +struct size_overflow_hash _001042_hash = {
86345 + .next = NULL,
86346 + .name = "proc_scsi_write",
86347 + .file = "drivers/scsi/scsi_proc.c",
86348 + .param3 = 1,
86349 +};
86350 +struct size_overflow_hash _001043_hash = {
86351 + .next = NULL,
86352 + .name = "proc_scsi_write_proc",
86353 + .file = "drivers/scsi/scsi_proc.c",
86354 + .param3 = 1,
86355 +};
86356 +struct size_overflow_hash _001044_hash = {
86357 + .next = NULL,
86358 + .name = "proc_write",
86359 + .file = "drivers/net/wireless/airo.c",
86360 + .param3 = 1,
86361 +};
86362 +struct size_overflow_hash _001045_hash = {
86363 + .next = NULL,
86364 + .name = "provide_user_output",
86365 + .file = "fs/ubifs/debug.c",
86366 + .param3 = 1,
86367 +};
86368 +struct size_overflow_hash _001046_hash = {
86369 + .next = NULL,
86370 + .name = "ps_pspoll_max_apturn_read",
86371 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86372 + .param3 = 1,
86373 +};
86374 +struct size_overflow_hash _001047_hash = {
86375 + .next = NULL,
86376 + .name = "ps_pspoll_timeouts_read",
86377 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86378 + .param3 = 1,
86379 +};
86380 +struct size_overflow_hash _001048_hash = {
86381 + .next = NULL,
86382 + .name = "ps_pspoll_utilization_read",
86383 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86384 + .param3 = 1,
86385 +};
86386 +struct size_overflow_hash _001049_hash = {
86387 + .next = NULL,
86388 + .name = "ps_upsd_max_apturn_read",
86389 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86390 + .param3 = 1,
86391 +};
86392 +struct size_overflow_hash _001050_hash = {
86393 + .next = NULL,
86394 + .name = "ps_upsd_max_sptime_read",
86395 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86396 + .param3 = 1,
86397 +};
86398 +struct size_overflow_hash _001051_hash = {
86399 + .next = NULL,
86400 + .name = "ps_upsd_timeouts_read",
86401 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86402 + .param3 = 1,
86403 +};
86404 +struct size_overflow_hash _001052_hash = {
86405 + .next = NULL,
86406 + .name = "ps_upsd_utilization_read",
86407 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86408 + .param3 = 1,
86409 +};
86410 +struct size_overflow_hash _001053_hash = {
86411 + .next = NULL,
86412 + .name = "pti_char_write",
86413 + .file = "drivers/misc/pti.c",
86414 + .param3 = 1,
86415 +};
86416 +struct size_overflow_hash _001054_hash = {
86417 + .next = NULL,
86418 + .name = "pt_read",
86419 + .file = "drivers/block/paride/pt.c",
86420 + .param3 = 1,
86421 +};
86422 +struct size_overflow_hash _001055_hash = {
86423 + .next = NULL,
86424 + .name = "pt_write",
86425 + .file = "drivers/block/paride/pt.c",
86426 + .param3 = 1,
86427 +};
86428 +struct size_overflow_hash _001056_hash = {
86429 + .next = NULL,
86430 + .name = "pvr2_ioread_read",
86431 + .file = "drivers/media/video/pvrusb2/pvrusb2-ioread.c",
86432 + .param3 = 1,
86433 +};
86434 +struct size_overflow_hash _001057_hash = {
86435 + .next = NULL,
86436 + .name = "pvr2_ioread_set_sync_key",
86437 + .file = "drivers/media/video/pvrusb2/pvrusb2-ioread.c",
86438 + .param3 = 1,
86439 +};
86440 +struct size_overflow_hash _001058_hash = {
86441 + .next = NULL,
86442 + .name = "pvr2_stream_buffer_count",
86443 + .file = "drivers/media/video/pvrusb2/pvrusb2-io.c",
86444 + .param2 = 1,
86445 +};
86446 +struct size_overflow_hash _001059_hash = {
86447 + .next = NULL,
86448 + .name = "pwr_disable_ps_read",
86449 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86450 + .param3 = 1,
86451 +};
86452 +struct size_overflow_hash _001060_hash = {
86453 + .next = NULL,
86454 + .name = "pwr_elp_enter_read",
86455 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86456 + .param3 = 1,
86457 +};
86458 +struct size_overflow_hash _001061_hash = {
86459 + .next = NULL,
86460 + .name = "pwr_enable_ps_read",
86461 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86462 + .param3 = 1,
86463 +};
86464 +struct size_overflow_hash _001062_hash = {
86465 + .next = NULL,
86466 + .name = "pwr_fix_tsf_ps_read",
86467 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86468 + .param3 = 1,
86469 +};
86470 +struct size_overflow_hash _001063_hash = {
86471 + .next = NULL,
86472 + .name = "pwr_missing_bcns_read",
86473 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86474 + .param3 = 1,
86475 +};
86476 +struct size_overflow_hash _001064_hash = {
86477 + .next = NULL,
86478 + .name = "pwr_power_save_off_read",
86479 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86480 + .param3 = 1,
86481 +};
86482 +struct size_overflow_hash _001065_hash = {
86483 + .next = NULL,
86484 + .name = "pwr_ps_enter_read",
86485 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86486 + .param3 = 1,
86487 +};
86488 +struct size_overflow_hash _001066_hash = {
86489 + .next = NULL,
86490 + .name = "pwr_rcvd_awake_beacons_read",
86491 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86492 + .param3 = 1,
86493 +};
86494 +struct size_overflow_hash _001067_hash = {
86495 + .next = NULL,
86496 + .name = "pwr_rcvd_beacons_read",
86497 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86498 + .param3 = 1,
86499 +};
86500 +struct size_overflow_hash _001068_hash = {
86501 + .next = NULL,
86502 + .name = "pwr_tx_without_ps_read",
86503 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86504 + .param3 = 1,
86505 +};
86506 +struct size_overflow_hash _001069_hash = {
86507 + .next = NULL,
86508 + .name = "pwr_tx_with_ps_read",
86509 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86510 + .param3 = 1,
86511 +};
86512 +struct size_overflow_hash _001070_hash = {
86513 + .next = NULL,
86514 + .name = "pwr_wake_on_host_read",
86515 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86516 + .param3 = 1,
86517 +};
86518 +struct size_overflow_hash _001071_hash = {
86519 + .next = NULL,
86520 + .name = "pwr_wake_on_timer_exp_read",
86521 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86522 + .param3 = 1,
86523 +};
86524 +struct size_overflow_hash _001072_hash = {
86525 + .next = NULL,
86526 + .name = "qc_capture",
86527 + .file = "drivers/media/video/c-qcam.c",
86528 + .param3 = 1,
86529 +};
86530 +struct size_overflow_hash _001073_hash = {
86531 + .next = NULL,
86532 + .name = "qla2x00_get_ctx_bsg_sp",
86533 + .file = "drivers/scsi/qla2xxx/qla_bsg.c",
86534 + .param3 = 1,
86535 +};
86536 +struct size_overflow_hash _001074_hash = {
86537 + .next = NULL,
86538 + .name = "qla2x00_get_ctx_sp",
86539 + .file = "drivers/scsi/qla2xxx/qla_init.c",
86540 + .param3 = 1,
86541 +};
86542 +struct size_overflow_hash _001075_hash = {
86543 + .next = NULL,
86544 + .name = "qlcnic_alloc_msix_entries",
86545 + .file = "drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c",
86546 + .param2 = 1,
86547 +};
86548 +struct size_overflow_hash _001076_hash = {
86549 + .next = NULL,
86550 + .name = "queues_read",
86551 + .file = "net/mac80211/debugfs.c",
86552 + .param3 = 1,
86553 +};
86554 +struct size_overflow_hash _001077_hash = {
86555 + .next = NULL,
86556 + .name = "r3964_write",
86557 + .file = "drivers/tty/n_r3964.c",
86558 + .param4 = 1,
86559 +};
86560 +struct size_overflow_hash _001078_hash = {
86561 + .next = NULL,
86562 + .name = "raw_setsockopt",
86563 + .file = "net/can/raw.c",
86564 + .param5 = 1,
86565 +};
86566 +struct size_overflow_hash _001079_hash = {
86567 + .next = NULL,
86568 + .name = "ray_cs_essid_proc_write",
86569 + .file = "drivers/net/wireless/ray_cs.c",
86570 + .param3 = 1,
86571 +};
86572 +struct size_overflow_hash _001080_hash = {
86573 + .next = NULL,
86574 + .name = "rbd_snap_add",
86575 + .file = "drivers/block/rbd.c",
86576 + .param4 = 1,
86577 +};
86578 +struct size_overflow_hash _001081_hash = {
86579 + .next = NULL,
86580 + .name = "rcname_read",
86581 + .file = "net/mac80211/rate.c",
86582 + .param3 = 1,
86583 +};
86584 +struct size_overflow_hash _001082_hash = {
86585 + .next = NULL,
86586 + .name = "rds_message_alloc",
86587 + .file = "net/rds/message.c",
86588 + .param1 = 1,
86589 +};
86590 +struct size_overflow_hash _001083_hash = {
86591 + .next = NULL,
86592 + .name = "rds_page_copy_user",
86593 + .file = "net/rds/page.c",
86594 + .param4 = 1,
86595 +};
86596 +struct size_overflow_hash _001084_hash = {
86597 + .next = NULL,
86598 + .name = "read",
86599 + .file = "drivers/pci/hotplug/cpqphp_sysfs.c",
86600 + .param3 = 1,
86601 +};
86602 +struct size_overflow_hash _001085_hash = {
86603 + .next = NULL,
86604 + .name = "read_4k_modal_eeprom",
86605 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86606 + .param3 = 1,
86607 +};
86608 +struct size_overflow_hash _001086_hash = {
86609 + .next = NULL,
86610 + .name = "read_9287_modal_eeprom",
86611 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86612 + .param3 = 1,
86613 +};
86614 +struct size_overflow_hash _001087_hash = {
86615 + .next = NULL,
86616 + .name = "read_buf",
86617 + .file = "fs/nfsd/nfs4xdr.c",
86618 + .param2 = 1,
86619 +};
86620 +struct size_overflow_hash _001088_hash = {
86621 + .next = NULL,
86622 + .name = "read_cis_cache",
86623 + .file = "drivers/pcmcia/cistpl.c",
86624 + .param4 = 1,
86625 +};
86626 +struct size_overflow_hash _001089_hash = {
86627 + .next = NULL,
86628 + .name = "read_def_modal_eeprom",
86629 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86630 + .param3 = 1,
86631 +};
86632 +struct size_overflow_hash _001090_hash = {
86633 + .next = NULL,
86634 + .name = "read_file_ani",
86635 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
86636 + .param3 = 1,
86637 +};
86638 +struct size_overflow_hash _001091_hash = {
86639 + .next = NULL,
86640 + .name = "read_file_antenna",
86641 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
86642 + .param3 = 1,
86643 +};
86644 +struct size_overflow_hash _001092_hash = {
86645 + .next = NULL,
86646 + .name = "read_file_base_eeprom",
86647 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86648 + .param3 = 1,
86649 +};
86650 +struct size_overflow_hash _001093_hash = {
86651 + .next = NULL,
86652 + .name = "read_file_base_eeprom",
86653 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86654 + .param3 = 1,
86655 +};
86656 +struct size_overflow_hash _001094_hash = {
86657 + .next = NULL,
86658 + .name = "read_file_beacon",
86659 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
86660 + .param3 = 1,
86661 +};
86662 +struct size_overflow_hash _001095_hash = {
86663 + .next = NULL,
86664 + .name = "read_file_credit_dist_stats",
86665 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
86666 + .param3 = 1,
86667 +};
86668 +struct size_overflow_hash _001096_hash = {
86669 + .next = NULL,
86670 + .name = "read_file_debug",
86671 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86672 + .param3 = 1,
86673 +};
86674 +struct size_overflow_hash _001097_hash = {
86675 + .next = NULL,
86676 + .name = "read_file_debug",
86677 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86678 + .param3 = 1,
86679 +};
86680 +struct size_overflow_hash _001098_hash = {
86681 + .next = NULL,
86682 + .name = "read_file_debug",
86683 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
86684 + .param3 = 1,
86685 +};
86686 +struct size_overflow_hash _001099_hash = {
86687 + .next = NULL,
86688 + .name = "read_file_disable_ani",
86689 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86690 + .param3 = 1,
86691 +};
86692 +struct size_overflow_hash _001100_hash = {
86693 + .next = NULL,
86694 + .name = "read_file_dma",
86695 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86696 + .param3 = 1,
86697 +};
86698 +struct size_overflow_hash _001101_hash = {
86699 + .next = NULL,
86700 + .name = "read_file_dump_nfcal",
86701 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86702 + .param3 = 1,
86703 +};
86704 +struct size_overflow_hash _001102_hash = {
86705 + .next = NULL,
86706 + .name = "read_file_frameerrors",
86707 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
86708 + .param3 = 1,
86709 +};
86710 +struct size_overflow_hash _001103_hash = {
86711 + .next = NULL,
86712 + .name = "read_file_interrupt",
86713 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86714 + .param3 = 1,
86715 +};
86716 +struct size_overflow_hash _001104_hash = {
86717 + .next = NULL,
86718 + .name = "read_file_misc",
86719 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
86720 + .param3 = 1,
86721 +};
86722 +struct size_overflow_hash _001105_hash = {
86723 + .next = NULL,
86724 + .name = "read_file_misc",
86725 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86726 + .param3 = 1,
86727 +};
86728 +struct size_overflow_hash _001106_hash = {
86729 + .next = NULL,
86730 + .name = "read_file_modal_eeprom",
86731 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86732 + .param3 = 1,
86733 +};
86734 +struct size_overflow_hash _001107_hash = {
86735 + .next = NULL,
86736 + .name = "read_file_queue",
86737 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
86738 + .param3 = 1,
86739 +};
86740 +struct size_overflow_hash _001108_hash = {
86741 + .next = NULL,
86742 + .name = "read_file_queue",
86743 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86744 + .param3 = 1,
86745 +};
86746 +struct size_overflow_hash _001109_hash = {
86747 + .next = NULL,
86748 + .name = "read_file_rcstat",
86749 + .file = "drivers/net/wireless/ath/ath9k/rc.c",
86750 + .param3 = 1,
86751 +};
86752 +struct size_overflow_hash _001110_hash = {
86753 + .next = NULL,
86754 + .name = "read_file_recv",
86755 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86756 + .param3 = 1,
86757 +};
86758 +struct size_overflow_hash _001111_hash = {
86759 + .next = NULL,
86760 + .name = "read_file_recv",
86761 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86762 + .param3 = 1,
86763 +};
86764 +struct size_overflow_hash _001112_hash = {
86765 + .next = NULL,
86766 + .name = "read_file_regidx",
86767 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86768 + .param3 = 1,
86769 +};
86770 +struct size_overflow_hash _001113_hash = {
86771 + .next = &_001103_hash,
86772 + .name = "read_file_regval",
86773 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86774 + .param3 = 1,
86775 +};
86776 +struct size_overflow_hash _001114_hash = {
86777 + .next = NULL,
86778 + .name = "read_file_rx_chainmask",
86779 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86780 + .param3 = 1,
86781 +};
86782 +struct size_overflow_hash _001115_hash = {
86783 + .next = NULL,
86784 + .name = "read_file_slot",
86785 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86786 + .param3 = 1,
86787 +};
86788 +struct size_overflow_hash _001116_hash = {
86789 + .next = NULL,
86790 + .name = "read_file_stations",
86791 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86792 + .param3 = 1,
86793 +};
86794 +struct size_overflow_hash _001117_hash = {
86795 + .next = NULL,
86796 + .name = "read_file_tgt_int_stats",
86797 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86798 + .param3 = 1,
86799 +};
86800 +struct size_overflow_hash _001118_hash = {
86801 + .next = NULL,
86802 + .name = "read_file_tgt_rx_stats",
86803 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86804 + .param3 = 1,
86805 +};
86806 +struct size_overflow_hash _001119_hash = {
86807 + .next = NULL,
86808 + .name = "read_file_tgt_stats",
86809 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
86810 + .param3 = 1,
86811 +};
86812 +struct size_overflow_hash _001120_hash = {
86813 + .next = NULL,
86814 + .name = "read_file_tgt_tx_stats",
86815 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86816 + .param3 = 1,
86817 +};
86818 +struct size_overflow_hash _001121_hash = {
86819 + .next = NULL,
86820 + .name = "read_file_tx_chainmask",
86821 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86822 + .param3 = 1,
86823 +};
86824 +struct size_overflow_hash _001122_hash = {
86825 + .next = NULL,
86826 + .name = "read_file_war_stats",
86827 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
86828 + .param3 = 1,
86829 +};
86830 +struct size_overflow_hash _001123_hash = {
86831 + .next = NULL,
86832 + .name = "read_file_wiphy",
86833 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86834 + .param3 = 1,
86835 +};
86836 +struct size_overflow_hash _001124_hash = {
86837 + .next = NULL,
86838 + .name = "read_file_xmit",
86839 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86840 + .param3 = 1,
86841 +};
86842 +struct size_overflow_hash _001125_hash = {
86843 + .next = NULL,
86844 + .name = "read_file_xmit",
86845 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86846 + .param3 = 1,
86847 +};
86848 +struct size_overflow_hash _001126_hash = {
86849 + .next = NULL,
86850 + .name = "read_flush",
86851 + .file = "net/sunrpc/cache.c",
86852 + .param3 = 1,
86853 +};
86854 +struct size_overflow_hash _001127_hash = {
86855 + .next = NULL,
86856 + .name = "realloc_buffer",
86857 + .file = "drivers/scsi/device_handler/scsi_dh_alua.c",
86858 + .param2 = 1,
86859 +};
86860 +struct size_overflow_hash _001128_hash = {
86861 + .next = NULL,
86862 + .name = "receive_DataRequest",
86863 + .file = "drivers/block/drbd/drbd_receiver.c",
86864 + .param3 = 1,
86865 +};
86866 +struct size_overflow_hash _001129_hash = {
86867 + .next = NULL,
86868 + .name = "recent_mt_proc_write",
86869 + .file = "net/netfilter/xt_recent.c",
86870 + .param3 = 1,
86871 +};
86872 +struct size_overflow_hash _001130_hash = {
86873 + .next = NULL,
86874 + .name = "redrat3_transmit_ir",
86875 + .file = "drivers/media/rc/redrat3.c",
86876 + .param3 = 1,
86877 +};
86878 +struct size_overflow_hash _001131_hash = {
86879 + .next = NULL,
86880 + .name = "reg_w_buf",
86881 + .file = "drivers/media/video/gspca/t613.c",
86882 + .param3 = 1,
86883 +};
86884 +struct size_overflow_hash _001132_hash = {
86885 + .next = NULL,
86886 + .name = "reg_w_ixbuf",
86887 + .file = "drivers/media/video/gspca/t613.c",
86888 + .param4 = 1,
86889 +};
86890 +struct size_overflow_hash _001133_hash = {
86891 + .next = NULL,
86892 + .name = "reiserfs_allocate_list_bitmaps",
86893 + .file = "include/linux/reiserfs_fs.h",
86894 + .param3 = 1,
86895 +};
86896 +struct size_overflow_hash _001134_hash = {
86897 + .next = NULL,
86898 + .name = "reiserfs_resize",
86899 + .file = "include/linux/reiserfs_fs_sb.h",
86900 + .param2 = 1,
86901 +};
86902 +struct size_overflow_hash _001135_hash = {
86903 + .next = NULL,
86904 + .name = "remote_settings_file_write",
86905 + .file = "drivers/misc/ibmasm/ibmasmfs.c",
86906 + .param3 = 1,
86907 +};
86908 +struct size_overflow_hash _001136_hash = {
86909 + .next = NULL,
86910 + .name = "_req_append_segment",
86911 + .file = "drivers/scsi/osd/osd_initiator.c",
86912 + .param2 = 1,
86913 +};
86914 +struct size_overflow_hash _001137_hash = {
86915 + .next = NULL,
86916 + .name = "retry_count_read",
86917 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86918 + .param3 = 1,
86919 +};
86920 +struct size_overflow_hash _001138_hash = {
86921 + .next = NULL,
86922 + .name = "revalidate",
86923 + .file = "drivers/block/aoe/aoechr.c",
86924 + .param2 = 1,
86925 +};
86926 +struct size_overflow_hash _001139_hash = {
86927 + .next = NULL,
86928 + .name = "rfcomm_sock_setsockopt",
86929 + .file = "net/bluetooth/rfcomm/sock.c",
86930 + .param5 = 1,
86931 +};
86932 +struct size_overflow_hash _001140_hash = {
86933 + .next = NULL,
86934 + .name = "rfkill_fop_read",
86935 + .file = "net/rfkill/core.c",
86936 + .param3 = 1,
86937 +};
86938 +struct size_overflow_hash _001141_hash = {
86939 + .next = NULL,
86940 + .name = "rndis_add_response",
86941 + .file = "drivers/usb/gadget/rndis.c",
86942 + .param2 = 1,
86943 +};
86944 +struct size_overflow_hash _001142_hash = {
86945 + .next = NULL,
86946 + .name = "rng_dev_read",
86947 + .file = "drivers/char/hw_random/core.c",
86948 + .param3 = 1,
86949 +};
86950 +struct size_overflow_hash _001143_hash = {
86951 + .next = NULL,
86952 + .name = "roccat_common_receive",
86953 + .file = "drivers/hid/hid-roccat-common.c",
86954 + .param4 = 1,
86955 +};
86956 +struct size_overflow_hash _001144_hash = {
86957 + .next = NULL,
86958 + .name = "roccat_common_send",
86959 + .file = "drivers/hid/hid-roccat-common.c",
86960 + .param4 = 1,
86961 +};
86962 +struct size_overflow_hash _001145_hash = {
86963 + .next = NULL,
86964 + .name = "roccat_read",
86965 + .file = "drivers/hid/hid-roccat.c",
86966 + .param3 = 1,
86967 +};
86968 +struct size_overflow_hash _001146_hash = {
86969 + .next = NULL,
86970 + .name = "rpc_malloc",
86971 + .file = "include/linux/sunrpc/sched.h",
86972 + .param2 = 1,
86973 +};
86974 +struct size_overflow_hash _001147_hash = {
86975 + .next = NULL,
86976 + .name = "rs_sta_dbgfs_rate_scale_data_read",
86977 + .file = "drivers/net/wireless/iwlwifi/iwl-agn-rs.c",
86978 + .param3 = 1,
86979 +};
86980 +struct size_overflow_hash _001148_hash = {
86981 + .next = NULL,
86982 + .name = "rs_sta_dbgfs_scale_table_read",
86983 + .file = "drivers/net/wireless/iwlwifi/iwl-agn-rs.c",
86984 + .param3 = 1,
86985 +};
86986 +struct size_overflow_hash _001149_hash = {
86987 + .next = NULL,
86988 + .name = "rs_sta_dbgfs_stats_table_read",
86989 + .file = "drivers/net/wireless/iwlwifi/iwl-agn-rs.c",
86990 + .param3 = 1,
86991 +};
86992 +struct size_overflow_hash _001150_hash = {
86993 + .next = NULL,
86994 + .name = "rt2x00debug_write_bbp",
86995 + .file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
86996 + .param3 = 1,
86997 +};
86998 +struct size_overflow_hash _001151_hash = {
86999 + .next = NULL,
87000 + .name = "rt2x00debug_write_csr",
87001 + .file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
87002 + .param3 = 1,
87003 +};
87004 +struct size_overflow_hash _001152_hash = {
87005 + .next = &_000808_hash,
87006 + .name = "rt2x00debug_write_eeprom",
87007 + .file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
87008 + .param3 = 1,
87009 +};
87010 +struct size_overflow_hash _001153_hash = {
87011 + .next = NULL,
87012 + .name = "rt2x00debug_write_rf",
87013 + .file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
87014 + .param3 = 1,
87015 +};
87016 +struct size_overflow_hash _001154_hash = {
87017 + .next = NULL,
87018 + .name = "rts51x_read_mem",
87019 + .file = "drivers/usb/storage/realtek_cr.c",
87020 + .param4 = 1,
87021 +};
87022 +struct size_overflow_hash _001155_hash = {
87023 + .next = NULL,
87024 + .name = "rts51x_write_mem",
87025 + .file = "drivers/usb/storage/realtek_cr.c",
87026 + .param4 = 1,
87027 +};
87028 +struct size_overflow_hash _001156_hash = {
87029 + .next = NULL,
87030 + .name = "rts_threshold_read",
87031 + .file = "net/wireless/debugfs.c",
87032 + .param3 = 1,
87033 +};
87034 +struct size_overflow_hash _001157_hash = {
87035 + .next = NULL,
87036 + .name = "rx_dropped_read",
87037 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87038 + .param3 = 1,
87039 +};
87040 +struct size_overflow_hash _001158_hash = {
87041 + .next = NULL,
87042 + .name = "rx_fcs_err_read",
87043 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87044 + .param3 = 1,
87045 +};
87046 +struct size_overflow_hash _001159_hash = {
87047 + .next = NULL,
87048 + .name = "rx_hdr_overflow_read",
87049 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87050 + .param3 = 1,
87051 +};
87052 +struct size_overflow_hash _001160_hash = {
87053 + .next = NULL,
87054 + .name = "rx_hw_stuck_read",
87055 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87056 + .param3 = 1,
87057 +};
87058 +struct size_overflow_hash _001161_hash = {
87059 + .next = NULL,
87060 + .name = "rx_out_of_mem_read",
87061 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87062 + .param3 = 1,
87063 +};
87064 +struct size_overflow_hash _001162_hash = {
87065 + .next = NULL,
87066 + .name = "rx_path_reset_read",
87067 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87068 + .param3 = 1,
87069 +};
87070 +struct size_overflow_hash _001163_hash = {
87071 + .next = NULL,
87072 + .name = "rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read",
87073 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87074 + .param3 = 1,
87075 +};
87076 +struct size_overflow_hash _001164_hash = {
87077 + .next = NULL,
87078 + .name = "rxpipe_descr_host_int_trig_rx_data_read",
87079 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87080 + .param3 = 1,
87081 +};
87082 +struct size_overflow_hash _001165_hash = {
87083 + .next = NULL,
87084 + .name = "rxpipe_missed_beacon_host_int_trig_rx_data_read",
87085 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87086 + .param3 = 1,
87087 +};
87088 +struct size_overflow_hash _001166_hash = {
87089 + .next = NULL,
87090 + .name = "rxpipe_rx_prep_beacon_drop_read",
87091 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87092 + .param3 = 1,
87093 +};
87094 +struct size_overflow_hash _001167_hash = {
87095 + .next = NULL,
87096 + .name = "rxpipe_tx_xfr_host_int_trig_rx_data_read",
87097 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87098 + .param3 = 1,
87099 +};
87100 +struct size_overflow_hash _001168_hash = {
87101 + .next = NULL,
87102 + .name = "rx_reset_counter_read",
87103 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87104 + .param3 = 1,
87105 +};
87106 +struct size_overflow_hash _001169_hash = {
87107 + .next = NULL,
87108 + .name = "rx_xfr_hint_trig_read",
87109 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87110 + .param3 = 1,
87111 +};
87112 +struct size_overflow_hash _001170_hash = {
87113 + .next = NULL,
87114 + .name = "saa7164_buffer_alloc_user",
87115 + .file = "drivers/media/video/saa7164/saa7164-buffer.c",
87116 + .param2 = 1,
87117 +};
87118 +struct size_overflow_hash _001171_hash = {
87119 + .next = NULL,
87120 + .name = "scsi_execute",
87121 + .file = "include/scsi/scsi_device.h",
87122 + .param5 = 1,
87123 +};
87124 +struct size_overflow_hash _001172_hash = {
87125 + .next = NULL,
87126 + .name = "scsi_tgt_copy_sense",
87127 + .file = "drivers/scsi/scsi_tgt_lib.c",
87128 + .param3 = 1,
87129 +};
87130 +struct size_overflow_hash _001173_hash = {
87131 + .next = NULL,
87132 + .name = "sctp_auth_create_key",
87133 + .file = "net/sctp/auth.c",
87134 + .param1 = 1,
87135 +};
87136 +struct size_overflow_hash _001174_hash = {
87137 + .next = NULL,
87138 + .name = "sctp_make_abort_user",
87139 + .file = "include/net/sctp/sm.h",
87140 + .param3 = 1,
87141 +};
87142 +struct size_overflow_hash _001175_hash = {
87143 + .next = NULL,
87144 + .name = "sctpprobe_read",
87145 + .file = "net/sctp/probe.c",
87146 + .param3 = 1,
87147 +};
87148 +struct size_overflow_hash _001176_hash = {
87149 + .next = NULL,
87150 + .name = "sctp_setsockopt_active_key",
87151 + .file = "net/sctp/socket.c",
87152 + .param3 = 1,
87153 +};
87154 +struct size_overflow_hash _001177_hash = {
87155 + .next = NULL,
87156 + .name = "sctp_setsockopt_adaptation_layer",
87157 + .file = "net/sctp/socket.c",
87158 + .param3 = 1,
87159 +};
87160 +struct size_overflow_hash _001178_hash = {
87161 + .next = NULL,
87162 + .name = "sctp_setsockopt_associnfo",
87163 + .file = "net/sctp/socket.c",
87164 + .param3 = 1,
87165 +};
87166 +struct size_overflow_hash _001179_hash = {
87167 + .next = NULL,
87168 + .name = "sctp_setsockopt_auth_chunk",
87169 + .file = "net/sctp/socket.c",
87170 + .param3 = 1,
87171 +};
87172 +struct size_overflow_hash _001180_hash = {
87173 + .next = NULL,
87174 + .name = "sctp_setsockopt_auth_key",
87175 + .file = "net/sctp/socket.c",
87176 + .param3 = 1,
87177 +};
87178 +struct size_overflow_hash _001181_hash = {
87179 + .next = NULL,
87180 + .name = "sctp_setsockopt_autoclose",
87181 + .file = "net/sctp/socket.c",
87182 + .param3 = 1,
87183 +};
87184 +struct size_overflow_hash _001182_hash = {
87185 + .next = NULL,
87186 + .name = "sctp_setsockopt_context",
87187 + .file = "net/sctp/socket.c",
87188 + .param3 = 1,
87189 +};
87190 +struct size_overflow_hash _001183_hash = {
87191 + .next = NULL,
87192 + .name = "sctp_setsockopt_default_send_param",
87193 + .file = "net/sctp/socket.c",
87194 + .param3 = 1,
87195 +};
87196 +struct size_overflow_hash _001184_hash = {
87197 + .next = NULL,
87198 + .name = "sctp_setsockopt_delayed_ack",
87199 + .file = "net/sctp/socket.c",
87200 + .param3 = 1,
87201 +};
87202 +struct size_overflow_hash _001185_hash = {
87203 + .next = NULL,
87204 + .name = "sctp_setsockopt_del_key",
87205 + .file = "net/sctp/socket.c",
87206 + .param3 = 1,
87207 +};
87208 +struct size_overflow_hash _001186_hash = {
87209 + .next = NULL,
87210 + .name = "sctp_setsockopt_events",
87211 + .file = "net/sctp/socket.c",
87212 + .param3 = 1,
87213 +};
87214 +struct size_overflow_hash _001187_hash = {
87215 + .next = NULL,
87216 + .name = "sctp_setsockopt_hmac_ident",
87217 + .file = "net/sctp/socket.c",
87218 + .param3 = 1,
87219 +};
87220 +struct size_overflow_hash _001188_hash = {
87221 + .next = NULL,
87222 + .name = "sctp_setsockopt_initmsg",
87223 + .file = "net/sctp/socket.c",
87224 + .param3 = 1,
87225 +};
87226 +struct size_overflow_hash _001189_hash = {
87227 + .next = NULL,
87228 + .name = "sctp_setsockopt_maxburst",
87229 + .file = "net/sctp/socket.c",
87230 + .param3 = 1,
87231 +};
87232 +struct size_overflow_hash _001190_hash = {
87233 + .next = NULL,
87234 + .name = "sctp_setsockopt_maxseg",
87235 + .file = "net/sctp/socket.c",
87236 + .param3 = 1,
87237 +};
87238 +struct size_overflow_hash _001191_hash = {
87239 + .next = NULL,
87240 + .name = "sctp_setsockopt_peer_addr_params",
87241 + .file = "net/sctp/socket.c",
87242 + .param3 = 1,
87243 +};
87244 +struct size_overflow_hash _001192_hash = {
87245 + .next = NULL,
87246 + .name = "sctp_setsockopt_peer_primary_addr",
87247 + .file = "net/sctp/socket.c",
87248 + .param3 = 1,
87249 +};
87250 +struct size_overflow_hash _001193_hash = {
87251 + .next = NULL,
87252 + .name = "sctp_setsockopt_rtoinfo",
87253 + .file = "net/sctp/socket.c",
87254 + .param3 = 1,
87255 +};
87256 +struct size_overflow_hash _001194_hash = {
87257 + .next = NULL,
87258 + .name = "sctp_tsnmap_init",
87259 + .file = "include/net/sctp/tsnmap.h",
87260 + .param2 = 1,
87261 +};
87262 +struct size_overflow_hash _001195_hash = {
87263 + .next = NULL,
87264 + .name = "send_control_msg",
87265 + .file = "drivers/media/video/zr364xx.c",
87266 + .param6 = 1,
87267 +};
87268 +struct size_overflow_hash _001196_hash = {
87269 + .next = NULL,
87270 + .name = "set_aoe_iflist",
87271 + .file = "drivers/block/aoe/aoenet.c",
87272 + .param2 = 1,
87273 +};
87274 +struct size_overflow_hash _001197_hash = {
87275 + .next = NULL,
87276 + .name = "set_registers",
87277 + .file = "drivers/net/usb/pegasus.c",
87278 + .param3 = 1,
87279 +};
87280 +struct size_overflow_hash _001198_hash = {
87281 + .next = NULL,
87282 + .name = "setsockopt",
87283 + .file = "net/caif/caif_socket.c",
87284 + .param5 = 1,
87285 +};
87286 +struct size_overflow_hash _001199_hash = {
87287 + .next = NULL,
87288 + .name = "setup_req",
87289 + .file = "drivers/usb/gadget/inode.c",
87290 + .param3 = 1,
87291 +};
87292 +struct size_overflow_hash _001200_hash = {
87293 + .next = NULL,
87294 + .name = "sfq_alloc",
87295 + .file = "net/sched/sch_sfq.c",
87296 + .param1 = 1,
87297 +};
87298 +struct size_overflow_hash _001201_hash = {
87299 + .next = NULL,
87300 + .name = "sgl_map_user_pages",
87301 + .file = "drivers/scsi/st.c",
87302 + .param2 = 1,
87303 +};
87304 +struct size_overflow_hash _001202_hash = {
87305 + .next = NULL,
87306 + .name = "short_retry_limit_read",
87307 + .file = "net/wireless/debugfs.c",
87308 + .param3 = 1,
87309 +};
87310 +struct size_overflow_hash _001203_hash = {
87311 + .next = NULL,
87312 + .name = "sm501_create_subdev",
87313 + .file = "drivers/mfd/sm501.c",
87314 + .param3 = 1,
87315 + .param4 = 1,
87316 +};
87317 +struct size_overflow_hash _001205_hash = {
87318 + .next = NULL,
87319 + .name = "sn9c102_read",
87320 + .file = "drivers/media/video/sn9c102/sn9c102_core.c",
87321 + .param3 = 1,
87322 +};
87323 +struct size_overflow_hash _001206_hash = {
87324 + .next = NULL,
87325 + .name = "snd_ac97_pcm_assign",
87326 + .file = "include/sound/ac97_codec.h",
87327 + .param2 = 1,
87328 +};
87329 +struct size_overflow_hash _001207_hash = {
87330 + .next = NULL,
87331 + .name = "snd_ctl_elem_user_tlv",
87332 + .file = "sound/core/control.c",
87333 + .param3 = 1,
87334 +};
87335 +struct size_overflow_hash _001208_hash = {
87336 + .next = NULL,
87337 + .name = "snd_emu10k1_fx8010_read",
87338 + .file = "sound/pci/emu10k1/emuproc.c",
87339 + .param5 = 1,
87340 +};
87341 +struct size_overflow_hash _001209_hash = {
87342 + .next = NULL,
87343 + .name = "snd_es1938_capture_copy",
87344 + .file = "sound/pci/es1938.c",
87345 + .param5 = 1,
87346 +};
87347 +struct size_overflow_hash _001210_hash = {
87348 + .next = NULL,
87349 + .name = "snd_gus_dram_peek",
87350 + .file = "sound/isa/gus/gus_dram.c",
87351 + .param4 = 1,
87352 +};
87353 +struct size_overflow_hash _001211_hash = {
87354 + .next = NULL,
87355 + .name = "snd_gus_dram_poke",
87356 + .file = "sound/isa/gus/gus_dram.c",
87357 + .param4 = 1,
87358 +};
87359 +struct size_overflow_hash _001212_hash = {
87360 + .next = NULL,
87361 + .name = "snd_hdsp_capture_copy",
87362 + .file = "sound/pci/rme9652/hdsp.c",
87363 + .param5 = 1,
87364 +};
87365 +struct size_overflow_hash _001213_hash = {
87366 + .next = NULL,
87367 + .name = "snd_hdsp_playback_copy",
87368 + .file = "sound/pci/rme9652/hdsp.c",
87369 + .param5 = 1,
87370 +};
87371 +struct size_overflow_hash _001214_hash = {
87372 + .next = NULL,
87373 + .name = "snd_info_entry_write",
87374 + .file = "sound/core/info.c",
87375 + .param3 = 1,
87376 +};
87377 +struct size_overflow_hash _001215_hash = {
87378 + .next = NULL,
87379 + .name = "snd_opl4_mem_proc_read",
87380 + .file = "sound/drivers/opl4/opl4_proc.c",
87381 + .param5 = 1,
87382 +};
87383 +struct size_overflow_hash _001216_hash = {
87384 + .next = NULL,
87385 + .name = "snd_opl4_mem_proc_write",
87386 + .file = "sound/drivers/opl4/opl4_proc.c",
87387 + .param5 = 1,
87388 +};
87389 +struct size_overflow_hash _001217_hash = {
87390 + .next = NULL,
87391 + .name = "snd_pcm_aio_read",
87392 + .file = "sound/core/pcm_native.c",
87393 + .param3 = 1,
87394 +};
87395 +struct size_overflow_hash _001218_hash = {
87396 + .next = NULL,
87397 + .name = "snd_pcm_aio_write",
87398 + .file = "sound/core/pcm_native.c",
87399 + .param3 = 1,
87400 +};
87401 +struct size_overflow_hash _001219_hash = {
87402 + .next = NULL,
87403 + .name = "snd_pcm_alloc_vmalloc_buffer",
87404 + .file = "drivers/media/video/cx231xx/cx231xx-audio.c",
87405 + .param2 = 1,
87406 +};
87407 +struct size_overflow_hash _001220_hash = {
87408 + .next = NULL,
87409 + .name = "snd_pcm_alloc_vmalloc_buffer",
87410 + .file = "drivers/media/video/cx18/cx18-alsa-pcm.c",
87411 + .param2 = 1,
87412 +};
87413 +struct size_overflow_hash _001221_hash = {
87414 + .next = NULL,
87415 + .name = "snd_pcm_alloc_vmalloc_buffer",
87416 + .file = "drivers/media/video/em28xx/em28xx-audio.c",
87417 + .param2 = 1,
87418 +};
87419 +struct size_overflow_hash _001222_hash = {
87420 + .next = NULL,
87421 + .name = "_snd_pcm_lib_alloc_vmalloc_buffer",
87422 + .file = "include/sound/pcm.h",
87423 + .param2 = 1,
87424 +};
87425 +struct size_overflow_hash _001223_hash = {
87426 + .next = NULL,
87427 + .name = "snd_pcm_oss_read1",
87428 + .file = "sound/core/oss/pcm_oss.c",
87429 + .param3 = 1,
87430 +};
87431 +struct size_overflow_hash _001224_hash = {
87432 + .next = NULL,
87433 + .name = "snd_pcm_oss_write1",
87434 + .file = "sound/core/oss/pcm_oss.c",
87435 + .param3 = 1,
87436 +};
87437 +struct size_overflow_hash _001225_hash = {
87438 + .next = NULL,
87439 + .name = "snd_pcm_oss_write2",
87440 + .file = "sound/core/oss/pcm_oss.c",
87441 + .param3 = 1,
87442 +};
87443 +struct size_overflow_hash _001226_hash = {
87444 + .next = NULL,
87445 + .name = "snd_pcm_plugin_build",
87446 + .file = "sound/core/oss/pcm_plugin.c",
87447 + .param5 = 1,
87448 +};
87449 +struct size_overflow_hash _001227_hash = {
87450 + .next = NULL,
87451 + .name = "snd_rme9652_capture_copy",
87452 + .file = "sound/pci/rme9652/rme9652.c",
87453 + .param5 = 1,
87454 +};
87455 +struct size_overflow_hash _001228_hash = {
87456 + .next = NULL,
87457 + .name = "snd_rme9652_playback_copy",
87458 + .file = "sound/pci/rme9652/rme9652.c",
87459 + .param5 = 1,
87460 +};
87461 +struct size_overflow_hash _001229_hash = {
87462 + .next = NULL,
87463 + .name = "snd_soc_hw_bulk_write_raw",
87464 + .file = "sound/soc/soc-io.c",
87465 + .param4 = 1,
87466 +};
87467 +struct size_overflow_hash _001230_hash = {
87468 + .next = NULL,
87469 + .name = "snd_usb_ctl_msg",
87470 + .file = "sound/usb/helper.c",
87471 + .param8 = 1,
87472 +};
87473 +struct size_overflow_hash _001231_hash = {
87474 + .next = NULL,
87475 + .name = "_sp2d_alloc",
87476 + .file = "fs/exofs/ore_raid.c",
87477 + .param1 = 1,
87478 +};
87479 +struct size_overflow_hash _001232_hash = {
87480 + .next = NULL,
87481 + .name = "spidev_message",
87482 + .file = "drivers/spi/spidev.c",
87483 + .param3 = 1,
87484 +};
87485 +struct size_overflow_hash _001233_hash = {
87486 + .next = NULL,
87487 + .name = "spidev_write",
87488 + .file = "drivers/spi/spidev.c",
87489 + .param3 = 1,
87490 +};
87491 +struct size_overflow_hash _001234_hash = {
87492 + .next = NULL,
87493 + .name = "spi_show_regs",
87494 + .file = "drivers/spi/spi-dw.c",
87495 + .param3 = 1,
87496 +};
87497 +struct size_overflow_hash _001235_hash = {
87498 + .next = NULL,
87499 + .name = "srp_alloc_iu",
87500 + .file = "drivers/infiniband/ulp/srp/ib_srp.c",
87501 + .param2 = 1,
87502 +};
87503 +struct size_overflow_hash _001236_hash = {
87504 + .next = NULL,
87505 + .name = "srp_iu_pool_alloc",
87506 + .file = "drivers/scsi/libsrp.c",
87507 + .param2 = 1,
87508 +};
87509 +struct size_overflow_hash _001237_hash = {
87510 + .next = NULL,
87511 + .name = "srp_ring_alloc",
87512 + .file = "drivers/scsi/libsrp.c",
87513 + .param2 = 1,
87514 +};
87515 +struct size_overflow_hash _001238_hash = {
87516 + .next = NULL,
87517 + .name = "sta_agg_status_read",
87518 + .file = "net/mac80211/debugfs_sta.c",
87519 + .param3 = 1,
87520 +};
87521 +struct size_overflow_hash _001239_hash = {
87522 + .next = NULL,
87523 + .name = "sta_agg_status_write",
87524 + .file = "net/mac80211/debugfs_sta.c",
87525 + .param3 = 1,
87526 +};
87527 +struct size_overflow_hash _001240_hash = {
87528 + .next = NULL,
87529 + .name = "sta_connected_time_read",
87530 + .file = "net/mac80211/debugfs_sta.c",
87531 + .param3 = 1,
87532 +};
87533 +struct size_overflow_hash _001241_hash = {
87534 + .next = NULL,
87535 + .name = "sta_flags_read",
87536 + .file = "net/mac80211/debugfs_sta.c",
87537 + .param3 = 1,
87538 +};
87539 +struct size_overflow_hash _001242_hash = {
87540 + .next = NULL,
87541 + .name = "sta_ht_capa_read",
87542 + .file = "net/mac80211/debugfs_sta.c",
87543 + .param3 = 1,
87544 +};
87545 +struct size_overflow_hash _001243_hash = {
87546 + .next = NULL,
87547 + .name = "sta_last_seq_ctrl_read",
87548 + .file = "net/mac80211/debugfs_sta.c",
87549 + .param3 = 1,
87550 +};
87551 +struct size_overflow_hash _001244_hash = {
87552 + .next = NULL,
87553 + .name = "sta_num_ps_buf_frames_read",
87554 + .file = "net/mac80211/debugfs_sta.c",
87555 + .param3 = 1,
87556 +};
87557 +struct size_overflow_hash _001245_hash = {
87558 + .next = NULL,
87559 + .name = "stk_prepare_sio_buffers",
87560 + .file = "drivers/media/video/stk-webcam.c",
87561 + .param2 = 1,
87562 +};
87563 +struct size_overflow_hash _001246_hash = {
87564 + .next = NULL,
87565 + .name = "store_iwmct_log_level",
87566 + .file = "drivers/misc/iwmc3200top/log.c",
87567 + .param4 = 1,
87568 +};
87569 +struct size_overflow_hash _001247_hash = {
87570 + .next = NULL,
87571 + .name = "store_iwmct_log_level_fw",
87572 + .file = "drivers/misc/iwmc3200top/log.c",
87573 + .param4 = 1,
87574 +};
87575 +struct size_overflow_hash _001248_hash = {
87576 + .next = NULL,
87577 + .name = "str_to_user",
87578 + .file = "drivers/input/evdev.c",
87579 + .param2 = 1,
87580 +};
87581 +struct size_overflow_hash _001249_hash = {
87582 + .next = NULL,
87583 + .name = "svc_pool_map_alloc_arrays",
87584 + .file = "net/sunrpc/svc.c",
87585 + .param2 = 1,
87586 +};
87587 +struct size_overflow_hash _001250_hash = {
87588 + .next = NULL,
87589 + .name = "svc_setsockopt",
87590 + .file = "net/atm/svc.c",
87591 + .param5 = 1,
87592 +};
87593 +struct size_overflow_hash _001251_hash = {
87594 + .next = NULL,
87595 + .name = "t4_alloc_mem",
87596 + .file = "drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c",
87597 + .param1 = 1,
87598 +};
87599 +struct size_overflow_hash _001252_hash = {
87600 + .next = NULL,
87601 + .name = "tda10048_writeregbulk",
87602 + .file = "drivers/media/dvb/frontends/tda10048.c",
87603 + .param4 = 1,
87604 +};
87605 +struct size_overflow_hash _001253_hash = {
87606 + .next = NULL,
87607 + .name = "__team_options_register",
87608 + .file = "drivers/net/team/team.c",
87609 + .param3 = 1,
87610 +};
87611 +struct size_overflow_hash _001254_hash = {
87612 + .next = NULL,
87613 + .name = "tifm_alloc_adapter",
87614 + .file = "include/linux/tifm.h",
87615 + .param1 = 1,
87616 +};
87617 +struct size_overflow_hash _001255_hash = {
87618 + .next = NULL,
87619 + .name = "tipc_subseq_alloc",
87620 + .file = "net/tipc/name_table.c",
87621 + .param1 = 1,
87622 +};
87623 +struct size_overflow_hash _001256_hash = {
87624 + .next = NULL,
87625 + .name = "tm6000_read_write_usb",
87626 + .file = "drivers/media/video/tm6000/tm6000-core.c",
87627 + .param7 = 1,
87628 +};
87629 +struct size_overflow_hash _001257_hash = {
87630 + .next = NULL,
87631 + .name = "tower_write",
87632 + .file = "drivers/usb/misc/legousbtower.c",
87633 + .param3 = 1,
87634 +};
87635 +struct size_overflow_hash _001258_hash = {
87636 + .next = NULL,
87637 + .name = "trusted_instantiate",
87638 + .file = "security/keys/trusted.c",
87639 + .param3 = 1,
87640 +};
87641 +struct size_overflow_hash _001259_hash = {
87642 + .next = NULL,
87643 + .name = "trusted_update",
87644 + .file = "security/keys/trusted.c",
87645 + .param3 = 1,
87646 +};
87647 +struct size_overflow_hash _001260_hash = {
87648 + .next = NULL,
87649 + .name = "TSS_rawhmac",
87650 + .file = "security/keys/trusted.c",
87651 + .param3 = 1,
87652 +};
87653 +struct size_overflow_hash _001261_hash = {
87654 + .next = NULL,
87655 + .name = "tx_internal_desc_overflow_read",
87656 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87657 + .param3 = 1,
87658 +};
87659 +struct size_overflow_hash _001262_hash = {
87660 + .next = NULL,
87661 + .name = "tx_queue_len_read",
87662 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87663 + .param3 = 1,
87664 +};
87665 +struct size_overflow_hash _001263_hash = {
87666 + .next = NULL,
87667 + .name = "tx_queue_len_read",
87668 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
87669 + .param3 = 1,
87670 +};
87671 +struct size_overflow_hash _001264_hash = {
87672 + .next = NULL,
87673 + .name = "tx_queue_status_read",
87674 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87675 + .param3 = 1,
87676 +};
87677 +struct size_overflow_hash _001265_hash = {
87678 + .next = NULL,
87679 + .name = "udf_alloc_i_data",
87680 + .file = "fs/udf/inode.c",
87681 + .param2 = 1,
87682 +};
87683 +struct size_overflow_hash _001266_hash = {
87684 + .next = NULL,
87685 + .name = "udf_sb_alloc_partition_maps",
87686 + .file = "fs/udf/super.c",
87687 + .param2 = 1,
87688 +};
87689 +struct size_overflow_hash _001267_hash = {
87690 + .next = NULL,
87691 + .name = "uea_idma_write",
87692 + .file = "drivers/usb/atm/ueagle-atm.c",
87693 + .param3 = 1,
87694 +};
87695 +struct size_overflow_hash _001268_hash = {
87696 + .next = NULL,
87697 + .name = "uea_request",
87698 + .file = "drivers/usb/atm/ueagle-atm.c",
87699 + .param4 = 1,
87700 +};
87701 +struct size_overflow_hash _001269_hash = {
87702 + .next = NULL,
87703 + .name = "uea_send_modem_cmd",
87704 + .file = "drivers/usb/atm/ueagle-atm.c",
87705 + .param3 = 1,
87706 +};
87707 +struct size_overflow_hash _001270_hash = {
87708 + .next = NULL,
87709 + .name = "uhci_debug_read",
87710 + .file = "drivers/usb/host/uhci-debug.c",
87711 + .param3 = 1,
87712 +};
87713 +struct size_overflow_hash _001271_hash = {
87714 + .next = NULL,
87715 + .name = "uio_read",
87716 + .file = "drivers/uio/uio.c",
87717 + .param3 = 1,
87718 +};
87719 +struct size_overflow_hash _001272_hash = {
87720 + .next = NULL,
87721 + .name = "uio_write",
87722 + .file = "drivers/uio/uio.c",
87723 + .param3 = 1,
87724 +};
87725 +struct size_overflow_hash _001273_hash = {
87726 + .next = NULL,
87727 + .name = "um_idi_write",
87728 + .file = "drivers/isdn/hardware/eicon/divasi.c",
87729 + .param3 = 1,
87730 +};
87731 +struct size_overflow_hash _001274_hash = {
87732 + .next = NULL,
87733 + .name = "unlink_queued",
87734 + .file = "drivers/usb/misc/usbtest.c",
87735 + .param3 = 1,
87736 + .param4 = 1,
87737 +};
87738 +struct size_overflow_hash _001275_hash = {
87739 + .next = NULL,
87740 + .name = "us122l_ctl_msg",
87741 + .file = "sound/usb/usx2y/us122l.c",
87742 + .param8 = 1,
87743 +};
87744 +struct size_overflow_hash _001276_hash = {
87745 + .next = NULL,
87746 + .name = "usbdev_read",
87747 + .file = "drivers/usb/core/devio.c",
87748 + .param3 = 1,
87749 +};
87750 +struct size_overflow_hash _001277_hash = {
87751 + .next = NULL,
87752 + .name = "usblp_read",
87753 + .file = "drivers/usb/class/usblp.c",
87754 + .param3 = 1,
87755 +};
87756 +struct size_overflow_hash _001278_hash = {
87757 + .next = NULL,
87758 + .name = "usblp_write",
87759 + .file = "drivers/usb/class/usblp.c",
87760 + .param3 = 1,
87761 +};
87762 +struct size_overflow_hash _001279_hash = {
87763 + .next = NULL,
87764 + .name = "usbtest_alloc_urb",
87765 + .file = "drivers/usb/misc/usbtest.c",
87766 + .param3 = 1,
87767 + .param5 = 1,
87768 +};
87769 +struct size_overflow_hash _001281_hash = {
87770 + .next = NULL,
87771 + .name = "usbtmc_read",
87772 + .file = "drivers/usb/class/usbtmc.c",
87773 + .param3 = 1,
87774 +};
87775 +struct size_overflow_hash _001282_hash = {
87776 + .next = NULL,
87777 + .name = "usbtmc_write",
87778 + .file = "drivers/usb/class/usbtmc.c",
87779 + .param3 = 1,
87780 +};
87781 +struct size_overflow_hash _001283_hash = {
87782 + .next = NULL,
87783 + .name = "usbvision_v4l2_read",
87784 + .file = "drivers/media/video/usbvision/usbvision-video.c",
87785 + .param3 = 1,
87786 +};
87787 +struct size_overflow_hash _001284_hash = {
87788 + .next = NULL,
87789 + .name = "uvc_alloc_buffers",
87790 + .file = "drivers/usb/gadget/uvc_queue.c",
87791 + .param2 = 1,
87792 +};
87793 +struct size_overflow_hash _001285_hash = {
87794 + .next = NULL,
87795 + .name = "uvc_alloc_entity",
87796 + .file = "drivers/media/video/uvc/uvc_driver.c",
87797 + .param3 = 1,
87798 +};
87799 +struct size_overflow_hash _001286_hash = {
87800 + .next = NULL,
87801 + .name = "uvc_debugfs_stats_read",
87802 + .file = "drivers/media/video/uvc/uvc_debugfs.c",
87803 + .param3 = 1,
87804 +};
87805 +struct size_overflow_hash _001287_hash = {
87806 + .next = NULL,
87807 + .name = "uvc_simplify_fraction",
87808 + .file = "drivers/media/video/uvc/uvc_driver.c",
87809 + .param3 = 1,
87810 +};
87811 +struct size_overflow_hash _001288_hash = {
87812 + .next = NULL,
87813 + .name = "uwb_rc_neh_grok_event",
87814 + .file = "drivers/uwb/neh.c",
87815 + .param3 = 1,
87816 +};
87817 +struct size_overflow_hash _001289_hash = {
87818 + .next = NULL,
87819 + .name = "v4l2_event_subscribe",
87820 + .file = "include/media/v4l2-event.h",
87821 + .param3 = 1,
87822 +};
87823 +struct size_overflow_hash _001290_hash = {
87824 + .next = NULL,
87825 + .name = "v4l_stk_read",
87826 + .file = "drivers/media/video/stk-webcam.c",
87827 + .param3 = 1,
87828 +};
87829 +struct size_overflow_hash _001291_hash = {
87830 + .next = NULL,
87831 + .name = "__vb2_perform_fileio",
87832 + .file = "drivers/media/video/videobuf2-core.c",
87833 + .param3 = 1,
87834 +};
87835 +struct size_overflow_hash _001292_hash = {
87836 + .next = NULL,
87837 + .name = "vdma_mem_alloc",
87838 + .file = "arch/x86/include/asm/floppy.h",
87839 + .param1 = 1,
87840 +};
87841 +struct size_overflow_hash _001293_hash = {
87842 + .next = NULL,
87843 + .name = "vfd_write",
87844 + .file = "drivers/media/rc/imon.c",
87845 + .param3 = 1,
87846 +};
87847 +struct size_overflow_hash _001294_hash = {
87848 + .next = NULL,
87849 + .name = "vhci_get_user",
87850 + .file = "drivers/bluetooth/hci_vhci.c",
87851 + .param3 = 1,
87852 +};
87853 +struct size_overflow_hash _001295_hash = {
87854 + .next = NULL,
87855 + .name = "__vhost_add_used_n",
87856 + .file = "drivers/vhost/vhost.c",
87857 + .param3 = 1,
87858 +};
87859 +struct size_overflow_hash _001296_hash = {
87860 + .next = NULL,
87861 + .name = "__videobuf_alloc_vb",
87862 + .file = "drivers/media/video/videobuf-dma-sg.c",
87863 + .param1 = 1,
87864 +};
87865 +struct size_overflow_hash _001297_hash = {
87866 + .next = NULL,
87867 + .name = "__videobuf_alloc_vb",
87868 + .file = "drivers/media/video/videobuf-dma-contig.c",
87869 + .param1 = 1,
87870 +};
87871 +struct size_overflow_hash _001298_hash = {
87872 + .next = NULL,
87873 + .name = "__videobuf_alloc_vb",
87874 + .file = "drivers/media/video/videobuf-vmalloc.c",
87875 + .param1 = 1,
87876 +};
87877 +struct size_overflow_hash _001299_hash = {
87878 + .next = NULL,
87879 + .name = "__videobuf_copy_to_user",
87880 + .file = "drivers/media/video/videobuf-core.c",
87881 + .param4 = 1,
87882 +};
87883 +struct size_overflow_hash _001300_hash = {
87884 + .next = NULL,
87885 + .name = "video_proc_write",
87886 + .file = "drivers/platform/x86/toshiba_acpi.c",
87887 + .param3 = 1,
87888 +};
87889 +struct size_overflow_hash _001301_hash = {
87890 + .next = NULL,
87891 + .name = "vifs_state_read",
87892 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
87893 + .param3 = 1,
87894 +};
87895 +struct size_overflow_hash _001302_hash = {
87896 + .next = NULL,
87897 + .name = "vlsi_alloc_ring",
87898 + .file = "drivers/net/irda/vlsi_ir.c",
87899 + .param3 = 1,
87900 + .param4 = 1,
87901 +};
87902 +struct size_overflow_hash _001304_hash = {
87903 + .next = NULL,
87904 + .name = "vol_cdev_direct_write",
87905 + .file = "drivers/mtd/ubi/cdev.c",
87906 + .param3 = 1,
87907 +};
87908 +struct size_overflow_hash _001305_hash = {
87909 + .next = NULL,
87910 + .name = "vol_cdev_read",
87911 + .file = "drivers/mtd/ubi/cdev.c",
87912 + .param3 = 1,
87913 +};
87914 +struct size_overflow_hash _001306_hash = {
87915 + .next = NULL,
87916 + .name = "vring_add_indirect",
87917 + .file = "drivers/virtio/virtio_ring.c",
87918 + .param3 = 1,
87919 + .param4 = 1,
87920 +};
87921 +struct size_overflow_hash _001308_hash = {
87922 + .next = NULL,
87923 + .name = "vring_new_virtqueue",
87924 + .file = "include/linux/virtio_ring.h",
87925 + .param1 = 1,
87926 +};
87927 +struct size_overflow_hash _001309_hash = {
87928 + .next = NULL,
87929 + .name = "__vxge_hw_channel_allocate",
87930 + .file = "drivers/net/ethernet/neterion/vxge/vxge-config.c",
87931 + .param3 = 1,
87932 +};
87933 +struct size_overflow_hash _001310_hash = {
87934 + .next = NULL,
87935 + .name = "vxge_os_dma_malloc",
87936 + .file = "drivers/net/ethernet/neterion/vxge/vxge-config.h",
87937 + .param2 = 1,
87938 +};
87939 +struct size_overflow_hash _001311_hash = {
87940 + .next = NULL,
87941 + .name = "vxge_os_dma_malloc_async",
87942 + .file = "drivers/net/ethernet/neterion/vxge/vxge-config.c",
87943 + .param3 = 1,
87944 +};
87945 +struct size_overflow_hash _001312_hash = {
87946 + .next = NULL,
87947 + .name = "w9966_v4l_read",
87948 + .file = "drivers/media/video/w9966.c",
87949 + .param3 = 1,
87950 +};
87951 +struct size_overflow_hash _001313_hash = {
87952 + .next = NULL,
87953 + .name = "waiters_read",
87954 + .file = "fs/dlm/debug_fs.c",
87955 + .param3 = 1,
87956 +};
87957 +struct size_overflow_hash _001314_hash = {
87958 + .next = NULL,
87959 + .name = "wa_nep_queue",
87960 + .file = "drivers/usb/wusbcore/wa-nep.c",
87961 + .param2 = 1,
87962 +};
87963 +struct size_overflow_hash _001315_hash = {
87964 + .next = NULL,
87965 + .name = "__wa_xfer_setup_segs",
87966 + .file = "drivers/usb/wusbcore/wa-xfer.c",
87967 + .param2 = 1,
87968 +};
87969 +struct size_overflow_hash _001316_hash = {
87970 + .next = NULL,
87971 + .name = "wdm_read",
87972 + .file = "drivers/usb/class/cdc-wdm.c",
87973 + .param3 = 1,
87974 +};
87975 +struct size_overflow_hash _001317_hash = {
87976 + .next = NULL,
87977 + .name = "wdm_write",
87978 + .file = "drivers/usb/class/cdc-wdm.c",
87979 + .param3 = 1,
87980 +};
87981 +struct size_overflow_hash _001318_hash = {
87982 + .next = NULL,
87983 + .name = "wep_addr_key_count_read",
87984 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87985 + .param3 = 1,
87986 +};
87987 +struct size_overflow_hash _001319_hash = {
87988 + .next = &_000480_hash,
87989 + .name = "wep_decrypt_fail_read",
87990 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87991 + .param3 = 1,
87992 +};
87993 +struct size_overflow_hash _001320_hash = {
87994 + .next = NULL,
87995 + .name = "wep_default_key_count_read",
87996 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87997 + .param3 = 1,
87998 +};
87999 +struct size_overflow_hash _001321_hash = {
88000 + .next = NULL,
88001 + .name = "wep_interrupt_read",
88002 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88003 + .param3 = 1,
88004 +};
88005 +struct size_overflow_hash _001322_hash = {
88006 + .next = NULL,
88007 + .name = "wep_key_not_found_read",
88008 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88009 + .param3 = 1,
88010 +};
88011 +struct size_overflow_hash _001323_hash = {
88012 + .next = NULL,
88013 + .name = "wep_packets_read",
88014 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88015 + .param3 = 1,
88016 +};
88017 +struct size_overflow_hash _001324_hash = {
88018 + .next = NULL,
88019 + .name = "wiimote_hid_send",
88020 + .file = "drivers/hid/hid-wiimote-core.c",
88021 + .param3 = 1,
88022 +};
88023 +struct size_overflow_hash _001325_hash = {
88024 + .next = NULL,
88025 + .name = "wl1271_format_buffer",
88026 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88027 + .param2 = 1,
88028 +};
88029 +struct size_overflow_hash _001326_hash = {
88030 + .next = NULL,
88031 + .name = "wl1273_fm_fops_write",
88032 + .file = "drivers/media/radio/radio-wl1273.c",
88033 + .param3 = 1,
88034 +};
88035 +struct size_overflow_hash _001327_hash = {
88036 + .next = NULL,
88037 + .name = "wlc_phy_loadsampletable_nphy",
88038 + .file = "drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c",
88039 + .param3 = 1,
88040 +};
88041 +struct size_overflow_hash _001328_hash = {
88042 + .next = NULL,
88043 + .name = "wpan_phy_alloc",
88044 + .file = "include/net/wpan-phy.h",
88045 + .param1 = 1,
88046 +};
88047 +struct size_overflow_hash _001329_hash = {
88048 + .next = NULL,
88049 + .name = "write_flush",
88050 + .file = "net/sunrpc/cache.c",
88051 + .param3 = 1,
88052 +};
88053 +struct size_overflow_hash _001330_hash = {
88054 + .next = NULL,
88055 + .name = "write_rio",
88056 + .file = "drivers/usb/misc/rio500.c",
88057 + .param3 = 1,
88058 +};
88059 +struct size_overflow_hash _001331_hash = {
88060 + .next = NULL,
88061 + .name = "wusb_ccm_mac",
88062 + .file = "drivers/usb/wusbcore/crypto.c",
88063 + .param7 = 1,
88064 +};
88065 +struct size_overflow_hash _001332_hash = {
88066 + .next = NULL,
88067 + .name = "xfs_attrmulti_attr_set",
88068 + .file = "fs/xfs/xfs_ioctl.c",
88069 + .param4 = 1,
88070 +};
88071 +struct size_overflow_hash _001333_hash = {
88072 + .next = NULL,
88073 + .name = "xfs_handle_to_dentry",
88074 + .file = "fs/xfs/xfs_ioctl.c",
88075 + .param3 = 1,
88076 +};
88077 +struct size_overflow_hash _001334_hash = {
88078 + .next = NULL,
88079 + .name = "xhci_alloc_stream_info",
88080 + .file = "drivers/usb/host/xhci-mem.c",
88081 + .param3 = 1,
88082 +};
88083 +struct size_overflow_hash _001335_hash = {
88084 + .next = NULL,
88085 + .name = "xprt_alloc",
88086 + .file = "include/linux/sunrpc/xprt.h",
88087 + .param2 = 1,
88088 +};
88089 +struct size_overflow_hash _001336_hash = {
88090 + .next = NULL,
88091 + .name = "xprt_rdma_allocate",
88092 + .file = "net/sunrpc/xprtrdma/transport.c",
88093 + .param2 = 1,
88094 +};
88095 +struct size_overflow_hash _001337_hash = {
88096 + .next = NULL,
88097 + .name = "xt_alloc_table_info",
88098 + .file = "include/linux/netfilter/x_tables.h",
88099 + .param1 = 1,
88100 +};
88101 +struct size_overflow_hash _001338_hash = {
88102 + .next = NULL,
88103 + .name = "zd_usb_iowrite16v_async",
88104 + .file = "drivers/net/wireless/zd1211rw/zd_usb.c",
88105 + .param3 = 1,
88106 +};
88107 +struct size_overflow_hash _001339_hash = {
88108 + .next = NULL,
88109 + .name = "zd_usb_read_fw",
88110 + .file = "drivers/net/wireless/zd1211rw/zd_usb.c",
88111 + .param4 = 1,
88112 +};
88113 +struct size_overflow_hash _001340_hash = {
88114 + .next = NULL,
88115 + .name = "zoran_write",
88116 + .file = "drivers/media/video/zoran/zoran_procfs.c",
88117 + .param3 = 1,
88118 +};
88119 +struct size_overflow_hash _001341_hash = {
88120 + .next = NULL,
88121 + .name = "ad7879_spi_multi_read",
88122 + .file = "drivers/input/touchscreen/ad7879-spi.c",
88123 + .param3 = 1,
88124 +};
88125 +struct size_overflow_hash _001342_hash = {
88126 + .next = NULL,
88127 + .name = "aes_decrypt_fail_read",
88128 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88129 + .param3 = 1,
88130 +};
88131 +struct size_overflow_hash _001343_hash = {
88132 + .next = NULL,
88133 + .name = "aes_decrypt_interrupt_read",
88134 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88135 + .param3 = 1,
88136 +};
88137 +struct size_overflow_hash _001344_hash = {
88138 + .next = NULL,
88139 + .name = "aes_decrypt_packets_read",
88140 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88141 + .param3 = 1,
88142 +};
88143 +struct size_overflow_hash _001345_hash = {
88144 + .next = NULL,
88145 + .name = "aes_encrypt_fail_read",
88146 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88147 + .param3 = 1,
88148 +};
88149 +struct size_overflow_hash _001346_hash = {
88150 + .next = NULL,
88151 + .name = "aes_encrypt_interrupt_read",
88152 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88153 + .param3 = 1,
88154 +};
88155 +struct size_overflow_hash _001347_hash = {
88156 + .next = NULL,
88157 + .name = "aes_encrypt_packets_read",
88158 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88159 + .param3 = 1,
88160 +};
88161 +struct size_overflow_hash _001348_hash = {
88162 + .next = NULL,
88163 + .name = "afs_cell_create",
88164 + .file = "fs/afs/cell.c",
88165 + .param2 = 1,
88166 +};
88167 +struct size_overflow_hash _001349_hash = {
88168 + .next = NULL,
88169 + .name = "agp_create_user_memory",
88170 + .file = "drivers/char/agp/generic.c",
88171 + .param1 = 1,
88172 +};
88173 +struct size_overflow_hash _001350_hash = {
88174 + .next = NULL,
88175 + .name = "alg_setsockopt",
88176 + .file = "crypto/af_alg.c",
88177 + .param5 = 1,
88178 +};
88179 +struct size_overflow_hash _001351_hash = {
88180 + .next = NULL,
88181 + .name = "alloc_targets",
88182 + .file = "drivers/md/dm-table.c",
88183 + .param2 = 1,
88184 +};
88185 +struct size_overflow_hash _001352_hash = {
88186 + .next = NULL,
88187 + .name = "aoechr_write",
88188 + .file = "drivers/block/aoe/aoechr.c",
88189 + .param3 = 1,
88190 +};
88191 +struct size_overflow_hash _001353_hash = {
88192 + .next = NULL,
88193 + .name = "ath6kl_cfg80211_connect_event",
88194 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
88195 + .param7 = 1,
88196 + .param9 = 1,
88197 + .param8 = 1,
88198 +};
88199 +struct size_overflow_hash _001356_hash = {
88200 + .next = NULL,
88201 + .name = "ath6kl_mgmt_tx",
88202 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
88203 + .param9 = 1,
88204 +};
88205 +struct size_overflow_hash _001357_hash = {
88206 + .next = NULL,
88207 + .name = "atomic_read_file",
88208 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofilefs.c",
88209 + .param3 = 1,
88210 +};
88211 +struct size_overflow_hash _001358_hash = {
88212 + .next = NULL,
88213 + .name = "beacon_interval_read",
88214 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88215 + .param3 = 1,
88216 +};
88217 +struct size_overflow_hash _001359_hash = {
88218 + .next = NULL,
88219 + .name = "bm_entry_write",
88220 + .file = "fs/binfmt_misc.c",
88221 + .param3 = 1,
88222 +};
88223 +struct size_overflow_hash _001360_hash = {
88224 + .next = NULL,
88225 + .name = "bm_init",
88226 + .file = "lib/ts_bm.c",
88227 + .param2 = 1,
88228 +};
88229 +struct size_overflow_hash _001361_hash = {
88230 + .next = NULL,
88231 + .name = "bm_register_write",
88232 + .file = "fs/binfmt_misc.c",
88233 + .param3 = 1,
88234 +};
88235 +struct size_overflow_hash _001362_hash = {
88236 + .next = NULL,
88237 + .name = "bm_status_write",
88238 + .file = "fs/binfmt_misc.c",
88239 + .param3 = 1,
88240 +};
88241 +struct size_overflow_hash _001363_hash = {
88242 + .next = NULL,
88243 + .name = "brn_proc_write",
88244 + .file = "drivers/platform/x86/asus_acpi.c",
88245 + .param3 = 1,
88246 +};
88247 +struct size_overflow_hash _001364_hash = {
88248 + .next = NULL,
88249 + .name = "btrfs_map_block",
88250 + .file = "fs/btrfs/volumes.c",
88251 + .param3 = 1,
88252 +};
88253 +struct size_overflow_hash _001365_hash = {
88254 + .next = NULL,
88255 + .name = "cache_downcall",
88256 + .file = "net/sunrpc/cache.c",
88257 + .param3 = 1,
88258 +};
88259 +struct size_overflow_hash _001366_hash = {
88260 + .next = NULL,
88261 + .name = "cache_slow_downcall",
88262 + .file = "net/sunrpc/cache.c",
88263 + .param2 = 1,
88264 +};
88265 +struct size_overflow_hash _001367_hash = {
88266 + .next = NULL,
88267 + .name = "ceph_dns_resolve_name",
88268 + .file = "net/ceph/messenger.c",
88269 + .param1 = 1,
88270 +};
88271 +struct size_overflow_hash _001368_hash = {
88272 + .next = NULL,
88273 + .name = "cfg80211_roamed",
88274 + .file = "include/net/cfg80211.h",
88275 + .param5 = 1,
88276 + .param7 = 1,
88277 +};
88278 +struct size_overflow_hash _001370_hash = {
88279 + .next = NULL,
88280 + .name = "cifs_readv_from_socket",
88281 + .file = "fs/cifs/connect.c",
88282 + .param3 = 1,
88283 +};
88284 +struct size_overflow_hash _001371_hash = {
88285 + .next = NULL,
88286 + .name = "configfs_write_file",
88287 + .file = "fs/configfs/file.c",
88288 + .param3 = 1,
88289 +};
88290 +struct size_overflow_hash _001372_hash = {
88291 + .next = &_001370_hash,
88292 + .name = "cpu_type_read",
88293 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
88294 + .param3 = 1,
88295 +};
88296 +struct size_overflow_hash _001373_hash = {
88297 + .next = NULL,
88298 + .name = "cx18_copy_mdl_to_user",
88299 + .file = "drivers/media/video/cx18/cx18-fileops.c",
88300 + .param4 = 1,
88301 +};
88302 +struct size_overflow_hash _001374_hash = {
88303 + .next = NULL,
88304 + .name = "cxgbi_ddp_reserve",
88305 + .file = "drivers/scsi/cxgbi/libcxgbi.c",
88306 + .param4 = 1,
88307 +};
88308 +struct size_overflow_hash _001375_hash = {
88309 + .next = NULL,
88310 + .name = "cxgbi_device_portmap_create",
88311 + .file = "drivers/scsi/cxgbi/libcxgbi.c",
88312 + .param3 = 1,
88313 +};
88314 +struct size_overflow_hash _001376_hash = {
88315 + .next = NULL,
88316 + .name = "datablob_hmac_append",
88317 + .file = "security/keys/encrypted-keys/encrypted.c",
88318 + .param3 = 1,
88319 +};
88320 +struct size_overflow_hash _001377_hash = {
88321 + .next = NULL,
88322 + .name = "datablob_hmac_verify",
88323 + .file = "security/keys/encrypted-keys/encrypted.c",
88324 + .param4 = 1,
88325 +};
88326 +struct size_overflow_hash _001378_hash = {
88327 + .next = NULL,
88328 + .name = "dataflash_read_fact_otp",
88329 + .file = "drivers/mtd/devices/mtd_dataflash.c",
88330 + .param3 = 1,
88331 +};
88332 +struct size_overflow_hash _001379_hash = {
88333 + .next = NULL,
88334 + .name = "dataflash_read_user_otp",
88335 + .file = "drivers/mtd/devices/mtd_dataflash.c",
88336 + .param3 = 1,
88337 +};
88338 +struct size_overflow_hash _001380_hash = {
88339 + .next = NULL,
88340 + .name = "depth_read",
88341 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
88342 + .param3 = 1,
88343 +};
88344 +struct size_overflow_hash _001381_hash = {
88345 + .next = NULL,
88346 + .name = "depth_write",
88347 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
88348 + .param3 = 1,
88349 +};
88350 +struct size_overflow_hash _001382_hash = {
88351 + .next = NULL,
88352 + .name = "dev_irnet_write",
88353 + .file = "net/irda/irnet/irnet_ppp.c",
88354 + .param3 = 1,
88355 +};
88356 +struct size_overflow_hash _001383_hash = {
88357 + .next = NULL,
88358 + .name = "dev_write",
88359 + .file = "sound/oss/msnd_pinnacle.c",
88360 + .param3 = 1,
88361 +};
88362 +struct size_overflow_hash _001384_hash = {
88363 + .next = NULL,
88364 + .name = "dfs_file_read",
88365 + .file = "fs/ubifs/debug.c",
88366 + .param3 = 1,
88367 +};
88368 +struct size_overflow_hash _001385_hash = {
88369 + .next = NULL,
88370 + .name = "dfs_file_write",
88371 + .file = "fs/ubifs/debug.c",
88372 + .param3 = 1,
88373 +};
88374 +struct size_overflow_hash _001386_hash = {
88375 + .next = NULL,
88376 + .name = "dfs_global_file_read",
88377 + .file = "fs/ubifs/debug.c",
88378 + .param3 = 1,
88379 +};
88380 +struct size_overflow_hash _001387_hash = {
88381 + .next = NULL,
88382 + .name = "dfs_global_file_write",
88383 + .file = "fs/ubifs/debug.c",
88384 + .param3 = 1,
88385 +};
88386 +struct size_overflow_hash _001388_hash = {
88387 + .next = NULL,
88388 + .name = "disconnect",
88389 + .file = "net/bluetooth/mgmt.c",
88390 + .param4 = 1,
88391 +};
88392 +struct size_overflow_hash _001389_hash = {
88393 + .next = NULL,
88394 + .name = "disp_proc_write",
88395 + .file = "drivers/platform/x86/asus_acpi.c",
88396 + .param3 = 1,
88397 +};
88398 +struct size_overflow_hash _001390_hash = {
88399 + .next = NULL,
88400 + .name = "dma_rx_errors_read",
88401 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88402 + .param3 = 1,
88403 +};
88404 +struct size_overflow_hash _001391_hash = {
88405 + .next = NULL,
88406 + .name = "dma_rx_requested_read",
88407 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88408 + .param3 = 1,
88409 +};
88410 +struct size_overflow_hash _001392_hash = {
88411 + .next = NULL,
88412 + .name = "dma_tx_errors_read",
88413 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88414 + .param3 = 1,
88415 +};
88416 +struct size_overflow_hash _001393_hash = {
88417 + .next = NULL,
88418 + .name = "dma_tx_requested_read",
88419 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88420 + .param3 = 1,
88421 +};
88422 +struct size_overflow_hash _001394_hash = {
88423 + .next = NULL,
88424 + .name = "dm_exception_table_init",
88425 + .file = "drivers/md/dm-snap.c",
88426 + .param2 = 1,
88427 +};
88428 +struct size_overflow_hash _001395_hash = {
88429 + .next = NULL,
88430 + .name = "do_dccp_setsockopt",
88431 + .file = "net/dccp/proto.c",
88432 + .param5 = 1,
88433 +};
88434 +struct size_overflow_hash _001396_hash = {
88435 + .next = NULL,
88436 + .name = "dtim_interval_read",
88437 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88438 + .param3 = 1,
88439 +};
88440 +struct size_overflow_hash _001397_hash = {
88441 + .next = NULL,
88442 + .name = "dvb_audio_write",
88443 + .file = "drivers/media/dvb/ttpci/av7110_av.c",
88444 + .param3 = 1,
88445 +};
88446 +struct size_overflow_hash _001398_hash = {
88447 + .next = NULL,
88448 + .name = "dvb_demux_do_ioctl",
88449 + .file = "drivers/media/dvb/dvb-core/dmxdev.c",
88450 + .param3 = 1,
88451 +};
88452 +struct size_overflow_hash _001399_hash = {
88453 + .next = NULL,
88454 + .name = "dvb_dvr_do_ioctl",
88455 + .file = "drivers/media/dvb/dvb-core/dmxdev.c",
88456 + .param3 = 1,
88457 +};
88458 +struct size_overflow_hash _001400_hash = {
88459 + .next = NULL,
88460 + .name = "dvb_video_write",
88461 + .file = "drivers/media/dvb/ttpci/av7110_av.c",
88462 + .param3 = 1,
88463 +};
88464 +struct size_overflow_hash _001401_hash = {
88465 + .next = NULL,
88466 + .name = "ecryptfs_decode_and_decrypt_filename",
88467 + .file = "fs/ecryptfs/crypto.c",
88468 + .param5 = 1,
88469 +};
88470 +struct size_overflow_hash _001402_hash = {
88471 + .next = NULL,
88472 + .name = "ecryptfs_encrypt_and_encode_filename",
88473 + .file = "fs/ecryptfs/crypto.c",
88474 + .param6 = 1,
88475 +};
88476 +struct size_overflow_hash _001403_hash = {
88477 + .next = NULL,
88478 + .name = "enable_read",
88479 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
88480 + .param3 = 1,
88481 +};
88482 +struct size_overflow_hash _001404_hash = {
88483 + .next = NULL,
88484 + .name = "enable_write",
88485 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
88486 + .param3 = 1,
88487 +};
88488 +struct size_overflow_hash _001405_hash = {
88489 + .next = NULL,
88490 + .name = "event_calibration_read",
88491 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88492 + .param3 = 1,
88493 +};
88494 +struct size_overflow_hash _001406_hash = {
88495 + .next = NULL,
88496 + .name = "event_heart_beat_read",
88497 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88498 + .param3 = 1,
88499 +};
88500 +struct size_overflow_hash _001407_hash = {
88501 + .next = NULL,
88502 + .name = "event_oom_late_read",
88503 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88504 + .param3 = 1,
88505 +};
88506 +struct size_overflow_hash _001408_hash = {
88507 + .next = NULL,
88508 + .name = "event_phy_transmit_error_read",
88509 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88510 + .param3 = 1,
88511 +};
88512 +struct size_overflow_hash _001409_hash = {
88513 + .next = NULL,
88514 + .name = "event_rx_mem_empty_read",
88515 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88516 + .param3 = 1,
88517 +};
88518 +struct size_overflow_hash _001410_hash = {
88519 + .next = NULL,
88520 + .name = "event_rx_mismatch_read",
88521 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88522 + .param3 = 1,
88523 +};
88524 +struct size_overflow_hash _001411_hash = {
88525 + .next = NULL,
88526 + .name = "event_rx_pool_read",
88527 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88528 + .param3 = 1,
88529 +};
88530 +struct size_overflow_hash _001412_hash = {
88531 + .next = NULL,
88532 + .name = "event_tx_stuck_read",
88533 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88534 + .param3 = 1,
88535 +};
88536 +struct size_overflow_hash _001413_hash = {
88537 + .next = NULL,
88538 + .name = "excessive_retries_read",
88539 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88540 + .param3 = 1,
88541 +};
88542 +struct size_overflow_hash _001414_hash = {
88543 + .next = NULL,
88544 + .name = "exofs_read_kern",
88545 + .file = "fs/exofs/super.c",
88546 + .param6 = 1,
88547 +};
88548 +struct size_overflow_hash _001415_hash = {
88549 + .next = NULL,
88550 + .name = "fallback_on_nodma_alloc",
88551 + .file = "drivers/block/floppy.c",
88552 + .param2 = 1,
88553 +};
88554 +struct size_overflow_hash _001416_hash = {
88555 + .next = NULL,
88556 + .name = "__feat_register_sp",
88557 + .file = "net/dccp/feat.c",
88558 + .param6 = 1,
88559 +};
88560 +struct size_overflow_hash _001417_hash = {
88561 + .next = NULL,
88562 + .name = "ffs_ep0_write",
88563 + .file = "drivers/usb/gadget/f_fs.c",
88564 + .param3 = 1,
88565 +};
88566 +struct size_overflow_hash _001418_hash = {
88567 + .next = NULL,
88568 + .name = "ffs_epfile_read",
88569 + .file = "drivers/usb/gadget/f_fs.c",
88570 + .param3 = 1,
88571 +};
88572 +struct size_overflow_hash _001419_hash = {
88573 + .next = NULL,
88574 + .name = "ffs_epfile_write",
88575 + .file = "drivers/usb/gadget/f_fs.c",
88576 + .param3 = 1,
88577 +};
88578 +struct size_overflow_hash _001420_hash = {
88579 + .next = NULL,
88580 + .name = "frequency_read",
88581 + .file = "net/mac80211/debugfs.c",
88582 + .param3 = 1,
88583 +};
88584 +struct size_overflow_hash _001421_hash = {
88585 + .next = NULL,
88586 + .name = "fsm_init",
88587 + .file = "lib/ts_fsm.c",
88588 + .param2 = 1,
88589 +};
88590 +struct size_overflow_hash _001422_hash = {
88591 + .next = NULL,
88592 + .name = "garmin_read_process",
88593 + .file = "drivers/usb/serial/garmin_gps.c",
88594 + .param3 = 1,
88595 +};
88596 +struct size_overflow_hash _001423_hash = {
88597 + .next = NULL,
88598 + .name = "garp_request_join",
88599 + .file = "include/net/garp.h",
88600 + .param4 = 1,
88601 +};
88602 +struct size_overflow_hash _001424_hash = {
88603 + .next = NULL,
88604 + .name = "hcd_alloc_coherent",
88605 + .file = "drivers/usb/core/hcd.c",
88606 + .param5 = 1,
88607 +};
88608 +struct size_overflow_hash _001425_hash = {
88609 + .next = NULL,
88610 + .name = "hci_sock_sendmsg",
88611 + .file = "net/bluetooth/hci_sock.c",
88612 + .param4 = 1,
88613 +};
88614 +struct size_overflow_hash _001426_hash = {
88615 + .next = NULL,
88616 + .name = "__hwahc_op_set_gtk",
88617 + .file = "drivers/usb/host/hwa-hc.c",
88618 + .param4 = 1,
88619 +};
88620 +struct size_overflow_hash _001427_hash = {
88621 + .next = NULL,
88622 + .name = "__hwahc_op_set_ptk",
88623 + .file = "drivers/usb/host/hwa-hc.c",
88624 + .param5 = 1,
88625 +};
88626 +struct size_overflow_hash _001428_hash = {
88627 + .next = NULL,
88628 + .name = "ib_send_cm_drep",
88629 + .file = "include/rdma/ib_cm.h",
88630 + .param3 = 1,
88631 +};
88632 +struct size_overflow_hash _001429_hash = {
88633 + .next = NULL,
88634 + .name = "ib_send_cm_mra",
88635 + .file = "include/rdma/ib_cm.h",
88636 + .param4 = 1,
88637 +};
88638 +struct size_overflow_hash _001430_hash = {
88639 + .next = NULL,
88640 + .name = "ib_send_cm_rtu",
88641 + .file = "include/rdma/ib_cm.h",
88642 + .param3 = 1,
88643 +};
88644 +struct size_overflow_hash _001431_hash = {
88645 + .next = NULL,
88646 + .name = "ieee80211_bss_info_update",
88647 + .file = "net/mac80211/scan.c",
88648 + .param4 = 1,
88649 +};
88650 +struct size_overflow_hash _001432_hash = {
88651 + .next = NULL,
88652 + .name = "ieee80211_if_read_aid",
88653 + .file = "net/mac80211/debugfs_netdev.c",
88654 + .param3 = 1,
88655 +};
88656 +struct size_overflow_hash _001433_hash = {
88657 + .next = NULL,
88658 + .name = "ieee80211_if_read_auto_open_plinks",
88659 + .file = "net/mac80211/debugfs_netdev.c",
88660 + .param3 = 1,
88661 +};
88662 +struct size_overflow_hash _001434_hash = {
88663 + .next = NULL,
88664 + .name = "ieee80211_if_read_ave_beacon",
88665 + .file = "net/mac80211/debugfs_netdev.c",
88666 + .param3 = 1,
88667 +};
88668 +struct size_overflow_hash _001435_hash = {
88669 + .next = NULL,
88670 + .name = "ieee80211_if_read_bssid",
88671 + .file = "net/mac80211/debugfs_netdev.c",
88672 + .param3 = 1,
88673 +};
88674 +struct size_overflow_hash _001436_hash = {
88675 + .next = NULL,
88676 + .name = "ieee80211_if_read_channel_type",
88677 + .file = "net/mac80211/debugfs_netdev.c",
88678 + .param3 = 1,
88679 +};
88680 +struct size_overflow_hash _001437_hash = {
88681 + .next = NULL,
88682 + .name = "ieee80211_if_read_dot11MeshConfirmTimeout",
88683 + .file = "net/mac80211/debugfs_netdev.c",
88684 + .param3 = 1,
88685 +};
88686 +struct size_overflow_hash _001438_hash = {
88687 + .next = NULL,
88688 + .name = "ieee80211_if_read_dot11MeshGateAnnouncementProtocol",
88689 + .file = "net/mac80211/debugfs_netdev.c",
88690 + .param3 = 1,
88691 +};
88692 +struct size_overflow_hash _001439_hash = {
88693 + .next = NULL,
88694 + .name = "ieee80211_if_read_dot11MeshHoldingTimeout",
88695 + .file = "net/mac80211/debugfs_netdev.c",
88696 + .param3 = 1,
88697 +};
88698 +struct size_overflow_hash _001440_hash = {
88699 + .next = NULL,
88700 + .name = "ieee80211_if_read_dot11MeshHWMPactivePathTimeout",
88701 + .file = "net/mac80211/debugfs_netdev.c",
88702 + .param3 = 1,
88703 +};
88704 +struct size_overflow_hash _001441_hash = {
88705 + .next = NULL,
88706 + .name = "ieee80211_if_read_dot11MeshHWMPmaxPREQretries",
88707 + .file = "net/mac80211/debugfs_netdev.c",
88708 + .param3 = 1,
88709 +};
88710 +struct size_overflow_hash _001442_hash = {
88711 + .next = NULL,
88712 + .name = "ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime",
88713 + .file = "net/mac80211/debugfs_netdev.c",
88714 + .param3 = 1,
88715 +};
88716 +struct size_overflow_hash _001443_hash = {
88717 + .next = NULL,
88718 + .name = "ieee80211_if_read_dot11MeshHWMPperrMinInterval",
88719 + .file = "net/mac80211/debugfs_netdev.c",
88720 + .param3 = 1,
88721 +};
88722 +struct size_overflow_hash _001444_hash = {
88723 + .next = NULL,
88724 + .name = "ieee80211_if_read_dot11MeshHWMPpreqMinInterval",
88725 + .file = "net/mac80211/debugfs_netdev.c",
88726 + .param3 = 1,
88727 +};
88728 +struct size_overflow_hash _001445_hash = {
88729 + .next = NULL,
88730 + .name = "ieee80211_if_read_dot11MeshHWMPRannInterval",
88731 + .file = "net/mac80211/debugfs_netdev.c",
88732 + .param3 = 1,
88733 +};
88734 +struct size_overflow_hash _001446_hash = {
88735 + .next = NULL,
88736 + .name = "ieee80211_if_read_dot11MeshHWMPRootMode",
88737 + .file = "net/mac80211/debugfs_netdev.c",
88738 + .param3 = 1,
88739 +};
88740 +struct size_overflow_hash _001447_hash = {
88741 + .next = NULL,
88742 + .name = "ieee80211_if_read_dot11MeshMaxPeerLinks",
88743 + .file = "net/mac80211/debugfs_netdev.c",
88744 + .param3 = 1,
88745 +};
88746 +struct size_overflow_hash _001448_hash = {
88747 + .next = NULL,
88748 + .name = "ieee80211_if_read_dot11MeshMaxRetries",
88749 + .file = "net/mac80211/debugfs_netdev.c",
88750 + .param3 = 1,
88751 +};
88752 +struct size_overflow_hash _001449_hash = {
88753 + .next = NULL,
88754 + .name = "ieee80211_if_read_dot11MeshRetryTimeout",
88755 + .file = "net/mac80211/debugfs_netdev.c",
88756 + .param3 = 1,
88757 +};
88758 +struct size_overflow_hash _001450_hash = {
88759 + .next = NULL,
88760 + .name = "ieee80211_if_read_dot11MeshTTL",
88761 + .file = "net/mac80211/debugfs_netdev.c",
88762 + .param3 = 1,
88763 +};
88764 +struct size_overflow_hash _001451_hash = {
88765 + .next = NULL,
88766 + .name = "ieee80211_if_read_dropped_frames_congestion",
88767 + .file = "net/mac80211/debugfs_netdev.c",
88768 + .param3 = 1,
88769 +};
88770 +struct size_overflow_hash _001452_hash = {
88771 + .next = NULL,
88772 + .name = "ieee80211_if_read_dropped_frames_no_route",
88773 + .file = "net/mac80211/debugfs_netdev.c",
88774 + .param3 = 1,
88775 +};
88776 +struct size_overflow_hash _001453_hash = {
88777 + .next = NULL,
88778 + .name = "ieee80211_if_read_dropped_frames_ttl",
88779 + .file = "net/mac80211/debugfs_netdev.c",
88780 + .param3 = 1,
88781 +};
88782 +struct size_overflow_hash _001454_hash = {
88783 + .next = NULL,
88784 + .name = "ieee80211_if_read_drop_unencrypted",
88785 + .file = "net/mac80211/debugfs_netdev.c",
88786 + .param3 = 1,
88787 +};
88788 +struct size_overflow_hash _001455_hash = {
88789 + .next = NULL,
88790 + .name = "ieee80211_if_read_dtim_count",
88791 + .file = "net/mac80211/debugfs_netdev.c",
88792 + .param3 = 1,
88793 +};
88794 +struct size_overflow_hash _001456_hash = {
88795 + .next = NULL,
88796 + .name = "ieee80211_if_read_element_ttl",
88797 + .file = "net/mac80211/debugfs_netdev.c",
88798 + .param3 = 1,
88799 +};
88800 +struct size_overflow_hash _001457_hash = {
88801 + .next = NULL,
88802 + .name = "ieee80211_if_read_estab_plinks",
88803 + .file = "net/mac80211/debugfs_netdev.c",
88804 + .param3 = 1,
88805 +};
88806 +struct size_overflow_hash _001458_hash = {
88807 + .next = NULL,
88808 + .name = "ieee80211_if_read_flags",
88809 + .file = "net/mac80211/debugfs_netdev.c",
88810 + .param3 = 1,
88811 +};
88812 +struct size_overflow_hash _001459_hash = {
88813 + .next = NULL,
88814 + .name = "ieee80211_if_read_fwded_frames",
88815 + .file = "net/mac80211/debugfs_netdev.c",
88816 + .param3 = 1,
88817 +};
88818 +struct size_overflow_hash _001460_hash = {
88819 + .next = NULL,
88820 + .name = "ieee80211_if_read_fwded_mcast",
88821 + .file = "net/mac80211/debugfs_netdev.c",
88822 + .param3 = 1,
88823 +};
88824 +struct size_overflow_hash _001461_hash = {
88825 + .next = NULL,
88826 + .name = "ieee80211_if_read_fwded_unicast",
88827 + .file = "net/mac80211/debugfs_netdev.c",
88828 + .param3 = 1,
88829 +};
88830 +struct size_overflow_hash _001462_hash = {
88831 + .next = NULL,
88832 + .name = "ieee80211_if_read_last_beacon",
88833 + .file = "net/mac80211/debugfs_netdev.c",
88834 + .param3 = 1,
88835 +};
88836 +struct size_overflow_hash _001463_hash = {
88837 + .next = NULL,
88838 + .name = "ieee80211_if_read_min_discovery_timeout",
88839 + .file = "net/mac80211/debugfs_netdev.c",
88840 + .param3 = 1,
88841 +};
88842 +struct size_overflow_hash _001464_hash = {
88843 + .next = NULL,
88844 + .name = "ieee80211_if_read_num_buffered_multicast",
88845 + .file = "net/mac80211/debugfs_netdev.c",
88846 + .param3 = 1,
88847 +};
88848 +struct size_overflow_hash _001465_hash = {
88849 + .next = NULL,
88850 + .name = "ieee80211_if_read_num_sta_authorized",
88851 + .file = "net/mac80211/debugfs_netdev.c",
88852 + .param3 = 1,
88853 +};
88854 +struct size_overflow_hash _001466_hash = {
88855 + .next = NULL,
88856 + .name = "ieee80211_if_read_num_sta_ps",
88857 + .file = "net/mac80211/debugfs_netdev.c",
88858 + .param3 = 1,
88859 +};
88860 +struct size_overflow_hash _001467_hash = {
88861 + .next = NULL,
88862 + .name = "ieee80211_if_read_path_refresh_time",
88863 + .file = "net/mac80211/debugfs_netdev.c",
88864 + .param3 = 1,
88865 +};
88866 +struct size_overflow_hash _001468_hash = {
88867 + .next = NULL,
88868 + .name = "ieee80211_if_read_peer",
88869 + .file = "net/mac80211/debugfs_netdev.c",
88870 + .param3 = 1,
88871 +};
88872 +struct size_overflow_hash _001469_hash = {
88873 + .next = NULL,
88874 + .name = "ieee80211_if_read_rc_rateidx_mask_2ghz",
88875 + .file = "net/mac80211/debugfs_netdev.c",
88876 + .param3 = 1,
88877 +};
88878 +struct size_overflow_hash _001470_hash = {
88879 + .next = NULL,
88880 + .name = "ieee80211_if_read_rc_rateidx_mask_5ghz",
88881 + .file = "net/mac80211/debugfs_netdev.c",
88882 + .param3 = 1,
88883 +};
88884 +struct size_overflow_hash _001471_hash = {
88885 + .next = NULL,
88886 + .name = "ieee80211_if_read_smps",
88887 + .file = "net/mac80211/debugfs_netdev.c",
88888 + .param3 = 1,
88889 +};
88890 +struct size_overflow_hash _001472_hash = {
88891 + .next = NULL,
88892 + .name = "ieee80211_if_read_state",
88893 + .file = "net/mac80211/debugfs_netdev.c",
88894 + .param3 = 1,
88895 +};
88896 +struct size_overflow_hash _001473_hash = {
88897 + .next = NULL,
88898 + .name = "ieee80211_if_read_tkip_mic_test",
88899 + .file = "net/mac80211/debugfs_netdev.c",
88900 + .param3 = 1,
88901 +};
88902 +struct size_overflow_hash _001474_hash = {
88903 + .next = NULL,
88904 + .name = "ieee80211_if_read_tsf",
88905 + .file = "net/mac80211/debugfs_netdev.c",
88906 + .param3 = 1,
88907 +};
88908 +struct size_overflow_hash _001475_hash = {
88909 + .next = NULL,
88910 + .name = "ieee80211_send_probe_req",
88911 + .file = "net/mac80211/util.c",
88912 + .param6 = 1,
88913 +};
88914 +struct size_overflow_hash _001476_hash = {
88915 + .next = NULL,
88916 + .name = "init_map_ipmac",
88917 + .file = "net/netfilter/ipset/ip_set_bitmap_ipmac.c",
88918 + .param3 = 1,
88919 + .param4 = 1,
88920 +};
88921 +struct size_overflow_hash _001478_hash = {
88922 + .next = NULL,
88923 + .name = "init_tid_tabs",
88924 + .file = "drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c",
88925 + .param2 = 1,
88926 + .param4 = 1,
88927 + .param3 = 1,
88928 +};
88929 +struct size_overflow_hash _001481_hash = {
88930 + .next = NULL,
88931 + .name = "isr_cmd_cmplt_read",
88932 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88933 + .param3 = 1,
88934 +};
88935 +struct size_overflow_hash _001482_hash = {
88936 + .next = NULL,
88937 + .name = "isr_commands_read",
88938 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88939 + .param3 = 1,
88940 +};
88941 +struct size_overflow_hash _001483_hash = {
88942 + .next = NULL,
88943 + .name = "isr_decrypt_done_read",
88944 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88945 + .param3 = 1,
88946 +};
88947 +struct size_overflow_hash _001484_hash = {
88948 + .next = NULL,
88949 + .name = "isr_dma0_done_read",
88950 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88951 + .param3 = 1,
88952 +};
88953 +struct size_overflow_hash _001485_hash = {
88954 + .next = NULL,
88955 + .name = "isr_dma1_done_read",
88956 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88957 + .param3 = 1,
88958 +};
88959 +struct size_overflow_hash _001486_hash = {
88960 + .next = NULL,
88961 + .name = "isr_fiqs_read",
88962 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88963 + .param3 = 1,
88964 +};
88965 +struct size_overflow_hash _001487_hash = {
88966 + .next = NULL,
88967 + .name = "isr_host_acknowledges_read",
88968 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88969 + .param3 = 1,
88970 +};
88971 +struct size_overflow_hash _001488_hash = {
88972 + .next = &_001393_hash,
88973 + .name = "isr_hw_pm_mode_changes_read",
88974 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88975 + .param3 = 1,
88976 +};
88977 +struct size_overflow_hash _001489_hash = {
88978 + .next = &_001205_hash,
88979 + .name = "isr_irqs_read",
88980 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88981 + .param3 = 1,
88982 +};
88983 +struct size_overflow_hash _001490_hash = {
88984 + .next = NULL,
88985 + .name = "isr_low_rssi_read",
88986 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88987 + .param3 = 1,
88988 +};
88989 +struct size_overflow_hash _001491_hash = {
88990 + .next = NULL,
88991 + .name = "isr_pci_pm_read",
88992 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88993 + .param3 = 1,
88994 +};
88995 +struct size_overflow_hash _001492_hash = {
88996 + .next = NULL,
88997 + .name = "isr_rx_headers_read",
88998 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88999 + .param3 = 1,
89000 +};
89001 +struct size_overflow_hash _001493_hash = {
89002 + .next = NULL,
89003 + .name = "isr_rx_mem_overflow_read",
89004 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89005 + .param3 = 1,
89006 +};
89007 +struct size_overflow_hash _001494_hash = {
89008 + .next = NULL,
89009 + .name = "isr_rx_procs_read",
89010 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89011 + .param3 = 1,
89012 +};
89013 +struct size_overflow_hash _001495_hash = {
89014 + .next = NULL,
89015 + .name = "isr_rx_rdys_read",
89016 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89017 + .param3 = 1,
89018 +};
89019 +struct size_overflow_hash _001496_hash = {
89020 + .next = NULL,
89021 + .name = "isr_tx_exch_complete_read",
89022 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89023 + .param3 = 1,
89024 +};
89025 +struct size_overflow_hash _001497_hash = {
89026 + .next = NULL,
89027 + .name = "isr_tx_procs_read",
89028 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89029 + .param3 = 1,
89030 +};
89031 +struct size_overflow_hash _001498_hash = {
89032 + .next = NULL,
89033 + .name = "isr_wakeups_read",
89034 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89035 + .param3 = 1,
89036 +};
89037 +struct size_overflow_hash _001499_hash = {
89038 + .next = NULL,
89039 + .name = "ivtv_read",
89040 + .file = "drivers/media/video/ivtv/ivtv-fileops.c",
89041 + .param3 = 1,
89042 +};
89043 +struct size_overflow_hash _001500_hash = {
89044 + .next = NULL,
89045 + .name = "kmem_realloc",
89046 + .file = "fs/xfs/kmem.c",
89047 + .param2 = 1,
89048 +};
89049 +struct size_overflow_hash _001501_hash = {
89050 + .next = NULL,
89051 + .name = "kmem_zalloc",
89052 + .file = "fs/xfs/kmem.c",
89053 + .param1 = 1,
89054 +};
89055 +struct size_overflow_hash _001502_hash = {
89056 + .next = NULL,
89057 + .name = "kmem_zalloc_greedy",
89058 + .file = "fs/xfs/kmem.c",
89059 + .param2 = 1,
89060 + .param3 = 1,
89061 +};
89062 +struct size_overflow_hash _001504_hash = {
89063 + .next = NULL,
89064 + .name = "kmp_init",
89065 + .file = "lib/ts_kmp.c",
89066 + .param2 = 1,
89067 +};
89068 +struct size_overflow_hash _001505_hash = {
89069 + .next = NULL,
89070 + .name = "lcd_proc_write",
89071 + .file = "drivers/platform/x86/asus_acpi.c",
89072 + .param3 = 1,
89073 +};
89074 +struct size_overflow_hash _001506_hash = {
89075 + .next = NULL,
89076 + .name = "ledd_proc_write",
89077 + .file = "drivers/platform/x86/asus_acpi.c",
89078 + .param3 = 1,
89079 +};
89080 +struct size_overflow_hash _001507_hash = {
89081 + .next = NULL,
89082 + .name = "mic_calc_failure_read",
89083 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89084 + .param3 = 1,
89085 +};
89086 +struct size_overflow_hash _001508_hash = {
89087 + .next = NULL,
89088 + .name = "mic_rx_pkts_read",
89089 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89090 + .param3 = 1,
89091 +};
89092 +struct size_overflow_hash _001509_hash = {
89093 + .next = NULL,
89094 + .name = "nfs4_realloc_slot_table",
89095 + .file = "fs/nfs/nfs4proc.c",
89096 + .param2 = 1,
89097 +};
89098 +struct size_overflow_hash _001510_hash = {
89099 + .next = NULL,
89100 + .name = "nfs_idmap_request_key",
89101 + .file = "fs/nfs/idmap.c",
89102 + .param2 = 1,
89103 + .param3 = 1,
89104 +};
89105 +struct size_overflow_hash _001511_hash = {
89106 + .next = NULL,
89107 + .name = "nsm_get_handle",
89108 + .file = "include/linux/lockd/lockd.h",
89109 + .param4 = 1,
89110 +};
89111 +struct size_overflow_hash _001512_hash = {
89112 + .next = NULL,
89113 + .name = "ntfs_copy_from_user_iovec",
89114 + .file = "fs/ntfs/file.c",
89115 + .param3 = 1,
89116 + .param6 = 1,
89117 +};
89118 +struct size_overflow_hash _001514_hash = {
89119 + .next = NULL,
89120 + .name = "ntfs_file_buffered_write",
89121 + .file = "fs/ntfs/file.c",
89122 + .param6 = 1,
89123 +};
89124 +struct size_overflow_hash _001515_hash = {
89125 + .next = NULL,
89126 + .name = "ntfs_malloc_nofs",
89127 + .file = "fs/ntfs/malloc.h",
89128 + .param1 = 1,
89129 +};
89130 +struct size_overflow_hash _001516_hash = {
89131 + .next = NULL,
89132 + .name = "ntfs_malloc_nofs_nofail",
89133 + .file = "fs/ntfs/malloc.h",
89134 + .param1 = 1,
89135 +};
89136 +struct size_overflow_hash _001517_hash = {
89137 + .next = NULL,
89138 + .name = "ocfs2_control_message",
89139 + .file = "fs/ocfs2/stack_user.c",
89140 + .param3 = 1,
89141 +};
89142 +struct size_overflow_hash _001518_hash = {
89143 + .next = NULL,
89144 + .name = "opera1_usb_i2c_msgxfer",
89145 + .file = "drivers/media/dvb/dvb-usb/opera1.c",
89146 + .param4 = 1,
89147 +};
89148 +struct size_overflow_hash _001519_hash = {
89149 + .next = NULL,
89150 + .name = "orinoco_add_extscan_result",
89151 + .file = "drivers/net/wireless/orinoco/scan.c",
89152 + .param3 = 1,
89153 +};
89154 +struct size_overflow_hash _001520_hash = {
89155 + .next = NULL,
89156 + .name = "osd_req_list_collection_objects",
89157 + .file = "include/scsi/osd_initiator.h",
89158 + .param5 = 1,
89159 +};
89160 +struct size_overflow_hash _001521_hash = {
89161 + .next = NULL,
89162 + .name = "osd_req_list_partition_objects",
89163 + .file = "include/scsi/osd_initiator.h",
89164 + .param5 = 1,
89165 +};
89166 +struct size_overflow_hash _001522_hash = {
89167 + .next = NULL,
89168 + .name = "pair_device",
89169 + .file = "net/bluetooth/mgmt.c",
89170 + .param4 = 1,
89171 +};
89172 +struct size_overflow_hash _001523_hash = {
89173 + .next = NULL,
89174 + .name = "pccard_store_cis",
89175 + .file = "drivers/pcmcia/cistpl.c",
89176 + .param6 = 1,
89177 +};
89178 +struct size_overflow_hash _001524_hash = {
89179 + .next = NULL,
89180 + .name = "pin_code_reply",
89181 + .file = "net/bluetooth/mgmt.c",
89182 + .param4 = 1,
89183 +};
89184 +struct size_overflow_hash _001525_hash = {
89185 + .next = NULL,
89186 + .name = "play_iframe",
89187 + .file = "drivers/media/dvb/ttpci/av7110_av.c",
89188 + .param3 = 1,
89189 +};
89190 +struct size_overflow_hash _001526_hash = {
89191 + .next = NULL,
89192 + .name = "pointer_size_read",
89193 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
89194 + .param3 = 1,
89195 +};
89196 +struct size_overflow_hash _001527_hash = {
89197 + .next = NULL,
89198 + .name = "power_read",
89199 + .file = "net/mac80211/debugfs.c",
89200 + .param3 = 1,
89201 +};
89202 +struct size_overflow_hash _001528_hash = {
89203 + .next = NULL,
89204 + .name = "ps_pspoll_max_apturn_read",
89205 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89206 + .param3 = 1,
89207 +};
89208 +struct size_overflow_hash _001529_hash = {
89209 + .next = NULL,
89210 + .name = "ps_pspoll_timeouts_read",
89211 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89212 + .param3 = 1,
89213 +};
89214 +struct size_overflow_hash _001530_hash = {
89215 + .next = NULL,
89216 + .name = "ps_pspoll_utilization_read",
89217 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89218 + .param3 = 1,
89219 +};
89220 +struct size_overflow_hash _001531_hash = {
89221 + .next = NULL,
89222 + .name = "ps_upsd_max_apturn_read",
89223 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89224 + .param3 = 1,
89225 +};
89226 +struct size_overflow_hash _001532_hash = {
89227 + .next = NULL,
89228 + .name = "ps_upsd_max_sptime_read",
89229 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89230 + .param3 = 1,
89231 +};
89232 +struct size_overflow_hash _001533_hash = {
89233 + .next = NULL,
89234 + .name = "ps_upsd_timeouts_read",
89235 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89236 + .param3 = 1,
89237 +};
89238 +struct size_overflow_hash _001534_hash = {
89239 + .next = NULL,
89240 + .name = "ps_upsd_utilization_read",
89241 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89242 + .param3 = 1,
89243 +};
89244 +struct size_overflow_hash _001535_hash = {
89245 + .next = NULL,
89246 + .name = "pwr_disable_ps_read",
89247 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89248 + .param3 = 1,
89249 +};
89250 +struct size_overflow_hash _001536_hash = {
89251 + .next = NULL,
89252 + .name = "pwr_elp_enter_read",
89253 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89254 + .param3 = 1,
89255 +};
89256 +struct size_overflow_hash _001537_hash = {
89257 + .next = NULL,
89258 + .name = "pwr_enable_ps_read",
89259 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89260 + .param3 = 1,
89261 +};
89262 +struct size_overflow_hash _001538_hash = {
89263 + .next = NULL,
89264 + .name = "pwr_fix_tsf_ps_read",
89265 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89266 + .param3 = 1,
89267 +};
89268 +struct size_overflow_hash _001539_hash = {
89269 + .next = NULL,
89270 + .name = "pwr_missing_bcns_read",
89271 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89272 + .param3 = 1,
89273 +};
89274 +struct size_overflow_hash _001540_hash = {
89275 + .next = NULL,
89276 + .name = "pwr_power_save_off_read",
89277 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89278 + .param3 = 1,
89279 +};
89280 +struct size_overflow_hash _001541_hash = {
89281 + .next = NULL,
89282 + .name = "pwr_ps_enter_read",
89283 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89284 + .param3 = 1,
89285 +};
89286 +struct size_overflow_hash _001542_hash = {
89287 + .next = NULL,
89288 + .name = "pwr_rcvd_awake_beacons_read",
89289 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89290 + .param3 = 1,
89291 +};
89292 +struct size_overflow_hash _001543_hash = {
89293 + .next = NULL,
89294 + .name = "pwr_rcvd_beacons_read",
89295 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89296 + .param3 = 1,
89297 +};
89298 +struct size_overflow_hash _001544_hash = {
89299 + .next = NULL,
89300 + .name = "pwr_tx_without_ps_read",
89301 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89302 + .param3 = 1,
89303 +};
89304 +struct size_overflow_hash _001545_hash = {
89305 + .next = NULL,
89306 + .name = "pwr_tx_with_ps_read",
89307 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89308 + .param3 = 1,
89309 +};
89310 +struct size_overflow_hash _001546_hash = {
89311 + .next = NULL,
89312 + .name = "pwr_wake_on_host_read",
89313 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89314 + .param3 = 1,
89315 +};
89316 +struct size_overflow_hash _001547_hash = {
89317 + .next = NULL,
89318 + .name = "pwr_wake_on_timer_exp_read",
89319 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89320 + .param3 = 1,
89321 +};
89322 +struct size_overflow_hash _001548_hash = {
89323 + .next = NULL,
89324 + .name = "qcam_read",
89325 + .file = "drivers/media/video/c-qcam.c",
89326 + .param3 = 1,
89327 +};
89328 +struct size_overflow_hash _001549_hash = {
89329 + .next = NULL,
89330 + .name = "retry_count_read",
89331 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89332 + .param3 = 1,
89333 +};
89334 +struct size_overflow_hash _001550_hash = {
89335 + .next = NULL,
89336 + .name = "rx_dropped_read",
89337 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89338 + .param3 = 1,
89339 +};
89340 +struct size_overflow_hash _001551_hash = {
89341 + .next = NULL,
89342 + .name = "rx_fcs_err_read",
89343 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89344 + .param3 = 1,
89345 +};
89346 +struct size_overflow_hash _001552_hash = {
89347 + .next = NULL,
89348 + .name = "rx_hdr_overflow_read",
89349 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89350 + .param3 = 1,
89351 +};
89352 +struct size_overflow_hash _001553_hash = {
89353 + .next = NULL,
89354 + .name = "rx_hw_stuck_read",
89355 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89356 + .param3 = 1,
89357 +};
89358 +struct size_overflow_hash _001554_hash = {
89359 + .next = NULL,
89360 + .name = "rx_out_of_mem_read",
89361 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89362 + .param3 = 1,
89363 +};
89364 +struct size_overflow_hash _001555_hash = {
89365 + .next = NULL,
89366 + .name = "rx_path_reset_read",
89367 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89368 + .param3 = 1,
89369 +};
89370 +struct size_overflow_hash _001556_hash = {
89371 + .next = NULL,
89372 + .name = "rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read",
89373 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89374 + .param3 = 1,
89375 +};
89376 +struct size_overflow_hash _001557_hash = {
89377 + .next = NULL,
89378 + .name = "rxpipe_descr_host_int_trig_rx_data_read",
89379 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89380 + .param3 = 1,
89381 +};
89382 +struct size_overflow_hash _001558_hash = {
89383 + .next = NULL,
89384 + .name = "rxpipe_missed_beacon_host_int_trig_rx_data_read",
89385 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89386 + .param3 = 1,
89387 +};
89388 +struct size_overflow_hash _001559_hash = {
89389 + .next = NULL,
89390 + .name = "rxpipe_rx_prep_beacon_drop_read",
89391 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89392 + .param3 = 1,
89393 +};
89394 +struct size_overflow_hash _001560_hash = {
89395 + .next = NULL,
89396 + .name = "rxpipe_tx_xfr_host_int_trig_rx_data_read",
89397 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89398 + .param3 = 1,
89399 +};
89400 +struct size_overflow_hash _001561_hash = {
89401 + .next = NULL,
89402 + .name = "rx_reset_counter_read",
89403 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89404 + .param3 = 1,
89405 +};
89406 +struct size_overflow_hash _001562_hash = {
89407 + .next = NULL,
89408 + .name = "rx_streaming_always_read",
89409 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89410 + .param3 = 1,
89411 +};
89412 +struct size_overflow_hash _001563_hash = {
89413 + .next = NULL,
89414 + .name = "rx_streaming_interval_read",
89415 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89416 + .param3 = 1,
89417 +};
89418 +struct size_overflow_hash _001564_hash = {
89419 + .next = NULL,
89420 + .name = "rx_xfr_hint_trig_read",
89421 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89422 + .param3 = 1,
89423 +};
89424 +struct size_overflow_hash _001565_hash = {
89425 + .next = NULL,
89426 + .name = "scsi_execute_req",
89427 + .file = "include/scsi/scsi_device.h",
89428 + .param5 = 1,
89429 +};
89430 +struct size_overflow_hash _001566_hash = {
89431 + .next = NULL,
89432 + .name = "scsi_tgt_kspace_exec",
89433 + .file = "drivers/scsi/scsi_tgt_lib.c",
89434 + .param8 = 1,
89435 +};
89436 +struct size_overflow_hash _001567_hash = {
89437 + .next = NULL,
89438 + .name = "sctp_sendmsg",
89439 + .file = "net/sctp/socket.c",
89440 + .param4 = 1,
89441 +};
89442 +struct size_overflow_hash _001568_hash = {
89443 + .next = NULL,
89444 + .name = "sctp_setsockopt",
89445 + .file = "net/sctp/socket.c",
89446 + .param5 = 1,
89447 +};
89448 +struct size_overflow_hash _001569_hash = {
89449 + .next = NULL,
89450 + .name = "set_connectable",
89451 + .file = "net/bluetooth/mgmt.c",
89452 + .param4 = 1,
89453 +};
89454 +struct size_overflow_hash _001570_hash = {
89455 + .next = NULL,
89456 + .name = "set_discoverable",
89457 + .file = "net/bluetooth/mgmt.c",
89458 + .param4 = 1,
89459 +};
89460 +struct size_overflow_hash _001571_hash = {
89461 + .next = NULL,
89462 + .name = "set_local_name",
89463 + .file = "net/bluetooth/mgmt.c",
89464 + .param4 = 1,
89465 +};
89466 +struct size_overflow_hash _001572_hash = {
89467 + .next = NULL,
89468 + .name = "set_powered",
89469 + .file = "net/bluetooth/mgmt.c",
89470 + .param4 = 1,
89471 +};
89472 +struct size_overflow_hash _001573_hash = {
89473 + .next = NULL,
89474 + .name = "simple_alloc_urb",
89475 + .file = "drivers/usb/misc/usbtest.c",
89476 + .param3 = 1,
89477 +};
89478 +struct size_overflow_hash _001574_hash = {
89479 + .next = NULL,
89480 + .name = "sm_checker_extend",
89481 + .file = "drivers/md/persistent-data/dm-space-map-checker.c",
89482 + .param2 = 1,
89483 +};
89484 +struct size_overflow_hash _001575_hash = {
89485 + .next = NULL,
89486 + .name = "snd_cs4281_BA0_read",
89487 + .file = "sound/pci/cs4281.c",
89488 + .param5 = 1,
89489 +};
89490 +struct size_overflow_hash _001576_hash = {
89491 + .next = NULL,
89492 + .name = "snd_cs4281_BA1_read",
89493 + .file = "sound/pci/cs4281.c",
89494 + .param5 = 1,
89495 +};
89496 +struct size_overflow_hash _001577_hash = {
89497 + .next = NULL,
89498 + .name = "snd_cs46xx_io_read",
89499 + .file = "sound/pci/cs46xx/cs46xx_lib.c",
89500 + .param5 = 1,
89501 +};
89502 +struct size_overflow_hash _001578_hash = {
89503 + .next = NULL,
89504 + .name = "snd_gus_dram_read",
89505 + .file = "include/sound/gus.h",
89506 + .param4 = 1,
89507 +};
89508 +struct size_overflow_hash _001579_hash = {
89509 + .next = NULL,
89510 + .name = "snd_gus_dram_write",
89511 + .file = "include/sound/gus.h",
89512 + .param4 = 1,
89513 +};
89514 +struct size_overflow_hash _001580_hash = {
89515 + .next = NULL,
89516 + .name = "snd_mem_proc_write",
89517 + .file = "sound/core/memalloc.c",
89518 + .param3 = 1,
89519 +};
89520 +struct size_overflow_hash _001581_hash = {
89521 + .next = NULL,
89522 + .name = "snd_pcm_oss_read",
89523 + .file = "sound/core/oss/pcm_oss.c",
89524 + .param3 = 1,
89525 +};
89526 +struct size_overflow_hash _001582_hash = {
89527 + .next = NULL,
89528 + .name = "snd_pcm_oss_sync1",
89529 + .file = "sound/core/oss/pcm_oss.c",
89530 + .param2 = 1,
89531 +};
89532 +struct size_overflow_hash _001583_hash = {
89533 + .next = NULL,
89534 + .name = "snd_pcm_oss_write",
89535 + .file = "sound/core/oss/pcm_oss.c",
89536 + .param3 = 1,
89537 +};
89538 +struct size_overflow_hash _001584_hash = {
89539 + .next = NULL,
89540 + .name = "snd_rme32_capture_copy",
89541 + .file = "sound/pci/rme32.c",
89542 + .param5 = 1,
89543 +};
89544 +struct size_overflow_hash _001585_hash = {
89545 + .next = NULL,
89546 + .name = "snd_rme32_playback_copy",
89547 + .file = "sound/pci/rme32.c",
89548 + .param5 = 1,
89549 +};
89550 +struct size_overflow_hash _001586_hash = {
89551 + .next = NULL,
89552 + .name = "snd_rme96_capture_copy",
89553 + .file = "sound/pci/rme96.c",
89554 + .param5 = 1,
89555 +};
89556 +struct size_overflow_hash _001587_hash = {
89557 + .next = NULL,
89558 + .name = "snd_rme96_playback_copy",
89559 + .file = "sound/pci/rme96.c",
89560 + .param5 = 1,
89561 +};
89562 +struct size_overflow_hash _001588_hash = {
89563 + .next = NULL,
89564 + .name = "spi_execute",
89565 + .file = "drivers/scsi/scsi_transport_spi.c",
89566 + .param5 = 1,
89567 +};
89568 +struct size_overflow_hash _001589_hash = {
89569 + .next = NULL,
89570 + .name = "srp_target_alloc",
89571 + .file = "include/scsi/libsrp.h",
89572 + .param3 = 1,
89573 +};
89574 +struct size_overflow_hash _001590_hash = {
89575 + .next = NULL,
89576 + .name = "stats_dot11ACKFailureCount_read",
89577 + .file = "net/mac80211/debugfs.c",
89578 + .param3 = 1,
89579 +};
89580 +struct size_overflow_hash _001591_hash = {
89581 + .next = NULL,
89582 + .name = "stats_dot11FCSErrorCount_read",
89583 + .file = "net/mac80211/debugfs.c",
89584 + .param3 = 1,
89585 +};
89586 +struct size_overflow_hash _001592_hash = {
89587 + .next = NULL,
89588 + .name = "stats_dot11RTSFailureCount_read",
89589 + .file = "net/mac80211/debugfs.c",
89590 + .param3 = 1,
89591 +};
89592 +struct size_overflow_hash _001593_hash = {
89593 + .next = NULL,
89594 + .name = "stats_dot11RTSSuccessCount_read",
89595 + .file = "net/mac80211/debugfs.c",
89596 + .param3 = 1,
89597 +};
89598 +struct size_overflow_hash _001594_hash = {
89599 + .next = NULL,
89600 + .name = "stk_allocate_buffers",
89601 + .file = "drivers/media/video/stk-webcam.c",
89602 + .param2 = 1,
89603 +};
89604 +struct size_overflow_hash _001595_hash = {
89605 + .next = NULL,
89606 + .name = "submit_inquiry",
89607 + .file = "drivers/scsi/device_handler/scsi_dh_rdac.c",
89608 + .param3 = 1,
89609 +};
89610 +struct size_overflow_hash _001596_hash = {
89611 + .next = NULL,
89612 + .name = "team_options_register",
89613 + .file = "include/linux/if_team.h",
89614 + .param3 = 1,
89615 +};
89616 +struct size_overflow_hash _001597_hash = {
89617 + .next = NULL,
89618 + .name = "test_unaligned_bulk",
89619 + .file = "drivers/usb/misc/usbtest.c",
89620 + .param3 = 1,
89621 +};
89622 +struct size_overflow_hash _001598_hash = {
89623 + .next = NULL,
89624 + .name = "timeout_read",
89625 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
89626 + .param3 = 1,
89627 +};
89628 +struct size_overflow_hash _001599_hash = {
89629 + .next = NULL,
89630 + .name = "timeout_write",
89631 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
89632 + .param3 = 1,
89633 +};
89634 +struct size_overflow_hash _001600_hash = {
89635 + .next = NULL,
89636 + .name = "tipc_link_send_sections_fast",
89637 + .file = "net/tipc/link.c",
89638 + .param4 = 1,
89639 +};
89640 +struct size_overflow_hash _001601_hash = {
89641 + .next = NULL,
89642 + .name = "total_ps_buffered_read",
89643 + .file = "net/mac80211/debugfs.c",
89644 + .param3 = 1,
89645 +};
89646 +struct size_overflow_hash _001602_hash = {
89647 + .next = NULL,
89648 + .name = "ts_read",
89649 + .file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
89650 + .param3 = 1,
89651 +};
89652 +struct size_overflow_hash _001603_hash = {
89653 + .next = NULL,
89654 + .name = "TSS_authhmac",
89655 + .file = "security/keys/trusted.c",
89656 + .param3 = 1,
89657 +};
89658 +struct size_overflow_hash _001604_hash = {
89659 + .next = NULL,
89660 + .name = "TSS_checkhmac1",
89661 + .file = "security/keys/trusted.c",
89662 + .param5 = 1,
89663 +};
89664 +struct size_overflow_hash _001605_hash = {
89665 + .next = NULL,
89666 + .name = "TSS_checkhmac2",
89667 + .file = "security/keys/trusted.c",
89668 + .param5 = 1,
89669 + .param7 = 1,
89670 +};
89671 +struct size_overflow_hash _001607_hash = {
89672 + .next = NULL,
89673 + .name = "ts_write",
89674 + .file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
89675 + .param3 = 1,
89676 +};
89677 +struct size_overflow_hash _001608_hash = {
89678 + .next = NULL,
89679 + .name = "tx_internal_desc_overflow_read",
89680 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89681 + .param3 = 1,
89682 +};
89683 +struct size_overflow_hash _001609_hash = {
89684 + .next = NULL,
89685 + .name = "uapsd_max_sp_len_read",
89686 + .file = "net/mac80211/debugfs.c",
89687 + .param3 = 1,
89688 +};
89689 +struct size_overflow_hash _001610_hash = {
89690 + .next = NULL,
89691 + .name = "uapsd_queues_read",
89692 + .file = "net/mac80211/debugfs.c",
89693 + .param3 = 1,
89694 +};
89695 +struct size_overflow_hash _001611_hash = {
89696 + .next = NULL,
89697 + .name = "ulong_read_file",
89698 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofilefs.c",
89699 + .param3 = 1,
89700 +};
89701 +struct size_overflow_hash _001612_hash = {
89702 + .next = NULL,
89703 + .name = "ulong_write_file",
89704 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofilefs.c",
89705 + .param3 = 1,
89706 +};
89707 +struct size_overflow_hash _001613_hash = {
89708 + .next = NULL,
89709 + .name = "usb_alloc_coherent",
89710 + .file = "include/linux/usb.h",
89711 + .param2 = 1,
89712 +};
89713 +struct size_overflow_hash _001614_hash = {
89714 + .next = NULL,
89715 + .name = "user_power_read",
89716 + .file = "net/mac80211/debugfs.c",
89717 + .param3 = 1,
89718 +};
89719 +struct size_overflow_hash _001615_hash = {
89720 + .next = NULL,
89721 + .name = "vb2_read",
89722 + .file = "include/media/videobuf2-core.h",
89723 + .param3 = 1,
89724 +};
89725 +struct size_overflow_hash _001616_hash = {
89726 + .next = NULL,
89727 + .name = "vb2_write",
89728 + .file = "include/media/videobuf2-core.h",
89729 + .param3 = 1,
89730 +};
89731 +struct size_overflow_hash _001617_hash = {
89732 + .next = NULL,
89733 + .name = "vhost_add_used_n",
89734 + .file = "drivers/vhost/vhost.c",
89735 + .param3 = 1,
89736 +};
89737 +struct size_overflow_hash _001618_hash = {
89738 + .next = NULL,
89739 + .name = "virtqueue_add_buf",
89740 + .file = "include/linux/virtio.h",
89741 + .param3 = 1,
89742 + .param4 = 1,
89743 +};
89744 +struct size_overflow_hash _001620_hash = {
89745 + .next = NULL,
89746 + .name = "vmbus_establish_gpadl",
89747 + .file = "include/linux/hyperv.h",
89748 + .param3 = 1,
89749 +};
89750 +struct size_overflow_hash _001621_hash = {
89751 + .next = NULL,
89752 + .name = "wep_addr_key_count_read",
89753 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89754 + .param3 = 1,
89755 +};
89756 +struct size_overflow_hash _001622_hash = {
89757 + .next = NULL,
89758 + .name = "wep_decrypt_fail_read",
89759 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89760 + .param3 = 1,
89761 +};
89762 +struct size_overflow_hash _001623_hash = {
89763 + .next = NULL,
89764 + .name = "wep_default_key_count_read",
89765 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89766 + .param3 = 1,
89767 +};
89768 +struct size_overflow_hash _001624_hash = {
89769 + .next = NULL,
89770 + .name = "wep_interrupt_read",
89771 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89772 + .param3 = 1,
89773 +};
89774 +struct size_overflow_hash _001625_hash = {
89775 + .next = NULL,
89776 + .name = "wep_iv_read",
89777 + .file = "net/mac80211/debugfs.c",
89778 + .param3 = 1,
89779 +};
89780 +struct size_overflow_hash _001626_hash = {
89781 + .next = NULL,
89782 + .name = "wep_key_not_found_read",
89783 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89784 + .param3 = 1,
89785 +};
89786 +struct size_overflow_hash _001627_hash = {
89787 + .next = NULL,
89788 + .name = "wep_packets_read",
89789 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89790 + .param3 = 1,
89791 +};
89792 +struct size_overflow_hash _001628_hash = {
89793 + .next = NULL,
89794 + .name = "write_led",
89795 + .file = "drivers/platform/x86/asus_acpi.c",
89796 + .param2 = 1,
89797 +};
89798 +struct size_overflow_hash _001629_hash = {
89799 + .next = NULL,
89800 + .name = "wusb_prf",
89801 + .file = "include/linux/usb/wusb.h",
89802 + .param7 = 1,
89803 +};
89804 +struct size_overflow_hash _001630_hash = {
89805 + .next = NULL,
89806 + .name = "zd_usb_iowrite16v",
89807 + .file = "drivers/net/wireless/zd1211rw/zd_usb.c",
89808 + .param3 = 1,
89809 +};
89810 +struct size_overflow_hash _001631_hash = {
89811 + .next = NULL,
89812 + .name = "afs_cell_lookup",
89813 + .file = "fs/afs/cell.c",
89814 + .param2 = 1,
89815 +};
89816 +struct size_overflow_hash _001632_hash = {
89817 + .next = NULL,
89818 + .name = "agp_generic_alloc_user",
89819 + .file = "drivers/char/agp/generic.c",
89820 + .param1 = 1,
89821 +};
89822 +struct size_overflow_hash _001634_hash = {
89823 + .next = NULL,
89824 + .name = "bluetooth_proc_write",
89825 + .file = "drivers/platform/x86/asus_acpi.c",
89826 + .param3 = 1,
89827 +};
89828 +struct size_overflow_hash _001635_hash = {
89829 + .next = NULL,
89830 + .name = "cache_write",
89831 + .file = "net/sunrpc/cache.c",
89832 + .param3 = 1,
89833 +};
89834 +struct size_overflow_hash _001636_hash = {
89835 + .next = NULL,
89836 + .name = "ch_do_scsi",
89837 + .file = "drivers/scsi/ch.c",
89838 + .param4 = 1,
89839 +};
89840 +struct size_overflow_hash _001637_hash = {
89841 + .next = NULL,
89842 + .name = "cx18_read",
89843 + .file = "drivers/media/video/cx18/cx18-fileops.c",
89844 + .param3 = 1,
89845 +};
89846 +struct size_overflow_hash _001638_hash = {
89847 + .next = NULL,
89848 + .name = "dccp_feat_register_sp",
89849 + .file = "net/dccp/feat.c",
89850 + .param5 = 1,
89851 +};
89852 +struct size_overflow_hash _001640_hash = {
89853 + .next = NULL,
89854 + .name = "iso_alloc_urb",
89855 + .file = "drivers/usb/misc/usbtest.c",
89856 + .param5 = 1,
89857 +};
89858 +struct size_overflow_hash _001641_hash = {
89859 + .next = NULL,
89860 + .name = "ivtv_read_pos",
89861 + .file = "drivers/media/video/ivtv/ivtv-fileops.c",
89862 + .param3 = 1,
89863 +};
89864 +struct size_overflow_hash _001642_hash = {
89865 + .next = NULL,
89866 + .name = "mcam_v4l_read",
89867 + .file = "drivers/media/video/marvell-ccic/mcam-core.c",
89868 + .param3 = 1,
89869 +};
89870 +struct size_overflow_hash _001643_hash = {
89871 + .next = NULL,
89872 + .name = "mled_proc_write",
89873 + .file = "drivers/platform/x86/asus_acpi.c",
89874 + .param3 = 1,
89875 +};
89876 +struct size_overflow_hash _001644_hash = {
89877 + .next = NULL,
89878 + .name = "nfs_idmap_lookup_id",
89879 + .file = "fs/nfs/idmap.c",
89880 + .param2 = 1,
89881 +};
89882 +struct size_overflow_hash _001645_hash = {
89883 + .next = NULL,
89884 + .name = "ocfs2_control_write",
89885 + .file = "fs/ocfs2/stack_user.c",
89886 + .param3 = 1,
89887 +};
89888 +struct size_overflow_hash _001646_hash = {
89889 + .next = NULL,
89890 + .name = "osd_req_list_dev_partitions",
89891 + .file = "include/scsi/osd_initiator.h",
89892 + .param4 = 1,
89893 +};
89894 +struct size_overflow_hash _001647_hash = {
89895 + .next = NULL,
89896 + .name = "osd_req_list_partition_collections",
89897 + .file = "include/scsi/osd_initiator.h",
89898 + .param5 = 1,
89899 +};
89900 +struct size_overflow_hash _001648_hash = {
89901 + .next = NULL,
89902 + .name = "pwc_video_read",
89903 + .file = "drivers/media/video/pwc/pwc-if.c",
89904 + .param3 = 1,
89905 +};
89906 +struct size_overflow_hash _001649_hash = {
89907 + .next = NULL,
89908 + .name = "scsi_vpd_inquiry",
89909 + .file = "drivers/scsi/scsi.c",
89910 + .param4 = 1,
89911 +};
89912 +struct size_overflow_hash _001650_hash = {
89913 + .next = NULL,
89914 + .name = "snd_gf1_mem_proc_dump",
89915 + .file = "sound/isa/gus/gus_mem_proc.c",
89916 + .param5 = 1,
89917 +};
89918 +struct size_overflow_hash _001651_hash = {
89919 + .next = NULL,
89920 + .name = "spi_dv_device_echo_buffer",
89921 + .file = "drivers/scsi/scsi_transport_spi.c",
89922 + .param2 = 1,
89923 + .param3 = 1,
89924 +};
89925 +struct size_overflow_hash _001653_hash = {
89926 + .next = NULL,
89927 + .name = "tled_proc_write",
89928 + .file = "drivers/platform/x86/asus_acpi.c",
89929 + .param3 = 1,
89930 +};
89931 +struct size_overflow_hash _001655_hash = {
89932 + .next = NULL,
89933 + .name = "usb_allocate_stream_buffers",
89934 + .file = "drivers/media/dvb/dvb-usb/usb-urb.c",
89935 + .param3 = 1,
89936 +};
89937 +struct size_overflow_hash _001656_hash = {
89938 + .next = NULL,
89939 + .name = "_usb_writeN_sync",
89940 + .file = "drivers/net/wireless/rtlwifi/usb.c",
89941 + .param4 = 1,
89942 +};
89943 +struct size_overflow_hash _001657_hash = {
89944 + .next = NULL,
89945 + .name = "vhost_add_used_and_signal_n",
89946 + .file = "drivers/vhost/vhost.c",
89947 + .param4 = 1,
89948 +};
89949 +struct size_overflow_hash _001658_hash = {
89950 + .next = NULL,
89951 + .name = "vmbus_open",
89952 + .file = "include/linux/hyperv.h",
89953 + .param2 = 1,
89954 + .param3 = 1,
89955 +};
89956 +struct size_overflow_hash _001660_hash = {
89957 + .next = NULL,
89958 + .name = "wled_proc_write",
89959 + .file = "drivers/platform/x86/asus_acpi.c",
89960 + .param3 = 1,
89961 +};
89962 +struct size_overflow_hash _001661_hash = {
89963 + .next = NULL,
89964 + .name = "wusb_prf_256",
89965 + .file = "include/linux/usb/wusb.h",
89966 + .param7 = 1,
89967 +};
89968 +struct size_overflow_hash _001662_hash = {
89969 + .next = NULL,
89970 + .name = "wusb_prf_64",
89971 + .file = "include/linux/usb/wusb.h",
89972 + .param7 = 1,
89973 +};
89974 +struct size_overflow_hash _001663_hash = {
89975 + .next = NULL,
89976 + .name = "agp_allocate_memory",
89977 + .file = "include/linux/agp_backend.h",
89978 + .param2 = 1,
89979 +};
89980 +struct size_overflow_hash _001664_hash = {
89981 + .next = NULL,
89982 + .name = "cx18_read_pos",
89983 + .file = "drivers/media/video/cx18/cx18-fileops.c",
89984 + .param3 = 1,
89985 +};
89986 +struct size_overflow_hash _001665_hash = {
89987 + .next = NULL,
89988 + .name = "nfs_map_group_to_gid",
89989 + .file = "include/linux/nfs_idmap.h",
89990 + .param3 = 1,
89991 +};
89992 +struct size_overflow_hash _001666_hash = {
89993 + .next = NULL,
89994 + .name = "nfs_map_name_to_uid",
89995 + .file = "include/linux/nfs_idmap.h",
89996 + .param3 = 1,
89997 +};
89998 +struct size_overflow_hash _001667_hash = {
89999 + .next = NULL,
90000 + .name = "test_iso_queue",
90001 + .file = "drivers/usb/misc/usbtest.c",
90002 + .param5 = 1,
90003 +};
90004 +struct size_overflow_hash _001668_hash = {
90005 + .next = NULL,
90006 + .name = "agp_allocate_memory_wrap",
90007 + .file = "drivers/char/agp/frontend.c",
90008 + .param1 = 1,
90009 +};
90010 +struct size_overflow_hash _001669_hash = {
90011 + .next = NULL,
90012 + .name = "alloc_irq_cpu_rmap",
90013 + .file = "include/linux/cpu_rmap.h",
90014 + .param1 = 1,
90015 +};
90016 +struct size_overflow_hash _001670_hash = {
90017 + .next = NULL,
90018 + .name = "alloc_ring",
90019 + .file = "drivers/net/ethernet/chelsio/cxgb4/sge.c",
90020 + .param2 = 1,
90021 + .param4 = 1,
90022 +};
90023 +struct size_overflow_hash _001672_hash = {
90024 + .next = &_001124_hash,
90025 + .name = "atomic_counters_read",
90026 + .file = "drivers/infiniband/hw/ipath/ipath_fs.c",
90027 + .param3 = 1,
90028 +};
90029 +struct size_overflow_hash _001673_hash = {
90030 + .next = NULL,
90031 + .name = "atomic_stats_read",
90032 + .file = "drivers/infiniband/hw/ipath/ipath_fs.c",
90033 + .param3 = 1,
90034 +};
90035 +struct size_overflow_hash _001674_hash = {
90036 + .next = NULL,
90037 + .name = "c4iw_init_resource_fifo",
90038 + .file = "drivers/infiniband/hw/cxgb4/resource.c",
90039 + .param3 = 1,
90040 +};
90041 +struct size_overflow_hash _001675_hash = {
90042 + .next = NULL,
90043 + .name = "c4iw_init_resource_fifo_random",
90044 + .file = "drivers/infiniband/hw/cxgb4/resource.c",
90045 + .param3 = 1,
90046 +};
90047 +struct size_overflow_hash _001676_hash = {
90048 + .next = NULL,
90049 + .name = "compat_do_arpt_set_ctl",
90050 + .file = "net/ipv4/netfilter/arp_tables.c",
90051 + .param4 = 1,
90052 +};
90053 +struct size_overflow_hash _001677_hash = {
90054 + .next = NULL,
90055 + .name = "compat_do_ip6t_set_ctl",
90056 + .file = "net/ipv6/netfilter/ip6_tables.c",
90057 + .param4 = 1,
90058 +};
90059 +struct size_overflow_hash _001678_hash = {
90060 + .next = NULL,
90061 + .name = "compat_do_ipt_set_ctl",
90062 + .file = "net/ipv4/netfilter/ip_tables.c",
90063 + .param4 = 1,
90064 +};
90065 +struct size_overflow_hash _001679_hash = {
90066 + .next = NULL,
90067 + .name = "cxio_init_resource_fifo",
90068 + .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
90069 + .param3 = 1,
90070 +};
90071 +struct size_overflow_hash _001680_hash = {
90072 + .next = NULL,
90073 + .name = "cxio_init_resource_fifo_random",
90074 + .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
90075 + .param3 = 1,
90076 +};
90077 +struct size_overflow_hash _001681_hash = {
90078 + .next = NULL,
90079 + .name = "dev_counters_read",
90080 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90081 + .param3 = 1,
90082 +};
90083 +struct size_overflow_hash _001682_hash = {
90084 + .next = NULL,
90085 + .name = "dev_names_read",
90086 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90087 + .param3 = 1,
90088 +};
90089 +struct size_overflow_hash _001683_hash = {
90090 + .next = &_001468_hash,
90091 + .name = "do_arpt_set_ctl",
90092 + .file = "net/ipv4/netfilter/arp_tables.c",
90093 + .param4 = 1,
90094 +};
90095 +struct size_overflow_hash _001684_hash = {
90096 + .next = NULL,
90097 + .name = "do_ip6t_set_ctl",
90098 + .file = "net/ipv6/netfilter/ip6_tables.c",
90099 + .param4 = 1,
90100 +};
90101 +struct size_overflow_hash _001685_hash = {
90102 + .next = NULL,
90103 + .name = "do_ipt_set_ctl",
90104 + .file = "net/ipv4/netfilter/ip_tables.c",
90105 + .param4 = 1,
90106 +};
90107 +struct size_overflow_hash _001686_hash = {
90108 + .next = NULL,
90109 + .name = "drbd_bm_resize",
90110 + .file = "drivers/block/drbd/drbd_bitmap.c",
90111 + .param2 = 1,
90112 +};
90113 +struct size_overflow_hash _001687_hash = {
90114 + .next = NULL,
90115 + .name = "driver_names_read",
90116 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90117 + .param3 = 1,
90118 +};
90119 +struct size_overflow_hash _001688_hash = {
90120 + .next = NULL,
90121 + .name = "driver_stats_read",
90122 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90123 + .param3 = 1,
90124 +};
90125 +struct size_overflow_hash _001689_hash = {
90126 + .next = NULL,
90127 + .name = "flash_read",
90128 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90129 + .param3 = 1,
90130 +};
90131 +struct size_overflow_hash _001690_hash = {
90132 + .next = NULL,
90133 + .name = "flash_read",
90134 + .file = "drivers/infiniband/hw/ipath/ipath_fs.c",
90135 + .param3 = 1,
90136 +};
90137 +struct size_overflow_hash _001691_hash = {
90138 + .next = NULL,
90139 + .name = "flash_write",
90140 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90141 + .param3 = 1,
90142 +};
90143 +struct size_overflow_hash _001692_hash = {
90144 + .next = NULL,
90145 + .name = "flash_write",
90146 + .file = "drivers/infiniband/hw/ipath/ipath_fs.c",
90147 + .param3 = 1,
90148 +};
90149 +struct size_overflow_hash _001693_hash = {
90150 + .next = NULL,
90151 + .name = "ghash_async_setkey",
90152 + .file = "arch/x86/crypto/ghash-clmulni-intel_glue.c",
90153 + .param3 = 1,
90154 +};
90155 +struct size_overflow_hash _001694_hash = {
90156 + .next = NULL,
90157 + .name = "handle_eviocgbit",
90158 + .file = "drivers/input/evdev.c",
90159 + .param3 = 1,
90160 +};
90161 +struct size_overflow_hash _001695_hash = {
90162 + .next = NULL,
90163 + .name = "hid_parse_report",
90164 + .file = "include/linux/hid.h",
90165 + .param3 = 1,
90166 +};
90167 +struct size_overflow_hash _001696_hash = {
90168 + .next = NULL,
90169 + .name = "ipath_get_base_info",
90170 + .file = "drivers/infiniband/hw/ipath/ipath_file_ops.c",
90171 + .param3 = 1,
90172 +};
90173 +struct size_overflow_hash _001697_hash = {
90174 + .next = NULL,
90175 + .name = "options_write",
90176 + .file = "drivers/misc/sgi-gru/gruprocfs.c",
90177 + .param3 = 1,
90178 +};
90179 +struct size_overflow_hash _001698_hash = {
90180 + .next = NULL,
90181 + .name = "portcntrs_1_read",
90182 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90183 + .param3 = 1,
90184 +};
90185 +struct size_overflow_hash _001699_hash = {
90186 + .next = NULL,
90187 + .name = "portcntrs_2_read",
90188 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90189 + .param3 = 1,
90190 +};
90191 +struct size_overflow_hash _001700_hash = {
90192 + .next = NULL,
90193 + .name = "portnames_read",
90194 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90195 + .param3 = 1,
90196 +};
90197 +struct size_overflow_hash _001701_hash = {
90198 + .next = NULL,
90199 + .name = "qib_alloc_devdata",
90200 + .file = "drivers/infiniband/hw/qib/qib_init.c",
90201 + .param2 = 1,
90202 +};
90203 +struct size_overflow_hash _001702_hash = {
90204 + .next = NULL,
90205 + .name = "qib_diag_write",
90206 + .file = "drivers/infiniband/hw/qib/qib_diag.c",
90207 + .param3 = 1,
90208 +};
90209 +struct size_overflow_hash _001703_hash = {
90210 + .next = NULL,
90211 + .name = "qib_get_base_info",
90212 + .file = "drivers/infiniband/hw/qib/qib_file_ops.c",
90213 + .param3 = 1,
90214 +};
90215 +struct size_overflow_hash _001704_hash = {
90216 + .next = NULL,
90217 + .name = "qsfp_1_read",
90218 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90219 + .param3 = 1,
90220 +};
90221 +struct size_overflow_hash _001705_hash = {
90222 + .next = NULL,
90223 + .name = "qsfp_2_read",
90224 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90225 + .param3 = 1,
90226 +};
90227 +struct size_overflow_hash _001706_hash = {
90228 + .next = NULL,
90229 + .name = "rfc4106_set_key",
90230 + .file = "arch/x86/crypto/aesni-intel_glue.c",
90231 + .param3 = 1,
90232 +};
90233 +struct size_overflow_hash _001707_hash = {
90234 + .next = &_000258_hash,
90235 + .name = "stats_read_ul",
90236 + .file = "drivers/idle/i7300_idle.c",
90237 + .param3 = 1,
90238 +};
90239 +struct size_overflow_hash _001708_hash = {
90240 + .next = NULL,
90241 + .name = "xpc_kmalloc_cacheline_aligned",
90242 + .file = "drivers/misc/sgi-xp/xpc_partition.c",
90243 + .param1 = 1,
90244 +};
90245 +struct size_overflow_hash _001709_hash = {
90246 + .next = NULL,
90247 + .name = "xpc_kzalloc_cacheline_aligned",
90248 + .file = "drivers/misc/sgi-xp/xpc_main.c",
90249 + .param1 = 1,
90250 +};
90251 +struct size_overflow_hash _001710_hash = {
90252 + .next = NULL,
90253 + .name = "c4iw_init_resource",
90254 + .file = "drivers/infiniband/hw/cxgb4/resource.c",
90255 + .param2 = 1,
90256 + .param3 = 1,
90257 +};
90258 +struct size_overflow_hash _001712_hash = {
90259 + .next = NULL,
90260 + .name = "cxio_hal_init_resource",
90261 + .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
90262 + .param2 = 1,
90263 + .param7 = 1,
90264 + .param6 = 1,
90265 +};
90266 +struct size_overflow_hash _001715_hash = {
90267 + .next = &_000734_hash,
90268 + .name = "cxio_hal_init_rhdl_resource",
90269 + .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
90270 + .param1 = 1,
90271 +};
90272 +struct size_overflow_hash _001716_hash = {
90273 + .next = NULL,
90274 + .name = "amthi_read",
90275 + .file = "drivers/staging/mei/iorw.c",
90276 + .param4 = 1,
90277 +};
90278 +struct size_overflow_hash _001717_hash = {
90279 + .next = NULL,
90280 + .name = "bcm_char_read",
90281 + .file = "drivers/staging/bcm/Bcmchar.c",
90282 + .param3 = 1,
90283 +};
90284 +struct size_overflow_hash _001718_hash = {
90285 + .next = NULL,
90286 + .name = "BcmCopySection",
90287 + .file = "drivers/staging/bcm/nvm.c",
90288 + .param5 = 1,
90289 +};
90290 +struct size_overflow_hash _001719_hash = {
90291 + .next = NULL,
90292 + .name = "buffer_from_user",
90293 + .file = "drivers/staging/vme/devices/vme_user.c",
90294 + .param3 = 1,
90295 +};
90296 +struct size_overflow_hash _001720_hash = {
90297 + .next = NULL,
90298 + .name = "buffer_to_user",
90299 + .file = "drivers/staging/vme/devices/vme_user.c",
90300 + .param3 = 1,
90301 +};
90302 +struct size_overflow_hash _001721_hash = {
90303 + .next = NULL,
90304 + .name = "capabilities_read",
90305 + .file = "drivers/xen/xenfs/super.c",
90306 + .param3 = 1,
90307 +};
90308 +struct size_overflow_hash _001722_hash = {
90309 + .next = NULL,
90310 + .name = "chd_dec_fetch_cdata",
90311 + .file = "drivers/staging/crystalhd/crystalhd_lnx.c",
90312 + .param3 = 1,
90313 +};
90314 +struct size_overflow_hash _001723_hash = {
90315 + .next = NULL,
90316 + .name = "create_bounce_buffer",
90317 + .file = "drivers/staging/hv/storvsc_drv.c",
90318 + .param3 = 1,
90319 +};
90320 +struct size_overflow_hash _001724_hash = {
90321 + .next = NULL,
90322 + .name = "crystalhd_create_dio_pool",
90323 + .file = "drivers/staging/crystalhd/crystalhd_misc.c",
90324 + .param2 = 1,
90325 +};
90326 +struct size_overflow_hash _001725_hash = {
90327 + .next = NULL,
90328 + .name = "do_read_log_to_user",
90329 + .file = "drivers/staging/android/logger.c",
90330 + .param4 = 1,
90331 +};
90332 +struct size_overflow_hash _001726_hash = {
90333 + .next = NULL,
90334 + .name = "do_write_log_from_user",
90335 + .file = "drivers/staging/android/logger.c",
90336 + .param3 = 1,
90337 +};
90338 +struct size_overflow_hash _001727_hash = {
90339 + .next = NULL,
90340 + .name = "dt3155_read",
90341 + .file = "drivers/staging/media/dt3155v4l/dt3155v4l.c",
90342 + .param3 = 1,
90343 +};
90344 +struct size_overflow_hash _001728_hash = {
90345 + .next = NULL,
90346 + .name = "easycap_alsa_vmalloc",
90347 + .file = "drivers/staging/media/easycap/easycap_sound.c",
90348 + .param2 = 1,
90349 +};
90350 +struct size_overflow_hash _001729_hash = {
90351 + .next = NULL,
90352 + .name = "evm_read_key",
90353 + .file = "security/integrity/evm/evm_secfs.c",
90354 + .param3 = 1,
90355 +};
90356 +struct size_overflow_hash _001730_hash = {
90357 + .next = NULL,
90358 + .name = "evm_write_key",
90359 + .file = "security/integrity/evm/evm_secfs.c",
90360 + .param3 = 1,
90361 +};
90362 +struct size_overflow_hash _001731_hash = {
90363 + .next = NULL,
90364 + .name = "evtchn_read",
90365 + .file = "drivers/xen/evtchn.c",
90366 + .param3 = 1,
90367 +};
90368 +struct size_overflow_hash _001732_hash = {
90369 + .next = NULL,
90370 + .name = "gather_array",
90371 + .file = "drivers/xen/privcmd.c",
90372 + .param3 = 1,
90373 +};
90374 +struct size_overflow_hash _001733_hash = {
90375 + .next = NULL,
90376 + .name = "gnttab_map",
90377 + .file = "drivers/xen/grant-table.c",
90378 + .param2 = 1,
90379 +};
90380 +struct size_overflow_hash _001734_hash = {
90381 + .next = NULL,
90382 + .name = "iio_read_first_n_kfifo",
90383 + .file = "drivers/staging/iio/kfifo_buf.c",
90384 + .param2 = 1,
90385 +};
90386 +struct size_overflow_hash _001735_hash = {
90387 + .next = NULL,
90388 + .name = "iio_read_first_n_sw_rb",
90389 + .file = "drivers/staging/iio/ring_sw.c",
90390 + .param2 = 1,
90391 +};
90392 +struct size_overflow_hash _001736_hash = {
90393 + .next = NULL,
90394 + .name = "keymap_store",
90395 + .file = "drivers/staging/speakup/kobjects.c",
90396 + .param4 = 1,
90397 +};
90398 +struct size_overflow_hash _001737_hash = {
90399 + .next = NULL,
90400 + .name = "line6_dumpreq_initbuf",
90401 + .file = "drivers/staging/line6/dumprequest.c",
90402 + .param3 = 1,
90403 +};
90404 +struct size_overflow_hash _001738_hash = {
90405 + .next = NULL,
90406 + .name = "lirc_write",
90407 + .file = "drivers/staging/media/lirc/lirc_parallel.c",
90408 + .param3 = 1,
90409 +};
90410 +struct size_overflow_hash _001739_hash = {
90411 + .next = NULL,
90412 + .name = "lirc_write",
90413 + .file = "drivers/staging/media/lirc/lirc_sir.c",
90414 + .param3 = 1,
90415 +};
90416 +struct size_overflow_hash _001740_hash = {
90417 + .next = &_000815_hash,
90418 + .name = "lirc_write",
90419 + .file = "drivers/staging/media/lirc/lirc_serial.c",
90420 + .param3 = 1,
90421 +};
90422 +struct size_overflow_hash _001741_hash = {
90423 + .next = &_001021_hash,
90424 + .name = "_malloc",
90425 + .file = "drivers/staging/rtl8712/osdep_service.h",
90426 + .param1 = 1,
90427 +};
90428 +struct size_overflow_hash _001742_hash = {
90429 + .next = NULL,
90430 + .name = "mei_read",
90431 + .file = "drivers/staging/mei/main.c",
90432 + .param3 = 1,
90433 +};
90434 +struct size_overflow_hash _001743_hash = {
90435 + .next = NULL,
90436 + .name = "mei_write",
90437 + .file = "drivers/staging/mei/main.c",
90438 + .param3 = 1,
90439 +};
90440 +struct size_overflow_hash _001744_hash = {
90441 + .next = NULL,
90442 + .name = "msg_set",
90443 + .file = "drivers/staging/speakup/i18n.c",
90444 + .param3 = 1,
90445 +};
90446 +struct size_overflow_hash _001745_hash = {
90447 + .next = NULL,
90448 + .name = "OS_kmalloc",
90449 + .file = "drivers/staging/cxt1e1/sbecom_inline_linux.h",
90450 + .param1 = 1,
90451 +};
90452 +struct size_overflow_hash _001746_hash = {
90453 + .next = NULL,
90454 + .name = "queue_reply",
90455 + .file = "drivers/xen/xenbus/xenbus_dev_frontend.c",
90456 + .param3 = 1,
90457 +};
90458 +struct size_overflow_hash _001747_hash = {
90459 + .next = &_000841_hash,
90460 + .name = "resource_from_user",
90461 + .file = "drivers/staging/vme/devices/vme_user.c",
90462 + .param3 = 1,
90463 +};
90464 +struct size_overflow_hash _001748_hash = {
90465 + .next = NULL,
90466 + .name = "sca3000_read_first_n_hw_rb",
90467 + .file = "drivers/staging/iio/accel/sca3000_ring.c",
90468 + .param2 = 1,
90469 +};
90470 +struct size_overflow_hash _001749_hash = {
90471 + .next = NULL,
90472 + .name = "sep_lock_user_pages",
90473 + .file = "drivers/staging/sep/sep_driver.c",
90474 + .param2 = 1,
90475 + .param3 = 1,
90476 +};
90477 +struct size_overflow_hash _001751_hash = {
90478 + .next = NULL,
90479 + .name = "sep_prepare_input_output_dma_table_in_dcb",
90480 + .file = "drivers/staging/sep/sep_driver.c",
90481 + .param4 = 1,
90482 + .param5 = 1,
90483 + .param2 = 1,
90484 + .param3 = 1,
90485 +};
90486 +struct size_overflow_hash _001753_hash = {
90487 + .next = NULL,
90488 + .name = "split",
90489 + .file = "drivers/xen/xenbus/xenbus_xs.c",
90490 + .param2 = 1,
90491 +};
90492 +struct size_overflow_hash _001754_hash = {
90493 + .next = NULL,
90494 + .name = "storvsc_connect_to_vsp",
90495 + .file = "drivers/staging/hv/storvsc_drv.c",
90496 + .param2 = 1,
90497 +};
90498 +struct size_overflow_hash _001755_hash = {
90499 + .next = NULL,
90500 + .name = "u32_array_read",
90501 + .file = "arch/x86/xen/debugfs.c",
90502 + .param3 = 1,
90503 +};
90504 +struct size_overflow_hash _001756_hash = {
90505 + .next = NULL,
90506 + .name = "ValidateDSDParamsChecksum",
90507 + .file = "drivers/staging/bcm/led_control.c",
90508 + .param3 = 1,
90509 +};
90510 +struct size_overflow_hash _001757_hash = {
90511 + .next = NULL,
90512 + .name = "vfd_write",
90513 + .file = "drivers/staging/media/lirc/lirc_sasem.c",
90514 + .param3 = 1,
90515 +};
90516 +struct size_overflow_hash _001758_hash = {
90517 + .next = NULL,
90518 + .name = "vfd_write",
90519 + .file = "drivers/staging/media/lirc/lirc_imon.c",
90520 + .param3 = 1,
90521 +};
90522 +struct size_overflow_hash _001759_hash = {
90523 + .next = NULL,
90524 + .name = "Wb35Reg_BurstWrite",
90525 + .file = "drivers/staging/winbond/wb35reg.c",
90526 + .param4 = 1,
90527 +};
90528 +struct size_overflow_hash _001760_hash = {
90529 + .next = NULL,
90530 + .name = "xenbus_file_write",
90531 + .file = "drivers/xen/xenbus/xenbus_dev_frontend.c",
90532 + .param3 = 1,
90533 +};
90534 +struct size_overflow_hash _001761_hash = {
90535 + .next = NULL,
90536 + .name = "xsd_read",
90537 + .file = "drivers/xen/xenfs/xenstored.c",
90538 + .param3 = 1,
90539 +};
90540 +struct size_overflow_hash _001762_hash = {
90541 + .next = NULL,
90542 + .name = "line6_dumpreq_init",
90543 + .file = "drivers/staging/line6/dumprequest.c",
90544 + .param3 = 1,
90545 +};
90546 +struct size_overflow_hash _001763_hash = {
90547 + .next = NULL,
90548 + .name = "r8712_usbctrl_vendorreq",
90549 + .file = "drivers/staging/rtl8712/usb_ops_linux.c",
90550 + .param6 = 1,
90551 +};
90552 +struct size_overflow_hash _001764_hash = {
90553 + .next = NULL,
90554 + .name = "r871x_set_wpa_ie",
90555 + .file = "drivers/staging/rtl8712/rtl871x_ioctl_linux.c",
90556 + .param3 = 1,
90557 +};
90558 +struct size_overflow_hash _001765_hash = {
90559 + .next = NULL,
90560 + .name = "sep_prepare_input_dma_table",
90561 + .file = "drivers/staging/sep/sep_driver.c",
90562 + .param2 = 1,
90563 + .param3 = 1,
90564 +};
90565 +struct size_overflow_hash _001767_hash = {
90566 + .next = NULL,
90567 + .name = "sep_prepare_input_output_dma_table",
90568 + .file = "drivers/staging/sep/sep_driver.c",
90569 + .param2 = 1,
90570 + .param4 = 1,
90571 + .param3 = 1,
90572 +};
90573 +struct size_overflow_hash _001770_hash = {
90574 + .next = NULL,
90575 + .name = "vme_user_write",
90576 + .file = "drivers/staging/vme/devices/vme_user.c",
90577 + .param3 = 1,
90578 +};
90579 +struct size_overflow_hash _001771_hash = {
90580 + .next = NULL,
90581 + .name = "alloc_ebda_hpc",
90582 + .file = "drivers/pci/hotplug/ibmphp_ebda.c",
90583 + .param1 = 1,
90584 + .param2 = 1,
90585 +};
90586 +struct size_overflow_hash _001772_hash = {
90587 + .next = NULL,
90588 + .name = "add_uuid",
90589 + .file = "net/bluetooth/mgmt.c",
90590 + .param4 = 1,
90591 +};
90592 +struct size_overflow_hash _001773_hash = {
90593 + .next = NULL,
90594 + .name = "__alloc_extent_buffer",
90595 + .file = "fs/btrfs/extent_io.c",
90596 + .param3 = 1,
90597 +};
90598 +struct size_overflow_hash _001774_hash = {
90599 + .next = NULL,
90600 + .name = "array_zalloc",
90601 + .file = "drivers/target/target_core_tpg.c",
90602 + .param2 = 1,
90603 +};
90604 +struct size_overflow_hash _001775_hash = {
90605 + .next = NULL,
90606 + .name = "ath6kl_fwlog_block_read",
90607 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
90608 + .param3 = 1,
90609 +};
90610 +struct size_overflow_hash _001776_hash = {
90611 + .next = NULL,
90612 + .name = "ath6kl_listen_int_read",
90613 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
90614 + .param3 = 1,
90615 +};
90616 +struct size_overflow_hash _001777_hash = {
90617 + .next = NULL,
90618 + .name = "ath6kl_mgmt_powersave_ap",
90619 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
90620 + .param6 = 1,
90621 +};
90622 +struct size_overflow_hash _001778_hash = {
90623 + .next = NULL,
90624 + .name = "__ath6kl_wmi_send_mgmt_cmd",
90625 + .file = "drivers/net/wireless/ath/ath6kl/wmi.c",
90626 + .param7 = 1,
90627 +};
90628 +struct size_overflow_hash _001779_hash = {
90629 + .next = NULL,
90630 + .name = "cld_pipe_downcall",
90631 + .file = "fs/nfsd/nfs4recover.c",
90632 + .param3 = 1,
90633 +};
90634 +struct size_overflow_hash _001780_hash = {
90635 + .next = NULL,
90636 + .name = "create_bounce_buffer",
90637 + .file = "drivers/scsi/storvsc_drv.c",
90638 + .param3 = 1,
90639 +};
90640 +struct size_overflow_hash _001781_hash = {
90641 + .next = NULL,
90642 + .name = "dwc3_link_state_write",
90643 + .file = "drivers/usb/dwc3/debugfs.c",
90644 + .param3 = 1,
90645 +};
90646 +struct size_overflow_hash _001782_hash = {
90647 + .next = NULL,
90648 + .name = "dwc3_testmode_write",
90649 + .file = "drivers/usb/dwc3/debugfs.c",
90650 + .param3 = 1,
90651 +};
90652 +struct size_overflow_hash _001783_hash = {
90653 + .next = NULL,
90654 + .name = "dynamic_ps_timeout_read",
90655 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90656 + .param3 = 1,
90657 +};
90658 +struct size_overflow_hash _001784_hash = {
90659 + .next = NULL,
90660 + .name = "forced_ps_read",
90661 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90662 + .param3 = 1,
90663 +};
90664 +struct size_overflow_hash _001785_hash = {
90665 + .next = NULL,
90666 + .name = "idmap_pipe_downcall",
90667 + .file = "fs/nfs/idmap.c",
90668 + .param3 = 1,
90669 +};
90670 +struct size_overflow_hash _001786_hash = {
90671 + .next = NULL,
90672 + .name = "ieee80211_if_read_rc_rateidx_mcs_mask_2ghz",
90673 + .file = "net/mac80211/debugfs_netdev.c",
90674 + .param3 = 1,
90675 +};
90676 +struct size_overflow_hash _001787_hash = {
90677 + .next = NULL,
90678 + .name = "ieee80211_if_read_rc_rateidx_mcs_mask_5ghz",
90679 + .file = "net/mac80211/debugfs_netdev.c",
90680 + .param3 = 1,
90681 +};
90682 +struct size_overflow_hash _001788_hash = {
90683 + .next = NULL,
90684 + .name = "ieee80211_if_read_rssi_threshold",
90685 + .file = "net/mac80211/debugfs_netdev.c",
90686 + .param3 = 1,
90687 +};
90688 +struct size_overflow_hash _001789_hash = {
90689 + .next = NULL,
90690 + .name = "ieee80211_if_read_uapsd_max_sp_len",
90691 + .file = "net/mac80211/debugfs_netdev.c",
90692 + .param3 = 1,
90693 +};
90694 +struct size_overflow_hash _001790_hash = {
90695 + .next = NULL,
90696 + .name = "ieee80211_if_read_uapsd_queues",
90697 + .file = "net/mac80211/debugfs_netdev.c",
90698 + .param3 = 1,
90699 +};
90700 +struct size_overflow_hash _001791_hash = {
90701 + .next = NULL,
90702 + .name = "irq_domain_add_linear",
90703 + .file = "include/linux/irqdomain.h",
90704 + .param2 = 1,
90705 +};
90706 +struct size_overflow_hash _001792_hash = {
90707 + .next = NULL,
90708 + .name = "kmalloc_array",
90709 + .file = "include/linux/slab.h",
90710 + .param1 = 1,
90711 + .param2 = 1,
90712 +};
90713 +struct size_overflow_hash _001794_hash = {
90714 + .next = NULL,
90715 + .name = "nfc_llcp_send_i_frame",
90716 + .file = "net/nfc/llcp/commands.c",
90717 + .param3 = 1,
90718 +};
90719 +struct size_overflow_hash _001797_hash = {
90720 + .next = NULL,
90721 + .name = "pn533_dep_link_up",
90722 + .file = "drivers/nfc/pn533.c",
90723 + .param5 = 1,
90724 +};
90725 +struct size_overflow_hash _001798_hash = {
90726 + .next = NULL,
90727 + .name = "port_show_regs",
90728 + .file = "drivers/tty/serial/pch_uart.c",
90729 + .param3 = 1,
90730 +};
90731 +struct size_overflow_hash _001799_hash = {
90732 + .next = NULL,
90733 + .name = "qla4xxx_alloc_work",
90734 + .file = "drivers/scsi/qla4xxx/ql4_os.c",
90735 + .param2 = 1,
90736 +};
90737 +struct size_overflow_hash _001800_hash = {
90738 + .next = NULL,
90739 + .name = "rbd_add",
90740 + .file = "drivers/block/rbd.c",
90741 + .param3 = 1,
90742 +};
90743 +struct size_overflow_hash _001801_hash = {
90744 + .next = NULL,
90745 + .name = "read_file_reset",
90746 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
90747 + .param3 = 1,
90748 +};
90749 +struct size_overflow_hash _001802_hash = {
90750 + .next = NULL,
90751 + .name = "regmap_bulk_write",
90752 + .file = "include/linux/regmap.h",
90753 + .param4 = 1,
90754 +};
90755 +struct size_overflow_hash _001803_hash = {
90756 + .next = NULL,
90757 + .name = "regmap_name_read_file",
90758 + .file = "drivers/base/regmap/regmap-debugfs.c",
90759 + .param3 = 1,
90760 +};
90761 +struct size_overflow_hash _001804_hash = {
90762 + .next = NULL,
90763 + .name = "reiserfs_allocate_list_bitmaps",
90764 + .file = "fs/reiserfs/journal.c",
90765 + .param3 = 1,
90766 +};
90767 +struct size_overflow_hash _001805_hash = {
90768 + .next = NULL,
90769 + .name = "reiserfs_resize",
90770 + .file = "fs/reiserfs/resize.c",
90771 + .param2 = 1,
90772 +};
90773 +struct size_overflow_hash _001806_hash = {
90774 + .next = NULL,
90775 + .name = "remove_uuid",
90776 + .file = "net/bluetooth/mgmt.c",
90777 + .param4 = 1,
90778 +};
90779 +struct size_overflow_hash _001807_hash = {
90780 + .next = NULL,
90781 + .name = "set_dev_class",
90782 + .file = "net/bluetooth/mgmt.c",
90783 + .param4 = 1,
90784 +};
90785 +struct size_overflow_hash _001808_hash = {
90786 + .next = NULL,
90787 + .name = "set_le",
90788 + .file = "net/bluetooth/mgmt.c",
90789 + .param4 = 1,
90790 +};
90791 +struct size_overflow_hash _001809_hash = {
90792 + .next = NULL,
90793 + .name = "set_link_security",
90794 + .file = "net/bluetooth/mgmt.c",
90795 + .param4 = 1,
90796 +};
90797 +struct size_overflow_hash _001810_hash = {
90798 + .next = NULL,
90799 + .name = "set_ssp",
90800 + .file = "net/bluetooth/mgmt.c",
90801 + .param4 = 1,
90802 +};
90803 +struct size_overflow_hash _001811_hash = {
90804 + .next = NULL,
90805 + .name = "shmem_setxattr",
90806 + .file = "mm/shmem.c",
90807 + .param4 = 1,
90808 +};
90809 +struct size_overflow_hash _001812_hash = {
90810 + .next = NULL,
90811 + .name = "shmem_xattr_alloc",
90812 + .file = "mm/shmem.c",
90813 + .param2 = 1,
90814 +};
90815 +struct size_overflow_hash _001813_hash = {
90816 + .next = NULL,
90817 + .name = "split_scan_timeout_read",
90818 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90819 + .param3 = 1,
90820 +};
90821 +struct size_overflow_hash _001814_hash = {
90822 + .next = NULL,
90823 + .name = "storvsc_connect_to_vsp",
90824 + .file = "drivers/scsi/storvsc_drv.c",
90825 + .param2 = 1,
90826 +};
90827 +struct size_overflow_hash _001815_hash = {
90828 + .next = NULL,
90829 + .name = "suspend_dtim_interval_read",
90830 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90831 + .param3 = 1,
90832 +};
90833 +struct size_overflow_hash _001816_hash = {
90834 + .next = NULL,
90835 + .name = "alloc_extent_buffer",
90836 + .file = "fs/btrfs/extent_io.c",
90837 + .param3 = 1,
90838 +};
90839 +struct size_overflow_hash _001817_hash = {
90840 + .next = NULL,
90841 + .name = "nfs_idmap_get_key",
90842 + .file = "fs/nfs/idmap.c",
90843 + .param2 = 1,
90844 +};
90845 +struct size_overflow_hash _001818_hash = {
90846 + .next = NULL,
90847 + .name = "iio_debugfs_read_reg",
90848 + .file = "drivers/staging/iio/industrialio-core.c",
90849 + .param3 = 1,
90850 +};
90851 +struct size_overflow_hash _001819_hash = {
90852 + .next = NULL,
90853 + .name = "iio_debugfs_write_reg",
90854 + .file = "drivers/staging/iio/industrialio-core.c",
90855 + .param3 = 1,
90856 +};
90857 +struct size_overflow_hash _001820_hash = {
90858 + .next = NULL,
90859 + .name = "iio_event_chrdev_read",
90860 + .file = "drivers/staging/iio/industrialio-event.c",
90861 + .param3 = 1,
90862 +};
90863 +struct size_overflow_hash _001821_hash = {
90864 + .next = NULL,
90865 + .name = "sep_create_dcb_dmatables_context",
90866 + .file = "drivers/staging/sep/sep_main.c",
90867 + .param6 = 1,
90868 +};
90869 +struct size_overflow_hash _001822_hash = {
90870 + .next = NULL,
90871 + .name = "sep_create_dcb_dmatables_context_kernel",
90872 + .file = "drivers/staging/sep/sep_main.c",
90873 + .param6 = 1,
90874 +};
90875 +struct size_overflow_hash _001823_hash = {
90876 + .next = NULL,
90877 + .name = "sep_create_msgarea_context",
90878 + .file = "drivers/staging/sep/sep_main.c",
90879 + .param4 = 1,
90880 +};
90881 +struct size_overflow_hash _001824_hash = {
90882 + .next = NULL,
90883 + .name = "sep_lli_table_secure_dma",
90884 + .file = "drivers/staging/sep/sep_main.c",
90885 + .param2 = 1,
90886 + .param3 = 1,
90887 +};
90888 +struct size_overflow_hash _001826_hash = {
90889 + .next = NULL,
90890 + .name = "sep_lock_user_pages",
90891 + .file = "drivers/staging/sep/sep_main.c",
90892 + .param2 = 1,
90893 + .param3 = 1,
90894 +};
90895 +struct size_overflow_hash _001828_hash = {
90896 + .next = NULL,
90897 + .name = "sep_prepare_input_output_dma_table_in_dcb",
90898 + .file = "drivers/staging/sep/sep_main.c",
90899 + .param4 = 1,
90900 + .param5 = 1,
90901 +};
90902 +struct size_overflow_hash _001830_hash = {
90903 + .next = NULL,
90904 + .name = "sep_read",
90905 + .file = "drivers/staging/sep/sep_main.c",
90906 + .param3 = 1,
90907 +};
90908 +struct size_overflow_hash _001831_hash = {
90909 + .next = NULL,
90910 + .name = "alloc_rx_desc_ring",
90911 + .file = "drivers/staging/rtl8187se/r8180_core.c",
90912 + .param2 = 1,
90913 +};
90914 +struct size_overflow_hash _001832_hash = {
90915 + .next = NULL,
90916 + .name = "alloc_subdevices",
90917 + .file = "drivers/staging/comedi/drivers/../comedidev.h",
90918 + .param2 = 1,
90919 +};
90920 +struct size_overflow_hash _001833_hash = {
90921 + .next = NULL,
90922 + .name = "alloc_subdevices",
90923 + .file = "drivers/staging/comedi/drivers/addi-data/../../comedidev.h",
90924 + .param2 = 1,
90925 +};
90926 +struct size_overflow_hash _001834_hash = {
90927 + .next = NULL,
90928 + .name = "comedi_read",
90929 + .file = "drivers/staging/comedi/comedi_fops.c",
90930 + .param3 = 1,
90931 +};
90932 +struct size_overflow_hash _001835_hash = {
90933 + .next = NULL,
90934 + .name = "comedi_write",
90935 + .file = "drivers/staging/comedi/comedi_fops.c",
90936 + .param3 = 1,
90937 +};
90938 +struct size_overflow_hash _001836_hash = {
90939 + .next = NULL,
90940 + .name = "compat_sys_preadv64",
90941 + .file = "fs/compat.c",
90942 + .param3 = 1,
90943 +};
90944 +struct size_overflow_hash _001837_hash = {
90945 + .next = NULL,
90946 + .name = "compat_sys_pwritev64",
90947 + .file = "fs/compat.c",
90948 + .param3 = 1,
90949 +};
90950 +struct size_overflow_hash _001838_hash = {
90951 + .next = NULL,
90952 + .name = "ext_sd_execute_read_data",
90953 + .file = "drivers/staging/rts5139/sd_cprm.c",
90954 + .param9 = 1,
90955 +};
90956 +struct size_overflow_hash _001839_hash = {
90957 + .next = NULL,
90958 + .name = "ext_sd_execute_write_data",
90959 + .file = "drivers/staging/rts5139/sd_cprm.c",
90960 + .param9 = 1,
90961 +};
90962 +struct size_overflow_hash _001840_hash = {
90963 + .next = NULL,
90964 + .name = "ieee80211_wx_set_gen_ie",
90965 + .file = "drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c",
90966 + .param3 = 1,
90967 +};
90968 +struct size_overflow_hash _001841_hash = {
90969 + .next = NULL,
90970 + .name = "ieee80211_wx_set_gen_ie_rsl",
90971 + .file = "drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c",
90972 + .param3 = 1,
90973 +};
90974 +struct size_overflow_hash _001842_hash = {
90975 + .next = NULL,
90976 + .name = "ni_gpct_device_construct",
90977 + .file = "drivers/staging/comedi/drivers/ni_tio.c",
90978 + .param5 = 1,
90979 +};
90980 +struct size_overflow_hash _001843_hash = {
90981 + .next = NULL,
90982 + .name = "Realloc",
90983 + .file = "drivers/staging/comedi/drivers/comedi_bond.c",
90984 + .param2 = 1,
90985 +};
90986 +struct size_overflow_hash _001844_hash = {
90987 + .next = NULL,
90988 + .name = "rtllib_wx_set_gen_ie",
90989 + .file = "drivers/staging/rtl8192e/rtllib_wx.c",
90990 + .param3 = 1,
90991 +};
90992 +struct size_overflow_hash _001845_hash = {
90993 + .next = NULL,
90994 + .name = "rts51x_transfer_data_partial",
90995 + .file = "drivers/staging/rts5139/rts51x_transport.c",
90996 + .param6 = 1,
90997 +};
90998 +struct size_overflow_hash _001846_hash = {
90999 + .next = NULL,
91000 + .name = "store_debug_level",
91001 + .file = "drivers/staging/rtl8192u/ieee80211/ieee80211_module.c",
91002 + .param3 = 1,
91003 +};
91004 +struct size_overflow_hash _001847_hash = {
91005 + .next = NULL,
91006 + .name = "usb_buffer_alloc",
91007 + .file = "drivers/staging/rts5139/rts51x.h",
91008 + .param2 = 1,
91009 +};
91010 +struct size_overflow_hash _001848_hash = {
91011 + .next = NULL,
91012 + .name = "alloc_apertures",
91013 + .file = "include/linux/fb.h",
91014 + .param1 = 1,
91015 +};
91016 +struct size_overflow_hash _001849_hash = {
91017 + .next = NULL,
91018 + .name = "bin_uuid",
91019 + .file = "kernel/sysctl_binary.c",
91020 + .param3 = 1,
91021 +};
91022 +struct size_overflow_hash _001850_hash = {
91023 + .next = &_000640_hash,
91024 + .name = "__copy_from_user_inatomic_nocache",
91025 + .file = "arch/x86/include/asm/uaccess_64.h",
91026 + .param3 = 1,
91027 +};
91028 +struct size_overflow_hash _001851_hash = {
91029 + .next = NULL,
91030 + .name = "do_dmabuf_dirty_sou",
91031 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
91032 + .param7 = 1,
91033 +};
91034 +struct size_overflow_hash _001852_hash = {
91035 + .next = NULL,
91036 + .name = "do_surface_dirty_sou",
91037 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
91038 + .param7 = 1,
91039 +};
91040 +struct size_overflow_hash _001853_hash = {
91041 + .next = NULL,
91042 + .name = "drm_agp_bind_pages",
91043 + .file = "drivers/gpu/drm/drm_agpsupport.c",
91044 + .param3 = 1,
91045 +};
91046 +struct size_overflow_hash _001854_hash = {
91047 + .next = NULL,
91048 + .name = "drm_calloc_large",
91049 + .file = "include/drm/drm_mem_util.h",
91050 + .param1 = 1,
91051 + .param2 = 1,
91052 +};
91053 +struct size_overflow_hash _001856_hash = {
91054 + .next = NULL,
91055 + .name = "drm_ht_create",
91056 + .file = "drivers/gpu/drm/drm_hashtab.c",
91057 + .param2 = 1,
91058 +};
91059 +struct size_overflow_hash _001857_hash = {
91060 + .next = NULL,
91061 + .name = "drm_malloc_ab",
91062 + .file = "include/drm/drm_mem_util.h",
91063 + .param1 = 1,
91064 + .param2 = 1,
91065 +};
91066 +struct size_overflow_hash _001859_hash = {
91067 + .next = NULL,
91068 + .name = "drm_plane_init",
91069 + .file = "drivers/gpu/drm/drm_crtc.c",
91070 + .param6 = 1,
91071 +};
91072 +struct size_overflow_hash _001860_hash = {
91073 + .next = NULL,
91074 + .name = "drm_vmalloc_dma",
91075 + .file = "drivers/gpu/drm/drm_scatter.c",
91076 + .param1 = 1,
91077 +};
91078 +struct size_overflow_hash _001861_hash = {
91079 + .next = NULL,
91080 + .name = "fb_read",
91081 + .file = "drivers/video/fbmem.c",
91082 + .param3 = 1,
91083 +};
91084 +struct size_overflow_hash _001862_hash = {
91085 + .next = NULL,
91086 + .name = "fb_write",
91087 + .file = "drivers/video/fbmem.c",
91088 + .param3 = 1,
91089 +};
91090 +struct size_overflow_hash _001863_hash = {
91091 + .next = NULL,
91092 + .name = "framebuffer_alloc",
91093 + .file = "include/linux/fb.h",
91094 + .param1 = 1,
91095 +};
91096 +struct size_overflow_hash _001864_hash = {
91097 + .next = NULL,
91098 + .name = "i915_cache_sharing_read",
91099 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
91100 + .param3 = 1,
91101 +};
91102 +struct size_overflow_hash _001865_hash = {
91103 + .next = NULL,
91104 + .name = "i915_cache_sharing_write",
91105 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
91106 + .param3 = 1,
91107 +};
91108 +struct size_overflow_hash _001866_hash = {
91109 + .next = NULL,
91110 + .name = "i915_max_freq_read",
91111 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
91112 + .param3 = 1,
91113 +};
91114 +struct size_overflow_hash _001867_hash = {
91115 + .next = NULL,
91116 + .name = "i915_max_freq_write",
91117 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
91118 + .param3 = 1,
91119 +};
91120 +struct size_overflow_hash _001868_hash = {
91121 + .next = NULL,
91122 + .name = "i915_wedged_read",
91123 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
91124 + .param3 = 1,
91125 +};
91126 +struct size_overflow_hash _001869_hash = {
91127 + .next = NULL,
91128 + .name = "i915_wedged_write",
91129 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
91130 + .param3 = 1,
91131 +};
91132 +struct size_overflow_hash _001870_hash = {
91133 + .next = NULL,
91134 + .name = "__module_alloc",
91135 + .file = "arch/x86/kernel/module.c",
91136 + .param1 = 1,
91137 +};
91138 +struct size_overflow_hash _001871_hash = {
91139 + .next = NULL,
91140 + .name = "module_alloc_update_bounds_rw",
91141 + .file = "kernel/module.c",
91142 + .param1 = 1,
91143 +};
91144 +struct size_overflow_hash _001872_hash = {
91145 + .next = NULL,
91146 + .name = "module_alloc_update_bounds_rx",
91147 + .file = "kernel/module.c",
91148 + .param1 = 1,
91149 +};
91150 +struct size_overflow_hash _001873_hash = {
91151 + .next = NULL,
91152 + .name = "p9_client_read",
91153 + .file = "include/net/9p/client.h",
91154 + .param5 = 1,
91155 +};
91156 +struct size_overflow_hash _001874_hash = {
91157 + .next = NULL,
91158 + .name = "probe_kernel_write",
91159 + .file = "include/linux/uaccess.h",
91160 + .param3 = 1,
91161 +};
91162 +struct size_overflow_hash _001875_hash = {
91163 + .next = NULL,
91164 + .name = "sched_feat_write",
91165 + .file = "kernel/sched/core.c",
91166 + .param3 = 1,
91167 +};
91168 +struct size_overflow_hash _001876_hash = {
91169 + .next = NULL,
91170 + .name = "tstats_write",
91171 + .file = "kernel/time/timer_stats.c",
91172 + .param3 = 1,
91173 +};
91174 +struct size_overflow_hash _001877_hash = {
91175 + .next = NULL,
91176 + .name = "ttm_bo_fbdev_io",
91177 + .file = "drivers/gpu/drm/ttm/ttm_bo_vm.c",
91178 + .param4 = 1,
91179 +};
91180 +struct size_overflow_hash _001878_hash = {
91181 + .next = NULL,
91182 + .name = "ttm_bo_io",
91183 + .file = "drivers/gpu/drm/ttm/ttm_bo_vm.c",
91184 + .param5 = 1,
91185 +};
91186 +struct size_overflow_hash _001879_hash = {
91187 + .next = NULL,
91188 + .name = "ttm_dma_page_pool_free",
91189 + .file = "drivers/gpu/drm/ttm/ttm_page_alloc_dma.c",
91190 + .param2 = 1,
91191 +};
91192 +struct size_overflow_hash _001880_hash = {
91193 + .next = NULL,
91194 + .name = "ttm_page_pool_free",
91195 + .file = "drivers/gpu/drm/ttm/ttm_page_alloc.c",
91196 + .param2 = 1,
91197 +};
91198 +struct size_overflow_hash _001881_hash = {
91199 + .next = NULL,
91200 + .name = "vmw_execbuf_process",
91201 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c",
91202 + .param5 = 1,
91203 +};
91204 +struct size_overflow_hash _001882_hash = {
91205 + .next = NULL,
91206 + .name = "vmw_fifo_reserve",
91207 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c",
91208 + .param2 = 1,
91209 +};
91210 +struct size_overflow_hash _001883_hash = {
91211 + .next = NULL,
91212 + .name = "vmw_kms_present",
91213 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
91214 + .param9 = 1,
91215 +};
91216 +struct size_overflow_hash _001884_hash = {
91217 + .next = NULL,
91218 + .name = "vmw_kms_readback",
91219 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
91220 + .param6 = 1,
91221 +};
91222 +struct size_overflow_hash _001885_hash = {
91223 + .next = NULL,
91224 + .name = "__copy_from_user_inatomic_nocache",
91225 + .file = "arch/x86/include/asm/uaccess_32.h",
91226 + .param3 = 1,
91227 +};
91228 +struct size_overflow_hash _001886_hash = {
91229 + .next = NULL,
91230 + .name = "arcfb_write",
91231 + .file = "drivers/video/arcfb.c",
91232 + .param3 = 1,
91233 +};
91234 +struct size_overflow_hash _001887_hash = {
91235 + .next = NULL,
91236 + .name = "ath6kl_usb_submit_ctrl_in",
91237 + .file = "drivers/net/wireless/ath/ath6kl/usb.c",
91238 + .param6 = 1,
91239 +};
91240 +struct size_overflow_hash _001888_hash = {
91241 + .next = NULL,
91242 + .name = "ath6kl_usb_submit_ctrl_out",
91243 + .file = "drivers/net/wireless/ath/ath6kl/usb.c",
91244 + .param6 = 1,
91245 +};
91246 +struct size_overflow_hash _001889_hash = {
91247 + .next = NULL,
91248 + .name = "blk_dropped_read",
91249 + .file = "kernel/trace/blktrace.c",
91250 + .param3 = 1,
91251 +};
91252 +struct size_overflow_hash _001890_hash = {
91253 + .next = NULL,
91254 + .name = "blk_msg_write",
91255 + .file = "kernel/trace/blktrace.c",
91256 + .param3 = 1,
91257 +};
91258 +struct size_overflow_hash _001891_hash = {
91259 + .next = NULL,
91260 + .name = "broadsheetfb_write",
91261 + .file = "drivers/video/broadsheetfb.c",
91262 + .param3 = 1,
91263 +};
91264 +struct size_overflow_hash _001892_hash = {
91265 + .next = NULL,
91266 + .name = "cyttsp_probe",
91267 + .file = "drivers/input/touchscreen/cyttsp_core.c",
91268 + .param4 = 1,
91269 +};
91270 +struct size_overflow_hash _001893_hash = {
91271 + .next = NULL,
91272 + .name = "da9052_group_write",
91273 + .file = "include/linux/mfd/da9052/da9052.h",
91274 + .param3 = 1,
91275 +};
91276 +struct size_overflow_hash _001894_hash = {
91277 + .next = NULL,
91278 + .name = "dccpprobe_read",
91279 + .file = "net/dccp/probe.c",
91280 + .param3 = 1,
91281 +};
91282 +struct size_overflow_hash _001895_hash = {
91283 + .next = NULL,
91284 + .name = "__devres_alloc",
91285 + .file = "include/linux/device.h",
91286 + .param2 = 1,
91287 +};
91288 +struct size_overflow_hash _001896_hash = {
91289 + .next = NULL,
91290 + .name = "event_enable_read",
91291 + .file = "kernel/trace/trace_events.c",
91292 + .param3 = 1,
91293 +};
91294 +struct size_overflow_hash _001897_hash = {
91295 + .next = NULL,
91296 + .name = "event_filter_read",
91297 + .file = "kernel/trace/trace_events.c",
91298 + .param3 = 1,
91299 +};
91300 +struct size_overflow_hash _001898_hash = {
91301 + .next = NULL,
91302 + .name = "event_filter_write",
91303 + .file = "kernel/trace/trace_events.c",
91304 + .param3 = 1,
91305 +};
91306 +struct size_overflow_hash _001899_hash = {
91307 + .next = NULL,
91308 + .name = "event_id_read",
91309 + .file = "kernel/trace/trace_events.c",
91310 + .param3 = 1,
91311 +};
91312 +struct size_overflow_hash _001900_hash = {
91313 + .next = NULL,
91314 + .name = "fb_sys_read",
91315 + .file = "include/linux/fb.h",
91316 + .param3 = 1,
91317 +};
91318 +struct size_overflow_hash _001901_hash = {
91319 + .next = NULL,
91320 + .name = "fb_sys_write",
91321 + .file = "include/linux/fb.h",
91322 + .param3 = 1,
91323 +};
91324 +struct size_overflow_hash _001902_hash = {
91325 + .next = NULL,
91326 + .name = "ftrace_pid_write",
91327 + .file = "kernel/trace/ftrace.c",
91328 + .param3 = 1,
91329 +};
91330 +struct size_overflow_hash _001903_hash = {
91331 + .next = NULL,
91332 + .name = "ftrace_profile_read",
91333 + .file = "kernel/trace/ftrace.c",
91334 + .param3 = 1,
91335 +};
91336 +struct size_overflow_hash _001904_hash = {
91337 + .next = NULL,
91338 + .name = "hecubafb_write",
91339 + .file = "drivers/video/hecubafb.c",
91340 + .param3 = 1,
91341 +};
91342 +struct size_overflow_hash _001905_hash = {
91343 + .next = NULL,
91344 + .name = "hsc_msg_alloc",
91345 + .file = "drivers/hsi/clients/hsi_char.c",
91346 + .param1 = 1,
91347 +};
91348 +struct size_overflow_hash _001906_hash = {
91349 + .next = NULL,
91350 + .name = "hsc_write",
91351 + .file = "drivers/hsi/clients/hsi_char.c",
91352 + .param3 = 1,
91353 +};
91354 +struct size_overflow_hash _001907_hash = {
91355 + .next = NULL,
91356 + .name = "hsi_alloc_controller",
91357 + .file = "include/linux/hsi/hsi.h",
91358 + .param1 = 1,
91359 +};
91360 +struct size_overflow_hash _001908_hash = {
91361 + .next = NULL,
91362 + .name = "hsi_register_board_info",
91363 + .file = "include/linux/hsi/hsi.h",
91364 + .param2 = 1,
91365 +};
91366 +struct size_overflow_hash _001909_hash = {
91367 + .next = NULL,
91368 + .name = "ivtvfb_write",
91369 + .file = "drivers/media/video/ivtv/ivtvfb.c",
91370 + .param3 = 1,
91371 +};
91372 +struct size_overflow_hash _001910_hash = {
91373 + .next = NULL,
91374 + .name = "metronomefb_write",
91375 + .file = "drivers/video/metronomefb.c",
91376 + .param3 = 1,
91377 +};
91378 +struct size_overflow_hash _001911_hash = {
91379 + .next = NULL,
91380 + .name = "odev_update",
91381 + .file = "drivers/video/via/viafbdev.c",
91382 + .param2 = 1,
91383 +};
91384 +struct size_overflow_hash _001912_hash = {
91385 + .next = NULL,
91386 + .name = "oz_add_farewell",
91387 + .file = "drivers/staging/ozwpan/ozproto.c",
91388 + .param5 = 1,
91389 +};
91390 +struct size_overflow_hash _001913_hash = {
91391 + .next = NULL,
91392 + .name = "oz_cdev_read",
91393 + .file = "drivers/staging/ozwpan/ozcdev.c",
91394 + .param3 = 1,
91395 +};
91396 +struct size_overflow_hash _001914_hash = {
91397 + .next = NULL,
91398 + .name = "oz_cdev_write",
91399 + .file = "drivers/staging/ozwpan/ozcdev.c",
91400 + .param3 = 1,
91401 +};
91402 +struct size_overflow_hash _001915_hash = {
91403 + .next = NULL,
91404 + .name = "pmcraid_copy_sglist",
91405 + .file = "drivers/scsi/pmcraid.c",
91406 + .param3 = 1,
91407 +};
91408 +struct size_overflow_hash _001916_hash = {
91409 + .next = NULL,
91410 + .name = "probes_write",
91411 + .file = "kernel/trace/trace_kprobe.c",
91412 + .param3 = 1,
91413 +};
91414 +struct size_overflow_hash _001917_hash = {
91415 + .next = NULL,
91416 + .name = "proc_fault_inject_read",
91417 + .file = "fs/proc/base.c",
91418 + .param3 = 1,
91419 +};
91420 +struct size_overflow_hash _001918_hash = {
91421 + .next = NULL,
91422 + .name = "proc_fault_inject_write",
91423 + .file = "fs/proc/base.c",
91424 + .param3 = 1,
91425 +};
91426 +struct size_overflow_hash _001919_hash = {
91427 + .next = NULL,
91428 + .name = "rb_simple_read",
91429 + .file = "kernel/trace/trace.c",
91430 + .param3 = 1,
91431 +};
91432 +struct size_overflow_hash _001920_hash = {
91433 + .next = NULL,
91434 + .name = "read_file_dfs",
91435 + .file = "drivers/net/wireless/ath/ath9k/dfs_debug.c",
91436 + .param3 = 1,
91437 +};
91438 +struct size_overflow_hash _001921_hash = {
91439 + .next = NULL,
91440 + .name = "show_header",
91441 + .file = "kernel/trace/trace_events.c",
91442 + .param3 = 1,
91443 +};
91444 +struct size_overflow_hash _001922_hash = {
91445 + .next = NULL,
91446 + .name = "stack_max_size_read",
91447 + .file = "kernel/trace/trace_stack.c",
91448 + .param3 = 1,
91449 +};
91450 +struct size_overflow_hash _001923_hash = {
91451 + .next = NULL,
91452 + .name = "subsystem_filter_read",
91453 + .file = "kernel/trace/trace_events.c",
91454 + .param3 = 1,
91455 +};
91456 +struct size_overflow_hash _001924_hash = {
91457 + .next = NULL,
91458 + .name = "subsystem_filter_write",
91459 + .file = "kernel/trace/trace_events.c",
91460 + .param3 = 1,
91461 +};
91462 +struct size_overflow_hash _001925_hash = {
91463 + .next = NULL,
91464 + .name = "system_enable_read",
91465 + .file = "kernel/trace/trace_events.c",
91466 + .param3 = 1,
91467 +};
91468 +struct size_overflow_hash _001926_hash = {
91469 + .next = NULL,
91470 + .name = "trace_options_core_read",
91471 + .file = "kernel/trace/trace.c",
91472 + .param3 = 1,
91473 +};
91474 +struct size_overflow_hash _001927_hash = {
91475 + .next = NULL,
91476 + .name = "trace_options_read",
91477 + .file = "kernel/trace/trace.c",
91478 + .param3 = 1,
91479 +};
91480 +struct size_overflow_hash _001928_hash = {
91481 + .next = NULL,
91482 + .name = "trace_seq_to_user",
91483 + .file = "include/linux/trace_seq.h",
91484 + .param3 = 1,
91485 +};
91486 +struct size_overflow_hash _001929_hash = {
91487 + .next = NULL,
91488 + .name = "tracing_buffers_read",
91489 + .file = "kernel/trace/trace.c",
91490 + .param3 = 1,
91491 +};
91492 +struct size_overflow_hash _001930_hash = {
91493 + .next = NULL,
91494 + .name = "tracing_clock_write",
91495 + .file = "kernel/trace/trace.c",
91496 + .param3 = 1,
91497 +};
91498 +struct size_overflow_hash _001931_hash = {
91499 + .next = NULL,
91500 + .name = "tracing_cpumask_read",
91501 + .file = "kernel/trace/trace.c",
91502 + .param3 = 1,
91503 +};
91504 +struct size_overflow_hash _001932_hash = {
91505 + .next = NULL,
91506 + .name = "tracing_ctrl_read",
91507 + .file = "kernel/trace/trace.c",
91508 + .param3 = 1,
91509 +};
91510 +struct size_overflow_hash _001933_hash = {
91511 + .next = NULL,
91512 + .name = "tracing_entries_read",
91513 + .file = "kernel/trace/trace.c",
91514 + .param3 = 1,
91515 +};
91516 +struct size_overflow_hash _001934_hash = {
91517 + .next = NULL,
91518 + .name = "tracing_max_lat_read",
91519 + .file = "kernel/trace/trace.c",
91520 + .param3 = 1,
91521 +};
91522 +struct size_overflow_hash _001935_hash = {
91523 + .next = NULL,
91524 + .name = "tracing_read_dyn_info",
91525 + .file = "kernel/trace/trace.c",
91526 + .param3 = 1,
91527 +};
91528 +struct size_overflow_hash _001936_hash = {
91529 + .next = NULL,
91530 + .name = "tracing_readme_read",
91531 + .file = "kernel/trace/trace.c",
91532 + .param3 = 1,
91533 +};
91534 +struct size_overflow_hash _001937_hash = {
91535 + .next = NULL,
91536 + .name = "tracing_saved_cmdlines_read",
91537 + .file = "kernel/trace/trace.c",
91538 + .param3 = 1,
91539 +};
91540 +struct size_overflow_hash _001938_hash = {
91541 + .next = NULL,
91542 + .name = "tracing_set_trace_read",
91543 + .file = "kernel/trace/trace.c",
91544 + .param3 = 1,
91545 +};
91546 +struct size_overflow_hash _001939_hash = {
91547 + .next = NULL,
91548 + .name = "tracing_set_trace_write",
91549 + .file = "kernel/trace/trace.c",
91550 + .param3 = 1,
91551 +};
91552 +struct size_overflow_hash _001940_hash = {
91553 + .next = NULL,
91554 + .name = "tracing_stats_read",
91555 + .file = "kernel/trace/trace.c",
91556 + .param3 = 1,
91557 +};
91558 +struct size_overflow_hash _001941_hash = {
91559 + .next = NULL,
91560 + .name = "tracing_total_entries_read",
91561 + .file = "kernel/trace/trace.c",
91562 + .param3 = 1,
91563 +};
91564 +struct size_overflow_hash _001942_hash = {
91565 + .next = NULL,
91566 + .name = "tracing_trace_options_write",
91567 + .file = "kernel/trace/trace.c",
91568 + .param3 = 1,
91569 +};
91570 +struct size_overflow_hash _001943_hash = {
91571 + .next = NULL,
91572 + .name = "ufx_alloc_urb_list",
91573 + .file = "drivers/video/smscufx.c",
91574 + .param3 = 1,
91575 +};
91576 +struct size_overflow_hash _001944_hash = {
91577 + .next = NULL,
91578 + .name = "u_memcpya",
91579 + .file = "drivers/gpu/drm/nouveau/nouveau_gem.c",
91580 + .param2 = 1,
91581 + .param3 = 1,
91582 +};
91583 +struct size_overflow_hash _001946_hash = {
91584 + .next = NULL,
91585 + .name = "v9fs_fid_readn",
91586 + .file = "fs/9p/vfs_file.c",
91587 + .param4 = 1,
91588 +};
91589 +struct size_overflow_hash _001947_hash = {
91590 + .next = NULL,
91591 + .name = "v9fs_file_read",
91592 + .file = "fs/9p/vfs_file.c",
91593 + .param3 = 1,
91594 +};
91595 +struct size_overflow_hash _001948_hash = {
91596 + .next = NULL,
91597 + .name = "viafb_dfph_proc_write",
91598 + .file = "drivers/video/via/viafbdev.c",
91599 + .param3 = 1,
91600 +};
91601 +struct size_overflow_hash _001949_hash = {
91602 + .next = NULL,
91603 + .name = "viafb_dfpl_proc_write",
91604 + .file = "drivers/video/via/viafbdev.c",
91605 + .param3 = 1,
91606 +};
91607 +struct size_overflow_hash _001950_hash = {
91608 + .next = NULL,
91609 + .name = "viafb_dvp0_proc_write",
91610 + .file = "drivers/video/via/viafbdev.c",
91611 + .param3 = 1,
91612 +};
91613 +struct size_overflow_hash _001951_hash = {
91614 + .next = NULL,
91615 + .name = "viafb_dvp1_proc_write",
91616 + .file = "drivers/video/via/viafbdev.c",
91617 + .param3 = 1,
91618 +};
91619 +struct size_overflow_hash _001952_hash = {
91620 + .next = NULL,
91621 + .name = "viafb_vt1636_proc_write",
91622 + .file = "drivers/video/via/viafbdev.c",
91623 + .param3 = 1,
91624 +};
91625 +struct size_overflow_hash _001953_hash = {
91626 + .next = NULL,
91627 + .name = "vivi_read",
91628 + .file = "drivers/media/video/vivi.c",
91629 + .param3 = 1,
91630 +};
91631 +struct size_overflow_hash *size_overflow_hash[65536] = {
91632 + [56878] = &_000001_hash,
91633 + [11151] = &_000002_hash,
91634 + [17854] = &_000003_hash,
91635 + [4132] = &_000004_hash,
91636 + [39070] = &_000005_hash,
91637 + [35447] = &_000007_hash,
91638 + [47830] = &_000008_hash,
91639 + [65254] = &_000009_hash,
91640 + [17521] = &_000011_hash,
91641 + [41425] = &_000012_hash,
91642 + [5785] = &_000013_hash,
91643 + [19960] = &_000014_hash,
91644 + [26729] = &_000015_hash,
91645 + [7954] = &_000016_hash,
91646 + [22403] = &_000017_hash,
91647 + [23258] = &_000018_hash,
91648 + [55695] = &_000019_hash,
91649 + [38964] = &_000020_hash,
91650 + [64250] = &_000021_hash,
91651 + [31825] = &_000022_hash,
91652 + [47446] = &_000023_hash,
91653 + [61521] = &_000024_hash,
91654 + [64227] = &_000025_hash,
91655 + [53378] = &_000026_hash,
91656 + [8885] = &_000027_hash,
91657 + [62101] = &_000028_hash,
91658 + [18152] = &_000029_hash,
91659 + [37525] = &_000030_hash,
91660 + [25827] = &_000031_hash,
91661 + [1169] = &_000032_hash,
91662 + [11925] = &_000033_hash,
91663 + [20558] = &_000034_hash,
91664 + [44019] = &_000035_hash,
91665 + [21909] = &_000036_hash,
91666 + [63679] = &_000037_hash,
91667 + [39450] = &_000038_hash,
91668 + [25085] = &_000039_hash,
91669 + [17830] = &_000040_hash,
91670 + [14329] = &_000041_hash,
91671 + [31235] = &_000042_hash,
91672 + [48207] = &_000043_hash,
91673 + [34918] = &_000044_hash,
91674 + [46839] = &_000045_hash,
91675 + [57930] = &_000046_hash,
91676 + [41364] = &_000047_hash,
91677 + [17581] = &_000048_hash,
91678 + [45922] = &_000049_hash,
91679 + [49567] = &_000050_hash,
91680 + [18248] = &_000051_hash,
91681 + [25528] = &_000052_hash,
91682 + [61874] = &_000053_hash,
91683 + [22591] = &_000054_hash,
91684 + [48456] = &_000055_hash,
91685 + [8743] = &_000056_hash,
91686 + [39131] = &_000057_hash,
91687 + [48328] = &_000058_hash,
91688 + [47136] = &_000059_hash,
91689 + [6358] = &_000060_hash,
91690 + [12252] = &_000061_hash,
91691 + [49340] = &_000062_hash,
91692 + [45875] = &_000063_hash,
91693 + [52182] = &_000065_hash,
91694 + [31149] = &_000067_hash,
91695 + [20455] = &_000068_hash,
91696 + [19917] = &_000070_hash,
91697 + [64771] = &_000071_hash,
91698 + [25140] = &_000072_hash,
91699 + [34097] = &_000073_hash,
91700 + [58131] = &_000074_hash,
91701 + [65311] = &_000075_hash,
91702 + [60609] = &_000076_hash,
91703 + [1917] = &_000077_hash,
91704 + [15337] = &_000078_hash,
91705 + [4732] = &_000079_hash,
91706 + [38783] = &_000080_hash,
91707 + [37249] = &_000081_hash,
91708 + [9234] = &_000082_hash,
91709 + [33309] = &_000083_hash,
91710 + [22389] = &_000084_hash,
91711 + [56319] = &_000085_hash,
91712 + [21496] = &_000086_hash,
91713 + [8163] = &_000087_hash,
91714 + [58766] = &_000088_hash,
91715 + [21048] = &_000089_hash,
91716 + [51221] = &_000090_hash,
91717 + [21498] = &_000091_hash,
91718 + [42627] = &_000092_hash,
91719 + [53059] = &_000094_hash,
91720 + [52870] = &_000095_hash,
91721 + [1567] = &_000096_hash,
91722 + [38330] = &_000097_hash,
91723 + [30892] = &_000098_hash,
91724 + [16927] = &_000099_hash,
91725 + [16461] = &_000100_hash,
91726 + [5634] = &_000101_hash,
91727 + [16496] = &_000103_hash,
91728 + [40012] = &_000104_hash,
91729 + [46014] = &_000105_hash,
91730 + [39600] = &_000106_hash,
91731 + [7435] = &_000107_hash,
91732 + [13332] = &_000109_hash,
91733 + [36665] = &_000110_hash,
91734 + [12413] = &_000111_hash,
91735 + [27279] = &_000112_hash,
91736 + [44774] = &_000113_hash,
91737 + [14479] = &_000114_hash,
91738 + [32447] = &_000115_hash,
91739 + [15439] = &_000116_hash,
91740 + [17932] = &_000117_hash,
91741 + [26096] = &_000118_hash,
91742 + [50814] = &_000119_hash,
91743 + [22598] = &_000120_hash,
91744 + [48287] = &_000121_hash,
91745 + [15611] = &_000122_hash,
91746 + [13414] = &_000123_hash,
91747 + [40371] = &_000124_hash,
91748 + [284] = &_000125_hash,
91749 + [6293] = &_000127_hash,
91750 + [60587] = &_000128_hash,
91751 + [8181] = &_000129_hash,
91752 + [27451] = &_000130_hash,
91753 + [29259] = &_000131_hash,
91754 + [41172] = &_000132_hash,
91755 + [3315] = &_000133_hash,
91756 + [37550] = &_000134_hash,
91757 + [40395] = &_000135_hash,
91758 + [24124] = &_000136_hash,
91759 + [63535] = &_000137_hash,
91760 + [14981] = &_000138_hash,
91761 + [52008] = &_000139_hash,
91762 + [22091] = &_000140_hash,
91763 + [64800] = &_000141_hash,
91764 + [14919] = &_000142_hash,
91765 + [60340] = &_000143_hash,
91766 + [34205] = &_000145_hash,
91767 + [65246] = &_000146_hash,
91768 + [1299] = &_000147_hash,
91769 + [33165] = &_000148_hash,
91770 + [22394] = &_000149_hash,
91771 + [49562] = &_000150_hash,
91772 + [56881] = &_000151_hash,
91773 + [13870] = &_000152_hash,
91774 + [65074] = &_000153_hash,
91775 + [11553] = &_000154_hash,
91776 + [43222] = &_000155_hash,
91777 + [17984] = &_000156_hash,
91778 + [26811] = &_000157_hash,
91779 + [30848] = &_000158_hash,
91780 + [15627] = &_000159_hash,
91781 + [43101] = &_000160_hash,
91782 + [4082] = &_000161_hash,
91783 + [43692] = &_000162_hash,
91784 + [21622] = &_000163_hash,
91785 + [50734] = &_000164_hash,
91786 + [803] = &_000166_hash,
91787 + [64674] = &_000168_hash,
91788 + [57538] = &_000170_hash,
91789 + [42442] = &_000171_hash,
91790 + [23031] = &_000172_hash,
91791 + [40663] = &_000173_hash,
91792 + [51180] = &_000174_hash,
91793 + [24173] = &_000175_hash,
91794 + [9286] = &_000176_hash,
91795 + [49517] = &_000177_hash,
91796 + [34878] = &_000180_hash,
91797 + [22819] = &_000181_hash,
91798 + [64314] = &_000182_hash,
91799 + [20494] = &_000183_hash,
91800 + [9483] = &_000184_hash,
91801 + [26518] = &_000185_hash,
91802 + [44651] = &_000186_hash,
91803 + [1188] = &_000187_hash,
91804 + [36031] = &_000188_hash,
91805 + [33469] = &_000189_hash,
91806 + [19672] = &_000190_hash,
91807 + [3216] = &_000191_hash,
91808 + [25071] = &_000192_hash,
91809 + [11744] = &_000194_hash,
91810 + [2358] = &_000196_hash,
91811 + [10146] = &_000198_hash,
91812 + [58709] = &_000199_hash,
91813 + [64773] = &_000200_hash,
91814 + [6159] = &_000201_hash,
91815 + [28617] = &_000202_hash,
91816 + [61067] = &_000203_hash,
91817 + [12884] = &_000204_hash,
91818 + [37308] = &_000205_hash,
91819 + [59973] = &_000206_hash,
91820 + [35895] = &_000207_hash,
91821 + [24951] = &_000208_hash,
91822 + [3070] = &_000209_hash,
91823 + [61023] = &_000210_hash,
91824 + [45702] = &_000211_hash,
91825 + [5533] = &_000212_hash,
91826 + [29186] = &_000213_hash,
91827 + [26311] = &_000214_hash,
91828 + [40182] = &_000215_hash,
91829 + [50505] = &_000216_hash,
91830 + [59061] = &_000217_hash,
91831 + [27511] = &_000218_hash,
91832 + [63286] = &_000219_hash,
91833 + [6678] = &_000220_hash,
91834 + [23065] = &_000222_hash,
91835 + [18156] = &_000223_hash,
91836 + [53757] = &_000224_hash,
91837 + [53720] = &_000225_hash,
91838 + [50241] = &_000226_hash,
91839 + [22498] = &_000227_hash,
91840 + [10991] = &_000228_hash,
91841 + [40026] = &_000229_hash,
91842 + [19995] = &_000230_hash,
91843 + [30445] = &_000231_hash,
91844 + [57691] = &_000232_hash,
91845 + [23150] = &_000233_hash,
91846 + [9960] = &_000234_hash,
91847 + [8736] = &_000235_hash,
91848 + [23750] = &_000237_hash,
91849 + [18393] = &_000238_hash,
91850 + [28541] = &_000240_hash,
91851 + [59944] = &_000241_hash,
91852 + [35042] = &_000242_hash,
91853 + [63488] = &_000243_hash,
91854 + [27286] = &_000244_hash,
91855 + [46922] = &_000245_hash,
91856 + [11860] = &_000246_hash,
91857 + [52928] = &_000247_hash,
91858 + [46714] = &_000248_hash,
91859 + [57313] = &_000249_hash,
91860 + [61978] = &_000250_hash,
91861 + [61063] = &_000251_hash,
91862 + [22271] = &_000252_hash,
91863 + [4214] = &_000253_hash,
91864 + [46247] = &_000254_hash,
91865 + [33246] = &_000255_hash,
91866 + [58325] = &_000257_hash,
91867 + [47399] = &_000259_hash,
91868 + [34963] = &_000260_hash,
91869 + [21221] = &_000261_hash,
91870 + [32211] = &_000262_hash,
91871 + [20854] = &_000263_hash,
91872 + [49351] = &_000264_hash,
91873 + [52341] = &_000265_hash,
91874 + [53533] = &_000266_hash,
91875 + [52267] = &_000267_hash,
91876 + [46753] = &_000268_hash,
91877 + [2115] = &_000269_hash,
91878 + [44017] = &_000271_hash,
91879 + [13495] = &_000272_hash,
91880 + [12988] = &_000273_hash,
91881 + [55227] = &_000274_hash,
91882 + [47762] = &_000276_hash,
91883 + [17613] = &_000277_hash,
91884 + [52037] = &_000278_hash,
91885 + [5994] = &_000279_hash,
91886 + [46818] = &_000280_hash,
91887 + [13467] = &_000281_hash,
91888 + [61848] = &_000282_hash,
91889 + [43082] = &_000284_hash,
91890 + [55732] = &_000286_hash,
91891 + [2543] = &_000287_hash,
91892 + [51694] = &_000288_hash,
91893 + [18402] = &_000289_hash,
91894 + [38282] = &_000290_hash,
91895 + [5456] = &_000291_hash,
91896 + [58261] = &_000292_hash,
91897 + [24792] = &_000293_hash,
91898 + [6422] = &_000294_hash,
91899 + [63953] = &_000295_hash,
91900 + [27384] = &_000296_hash,
91901 + [47213] = &_000297_hash,
91902 + [23548] = &_000298_hash,
91903 + [47858] = &_000299_hash,
91904 + [52501] = &_000300_hash,
91905 + [12475] = &_000301_hash,
91906 + [52921] = &_000302_hash,
91907 + [19120] = &_000303_hash,
91908 + [14355] = &_000304_hash,
91909 + [30563] = &_000305_hash,
91910 + [14942] = &_000306_hash,
91911 + [30969] = &_000307_hash,
91912 + [57776] = &_000308_hash,
91913 + [21956] = &_000309_hash,
91914 + [44050] = &_000310_hash,
91915 + [2193] = &_000311_hash,
91916 + [44818] = &_000312_hash,
91917 + [50616] = &_000313_hash,
91918 + [49299] = &_000314_hash,
91919 + [2796] = &_000315_hash,
91920 + [4190] = &_000316_hash,
91921 + [11548] = &_000317_hash,
91922 + [53798] = &_000318_hash,
91923 + [60370] = &_000319_hash,
91924 + [35863] = &_000320_hash,
91925 + [54595] = &_000322_hash,
91926 + [2808] = &_000323_hash,
91927 + [24656] = &_000324_hash,
91928 + [895] = &_000325_hash,
91929 + [32809] = &_000326_hash,
91930 + [55621] = &_000327_hash,
91931 + [1733] = &_000328_hash,
91932 + [36069] = &_000330_hash,
91933 + [23714] = &_000331_hash,
91934 + [26020] = &_000332_hash,
91935 + [63875] = &_000333_hash,
91936 + [8919] = &_000335_hash,
91937 + [23906] = &_000336_hash,
91938 + [59497] = &_000337_hash,
91939 + [34782] = &_000338_hash,
91940 + [40998] = &_000339_hash,
91941 + [33328] = &_000340_hash,
91942 + [17866] = &_000341_hash,
91943 + [38741] = &_000342_hash,
91944 + [53939] = &_000343_hash,
91945 + [14658] = &_000344_hash,
91946 + [42465] = &_000345_hash,
91947 + [49600] = &_000346_hash,
91948 + [7391] = &_000347_hash,
91949 + [43616] = &_000348_hash,
91950 + [16775] = &_000349_hash,
91951 + [41393] = &_000350_hash,
91952 + [10532] = &_000351_hash,
91953 + [50366] = &_000352_hash,
91954 + [33324] = &_000353_hash,
91955 + [38200] = &_000354_hash,
91956 + [59315] = &_000355_hash,
91957 + [33916] = &_000356_hash,
91958 + [36593] = &_000357_hash,
91959 + [63079] = &_000358_hash,
91960 + [379] = &_000359_hash,
91961 + [34248] = &_000360_hash,
91962 + [27251] = &_000361_hash,
91963 + [29460] = &_000362_hash,
91964 + [7461] = &_000363_hash,
91965 + [9870] = &_000364_hash,
91966 + [44596] = &_000365_hash,
91967 + [45157] = &_000366_hash,
91968 + [55069] = &_000367_hash,
91969 + [29452] = &_000368_hash,
91970 + [54888] = &_000369_hash,
91971 + [31885] = &_000370_hash,
91972 + [20206] = &_000371_hash,
91973 + [20325] = &_000373_hash,
91974 + [18488] = &_000374_hash,
91975 + [22017] = &_000375_hash,
91976 + [57485] = &_000376_hash,
91977 + [49827] = &_000377_hash,
91978 + [37770] = &_000379_hash,
91979 + [52668] = &_000380_hash,
91980 + [13724] = &_000381_hash,
91981 + [59701] = &_000382_hash,
91982 + [11954] = &_000383_hash,
91983 + [9890] = &_000384_hash,
91984 + [17684] = &_000385_hash,
91985 + [18158] = &_000386_hash,
91986 + [61318] = &_000387_hash,
91987 + [2760] = &_000388_hash,
91988 + [38444] = &_000390_hash,
91989 + [55856] = &_000392_hash,
91990 + [34762] = &_000393_hash,
91991 + [48360] = &_000394_hash,
91992 + [40885] = &_000395_hash,
91993 + [36032] = &_000396_hash,
91994 + [52057] = &_000397_hash,
91995 + [12463] = &_000398_hash,
91996 + [30616] = &_000399_hash,
91997 + [38680] = &_000400_hash,
91998 + [41742] = &_000401_hash,
91999 + [50662] = &_000402_hash,
92000 + [48440] = &_000403_hash,
92001 + [34418] = &_000404_hash,
92002 + [64275] = &_000405_hash,
92003 + [12231] = &_000406_hash,
92004 + [53530] = &_000407_hash,
92005 + [54723] = &_000408_hash,
92006 + [19490] = &_000409_hash,
92007 + [11595] = &_000410_hash,
92008 + [15277] = &_000411_hash,
92009 + [4811] = &_000412_hash,
92010 + [42017] = &_000413_hash,
92011 + [17238] = &_000414_hash,
92012 + [55439] = &_000415_hash,
92013 + [45794] = &_000416_hash,
92014 + [60027] = &_000417_hash,
92015 + [3750] = &_000418_hash,
92016 + [11091] = &_000419_hash,
92017 + [32935] = &_000420_hash,
92018 + [22809] = &_000422_hash,
92019 + [60193] = &_000423_hash,
92020 + [14396] = &_000424_hash,
92021 + [18101] = &_000425_hash,
92022 + [46395] = &_000426_hash,
92023 + [24339] = &_000427_hash,
92024 + [26065] = &_000428_hash,
92025 + [43016] = &_000429_hash,
92026 + [41996] = &_000430_hash,
92027 + [7371] = &_000431_hash,
92028 + [32968] = &_000432_hash,
92029 + [53082] = &_000433_hash,
92030 + [38798] = &_000434_hash,
92031 + [12726] = &_000435_hash,
92032 + [55018] = &_000436_hash,
92033 + [26114] = &_000437_hash,
92034 + [31697] = &_000438_hash,
92035 + [21401] = &_000441_hash,
92036 + [33193] = &_000442_hash,
92037 + [52271] = &_000443_hash,
92038 + [20847] = &_000444_hash,
92039 + [30754] = &_000445_hash,
92040 + [54440] = &_000446_hash,
92041 + [22059] = &_000447_hash,
92042 + [47566] = &_000448_hash,
92043 + [22926] = &_000449_hash,
92044 + [20788] = &_000450_hash,
92045 + [18162] = &_000451_hash,
92046 + [65006] = &_000452_hash,
92047 + [11523] = &_000453_hash,
92048 + [29207] = &_000454_hash,
92049 + [18071] = &_000455_hash,
92050 + [7601] = &_000456_hash,
92051 + [12773] = &_000457_hash,
92052 + [61543] = &_000458_hash,
92053 + [5578] = &_000460_hash,
92054 + [49050] = &_000461_hash,
92055 + [51965] = &_000462_hash,
92056 + [6807] = &_000463_hash,
92057 + [22982] = &_000464_hash,
92058 + [36769] = &_000465_hash,
92059 + [53892] = &_000466_hash,
92060 + [2547] = &_000467_hash,
92061 + [53678] = &_000468_hash,
92062 + [61439] = &_000469_hash,
92063 + [31287] = &_000470_hash,
92064 + [6125] = &_000471_hash,
92065 + [57511] = &_000472_hash,
92066 + [13001] = &_000473_hash,
92067 + [62932] = &_000474_hash,
92068 + [62284] = &_000475_hash,
92069 + [9472] = &_000476_hash,
92070 + [26260] = &_000477_hash,
92071 + [63065] = &_000478_hash,
92072 + [18949] = &_000479_hash,
92073 + [29891] = &_000481_hash,
92074 + [41916] = &_000482_hash,
92075 + [40474] = &_000483_hash,
92076 + [63551] = &_000484_hash,
92077 + [36557] = &_000485_hash,
92078 + [2994] = &_000486_hash,
92079 + [5521] = &_000487_hash,
92080 + [51016] = &_000488_hash,
92081 + [7644] = &_000489_hash,
92082 + [55103] = &_000490_hash,
92083 + [11488] = &_000491_hash,
92084 + [7184] = &_000492_hash,
92085 + [36934] = &_000493_hash,
92086 + [54855] = &_000494_hash,
92087 + [63193] = &_000495_hash,
92088 + [12369] = &_000496_hash,
92089 + [15828] = &_000497_hash,
92090 + [61322] = &_000498_hash,
92091 + [5412] = &_000499_hash,
92092 + [28089] = &_000500_hash,
92093 + [64306] = &_000502_hash,
92094 + [24071] = &_000503_hash,
92095 + [50308] = &_000504_hash,
92096 + [38790] = &_000505_hash,
92097 + [9838] = &_000506_hash,
92098 + [18983] = &_000507_hash,
92099 + [9656] = &_000508_hash,
92100 + [18950] = &_000509_hash,
92101 + [59749] = &_000510_hash,
92102 + [20465] = &_000511_hash,
92103 + [4765] = &_000512_hash,
92104 + [16169] = &_000513_hash,
92105 + [6930] = &_000514_hash,
92106 + [16926] = &_000515_hash,
92107 + [35218] = &_000516_hash,
92108 + [19956] = &_000517_hash,
92109 + [55255] = &_000518_hash,
92110 + [861] = &_000519_hash,
92111 + [26574] = &_000520_hash,
92112 + [26794] = &_000521_hash,
92113 + [2133] = &_000522_hash,
92114 + [44616] = &_000523_hash,
92115 + [12840] = &_000524_hash,
92116 + [60426] = &_000525_hash,
92117 + [18133] = &_000526_hash,
92118 + [30479] = &_000527_hash,
92119 + [3219] = &_000528_hash,
92120 + [36488] = &_000529_hash,
92121 + [62043] = &_000530_hash,
92122 + [21714] = &_000532_hash,
92123 + [48007] = &_000533_hash,
92124 + [49969] = &_000534_hash,
92125 + [7701] = &_000535_hash,
92126 + [11521] = &_000536_hash,
92127 + [4269] = &_000537_hash,
92128 + [37627] = &_000539_hash,
92129 + [33555] = &_000540_hash,
92130 + [25900] = &_000541_hash,
92131 + [31709] = &_000542_hash,
92132 + [44626] = &_000544_hash,
92133 + [1679] = &_000545_hash,
92134 + [18349] = &_000546_hash,
92135 + [15338] = &_000547_hash,
92136 + [57935] = &_000548_hash,
92137 + [55850] = &_000549_hash,
92138 + [36063] = &_000550_hash,
92139 + [56674] = &_000551_hash,
92140 + [21379] = &_000552_hash,
92141 + [18507] = &_000553_hash,
92142 + [55719] = &_000554_hash,
92143 + [31210] = &_000555_hash,
92144 + [36207] = &_000556_hash,
92145 + [64180] = &_000557_hash,
92146 + [41770] = &_000558_hash,
92147 + [11600] = &_000559_hash,
92148 + [36638] = &_000560_hash,
92149 + [25576] = &_000561_hash,
92150 + [7000] = &_000562_hash,
92151 + [34187] = &_000563_hash,
92152 + [58533] = &_000564_hash,
92153 + [5083] = &_000565_hash,
92154 + [62614] = &_000566_hash,
92155 + [20085] = &_000567_hash,
92156 + [1135] = &_000568_hash,
92157 + [25613] = &_000569_hash,
92158 + [9541] = &_000570_hash,
92159 + [30577] = &_000571_hash,
92160 + [35722] = &_000572_hash,
92161 + [60407] = &_000573_hash,
92162 + [29465] = &_000574_hash,
92163 + [46891] = &_000575_hash,
92164 + [43633] = &_000576_hash,
92165 + [53743] = &_000577_hash,
92166 + [16196] = &_000578_hash,
92167 + [34425] = &_000580_hash,
92168 + [9646] = &_000581_hash,
92169 + [59756] = &_000583_hash,
92170 + [45524] = &_000584_hash,
92171 + [36702] = &_000585_hash,
92172 + [36747] = &_000586_hash,
92173 + [33643] = &_000588_hash,
92174 + [29158] = &_000589_hash,
92175 + [49662] = &_000590_hash,
92176 + [51062] = &_000591_hash,
92177 + [64755] = &_000592_hash,
92178 + [4829] = &_000594_hash,
92179 + [16413] = &_000595_hash,
92180 + [36125] = &_000596_hash,
92181 + [36293] = &_000597_hash,
92182 + [39712] = &_000598_hash,
92183 + [32160] = &_000599_hash,
92184 + [22962] = &_000600_hash,
92185 + [32001] = &_000601_hash,
92186 + [35828] = &_000602_hash,
92187 + [3106] = &_000603_hash,
92188 + [34039] = &_000604_hash,
92189 + [22393] = &_000605_hash,
92190 + [3560] = &_000606_hash,
92191 + [28195] = &_000607_hash,
92192 + [2062] = &_000608_hash,
92193 + [64001] = &_000609_hash,
92194 + [42407] = &_000610_hash,
92195 + [6253] = &_000611_hash,
92196 + [58640] = &_000612_hash,
92197 + [32195] = &_000613_hash,
92198 + [26197] = &_000614_hash,
92199 + [58003] = &_000615_hash,
92200 + [21662] = &_000616_hash,
92201 + [45750] = &_000617_hash,
92202 + [25798] = &_000618_hash,
92203 + [41052] = &_000619_hash,
92204 + [14096] = &_000620_hash,
92205 + [1439] = &_000621_hash,
92206 + [29074] = &_000622_hash,
92207 + [2376] = &_000623_hash,
92208 + [24068] = &_000625_hash,
92209 + [59519] = &_000627_hash,
92210 + [9893] = &_000628_hash,
92211 + [39979] = &_000630_hash,
92212 + [41540] = &_000631_hash,
92213 + [43200] = &_000633_hash,
92214 + [33494] = &_000634_hash,
92215 + [2028] = &_000635_hash,
92216 + [27206] = &_000636_hash,
92217 + [24302] = &_000637_hash,
92218 + [38112] = &_000638_hash,
92219 + [46538] = &_000639_hash,
92220 + [35228] = &_000641_hash,
92221 + [8339] = &_000642_hash,
92222 + [45349] = &_000643_hash,
92223 + [48404] = &_000644_hash,
92224 + [37865] = &_000645_hash,
92225 + [45763] = &_000646_hash,
92226 + [62347] = &_000647_hash,
92227 + [21644] = &_000648_hash,
92228 + [53135] = &_000649_hash,
92229 + [25095] = &_000650_hash,
92230 + [11697] = &_000651_hash,
92231 + [27003] = &_000652_hash,
92232 + [32464] = &_000653_hash,
92233 + [65339] = &_000654_hash,
92234 + [44248] = &_000655_hash,
92235 + [16] = &_000656_hash,
92236 + [29933] = &_000657_hash,
92237 + [34359] = &_000658_hash,
92238 + [3154] = &_000659_hash,
92239 + [59308] = &_000660_hash,
92240 + [61661] = &_000661_hash,
92241 + [23959] = &_000662_hash,
92242 + [6724] = &_000663_hash,
92243 + [54587] = &_000664_hash,
92244 + [28479] = &_000665_hash,
92245 + [56583] = &_000666_hash,
92246 + [64644] = &_000667_hash,
92247 + [23284] = &_000668_hash,
92248 + [61655] = &_000669_hash,
92249 + [20980] = &_000670_hash,
92250 + [19794] = &_000671_hash,
92251 + [30036] = &_000672_hash,
92252 + [25649] = &_000673_hash,
92253 + [47428] = &_000674_hash,
92254 + [47737] = &_000675_hash,
92255 + [8367] = &_000676_hash,
92256 + [2987] = &_000677_hash,
92257 + [50962] = &_000678_hash,
92258 + [10760] = &_000679_hash,
92259 + [31678] = &_000680_hash,
92260 + [48558] = &_000681_hash,
92261 + [2274] = &_000682_hash,
92262 + [831] = &_000683_hash,
92263 + [61833] = &_000684_hash,
92264 + [56864] = &_000685_hash,
92265 + [31040] = &_000686_hash,
92266 + [22196] = &_000687_hash,
92267 + [20076] = &_000688_hash,
92268 + [52821] = &_000689_hash,
92269 + [21896] = &_000690_hash,
92270 + [49367] = &_000691_hash,
92271 + [64731] = &_000692_hash,
92272 + [37110] = &_000693_hash,
92273 + [53694] = &_000694_hash,
92274 + [6175] = &_000695_hash,
92275 + [33048] = &_000696_hash,
92276 + [34746] = &_000697_hash,
92277 + [23777] = &_000698_hash,
92278 + [53828] = &_000699_hash,
92279 + [26539] = &_000700_hash,
92280 + [42628] = &_000701_hash,
92281 + [59115] = &_000702_hash,
92282 + [4456] = &_000703_hash,
92283 + [63619] = &_000704_hash,
92284 + [47329] = &_000705_hash,
92285 + [13534] = &_000706_hash,
92286 + [36955] = &_000707_hash,
92287 + [9841] = &_000708_hash,
92288 + [19308] = &_000709_hash,
92289 + [52439] = &_000710_hash,
92290 + [24680] = &_000711_hash,
92291 + [55652] = &_000712_hash,
92292 + [7842] = &_000713_hash,
92293 + [6500] = &_000714_hash,
92294 + [33485] = &_000715_hash,
92295 + [49920] = &_000716_hash,
92296 + [50750] = &_000717_hash,
92297 + [22318] = &_000718_hash,
92298 + [44599] = &_000719_hash,
92299 + [46403] = &_000720_hash,
92300 + [44534] = &_000721_hash,
92301 + [303] = &_000722_hash,
92302 + [22960] = &_000723_hash,
92303 + [10544] = &_000724_hash,
92304 + [8236] = &_000725_hash,
92305 + [21239] = &_000726_hash,
92306 + [24712] = &_000727_hash,
92307 + [37974] = &_000728_hash,
92308 + [62082] = &_000729_hash,
92309 + [57054] = &_000730_hash,
92310 + [53265] = &_000731_hash,
92311 + [52239] = &_000732_hash,
92312 + [14753] = &_000733_hash,
92313 + [60221] = &_000736_hash,
92314 + [27142] = &_000737_hash,
92315 + [14295] = &_000738_hash,
92316 + [25923] = &_000739_hash,
92317 + [29213] = &_000740_hash,
92318 + [31865] = &_000741_hash,
92319 + [4764] = &_000742_hash,
92320 + [10574] = &_000743_hash,
92321 + [55766] = &_000744_hash,
92322 + [22483] = &_000745_hash,
92323 + [61047] = &_000746_hash,
92324 + [41044] = &_000747_hash,
92325 + [58978] = &_000748_hash,
92326 + [47578] = &_000749_hash,
92327 + [7730] = &_000750_hash,
92328 + [15904] = &_000751_hash,
92329 + [25081] = &_000752_hash,
92330 + [45743] = &_000753_hash,
92331 + [58830] = &_000754_hash,
92332 + [59081] = &_000755_hash,
92333 + [47533] = &_000756_hash,
92334 + [11305] = &_000757_hash,
92335 + [29096] = &_000758_hash,
92336 + [19749] = &_000759_hash,
92337 + [56290] = &_000760_hash,
92338 + [44963] = &_000761_hash,
92339 + [30026] = &_000762_hash,
92340 + [27694] = &_000763_hash,
92341 + [8089] = &_000764_hash,
92342 + [38583] = &_000765_hash,
92343 + [1144] = &_000766_hash,
92344 + [20939] = &_000767_hash,
92345 + [22231] = &_000768_hash,
92346 + [17486] = &_000769_hash,
92347 + [51811] = &_000770_hash,
92348 + [62746] = &_000771_hash,
92349 + [19181] = &_000772_hash,
92350 + [52661] = &_000773_hash,
92351 + [51148] = &_000774_hash,
92352 + [49864] = &_000775_hash,
92353 + [37978] = &_000776_hash,
92354 + [6280] = &_000777_hash,
92355 + [12961] = &_000778_hash,
92356 + [60541] = &_000779_hash,
92357 + [37021] = &_000780_hash,
92358 + [26028] = &_000781_hash,
92359 + [41363] = &_000782_hash,
92360 + [42016] = &_000783_hash,
92361 + [58540] = &_000784_hash,
92362 + [2326] = &_000785_hash,
92363 + [60981] = &_000786_hash,
92364 + [13313] = &_000787_hash,
92365 + [44188] = &_000788_hash,
92366 + [34638] = &_000789_hash,
92367 + [20304] = &_000790_hash,
92368 + [60975] = &_000791_hash,
92369 + [12244] = &_000792_hash,
92370 + [16266] = &_000793_hash,
92371 + [3395] = &_000794_hash,
92372 + [63321] = &_000795_hash,
92373 + [20509] = &_000796_hash,
92374 + [57365] = &_000797_hash,
92375 + [47449] = &_000798_hash,
92376 + [56693] = &_000799_hash,
92377 + [33936] = &_000800_hash,
92378 + [52548] = &_000801_hash,
92379 + [18733] = &_000802_hash,
92380 + [15560] = &_000803_hash,
92381 + [13231] = &_000804_hash,
92382 + [64518] = &_000806_hash,
92383 + [54551] = &_000807_hash,
92384 + [54359] = &_000809_hash,
92385 + [46503] = &_000810_hash,
92386 + [22258] = &_000811_hash,
92387 + [39434] = &_000812_hash,
92388 + [52887] = &_000813_hash,
92389 + [3079] = &_000814_hash,
92390 + [18813] = &_000816_hash,
92391 + [47614] = &_000817_hash,
92392 + [38186] = &_000818_hash,
92393 + [57652] = &_000819_hash,
92394 + [10078] = &_000820_hash,
92395 + [17910] = &_000821_hash,
92396 + [13567] = &_000822_hash,
92397 + [21531] = &_000823_hash,
92398 + [46135] = &_000824_hash,
92399 + [10582] = &_000825_hash,
92400 + [4662] = &_000826_hash,
92401 + [17969] = &_000827_hash,
92402 + [43943] = &_000828_hash,
92403 + [46740] = &_000829_hash,
92404 + [26716] = &_000830_hash,
92405 + [58230] = &_000831_hash,
92406 + [252] = &_000832_hash,
92407 + [15704] = &_000833_hash,
92408 + [59765] = &_000834_hash,
92409 + [7322] = &_000835_hash,
92410 + [43950] = &_000836_hash,
92411 + [53093] = &_000837_hash,
92412 + [21646] = &_000838_hash,
92413 + [57063] = &_000839_hash,
92414 + [17132] = &_000840_hash,
92415 + [53922] = &_000842_hash,
92416 + [49155] = &_000843_hash,
92417 + [16356] = &_000844_hash,
92418 + [60037] = &_000845_hash,
92419 + [17299] = &_000846_hash,
92420 + [25678] = &_000847_hash,
92421 + [15494] = &_000848_hash,
92422 + [15159] = &_000849_hash,
92423 + [28442] = &_000850_hash,
92424 + [3514] = &_000851_hash,
92425 + [38151] = &_000852_hash,
92426 + [4173] = &_000853_hash,
92427 + [7258] = &_000854_hash,
92428 + [65109] = &_000855_hash,
92429 + [58827] = &_000856_hash,
92430 + [33575] = &_000857_hash,
92431 + [33078] = &_000858_hash,
92432 + [47234] = &_000859_hash,
92433 + [39193] = &_000860_hash,
92434 + [10950] = &_000861_hash,
92435 + [15613] = &_000862_hash,
92436 + [16046] = &_000863_hash,
92437 + [50172] = &_000864_hash,
92438 + [26107] = &_000865_hash,
92439 + [60543] = &_000866_hash,
92440 + [56337] = &_000867_hash,
92441 + [47626] = &_000868_hash,
92442 + [24409] = &_000869_hash,
92443 + [11732] = &_000870_hash,
92444 + [30010] = &_000871_hash,
92445 + [51480] = &_000872_hash,
92446 + [28518] = &_000873_hash,
92447 + [2061] = &_000874_hash,
92448 + [10885] = &_000875_hash,
92449 + [29517] = &_000876_hash,
92450 + [45913] = &_000877_hash,
92451 + [51774] = &_000878_hash,
92452 + [62298] = &_000879_hash,
92453 + [8892] = &_000880_hash,
92454 + [64891] = &_000881_hash,
92455 + [64537] = &_000882_hash,
92456 + [38103] = &_000883_hash,
92457 + [55518] = &_000884_hash,
92458 + [27419] = &_000885_hash,
92459 + [13869] = &_000886_hash,
92460 + [53150] = &_000887_hash,
92461 + [2884] = &_000888_hash,
92462 + [10362] = &_000889_hash,
92463 + [6961] = &_000890_hash,
92464 + [56975] = &_000891_hash,
92465 + [12508] = &_000892_hash,
92466 + [54597] = &_000893_hash,
92467 + [60499] = &_000894_hash,
92468 + [50109] = &_000895_hash,
92469 + [944] = &_000896_hash,
92470 + [29229] = &_000897_hash,
92471 + [37648] = &_000898_hash,
92472 + [1568] = &_000899_hash,
92473 + [61793] = &_000900_hash,
92474 + [53395] = &_000901_hash,
92475 + [5519] = &_000902_hash,
92476 + [28637] = &_000903_hash,
92477 + [53687] = &_000904_hash,
92478 + [6783] = &_000905_hash,
92479 + [43312] = &_000906_hash,
92480 + [2373] = &_000907_hash,
92481 + [33482] = &_000908_hash,
92482 + [24886] = &_000909_hash,
92483 + [48154] = &_000910_hash,
92484 + [12838] = &_000911_hash,
92485 + [47012] = &_000912_hash,
92486 + [23691] = &_000913_hash,
92487 + [37924] = &_000914_hash,
92488 + [47346] = &_000915_hash,
92489 + [5624] = &_000916_hash,
92490 + [16842] = &_000918_hash,
92491 + [60399] = &_000919_hash,
92492 + [2312] = &_000920_hash,
92493 + [59212] = &_000921_hash,
92494 + [11923] = &_000922_hash,
92495 + [10805] = &_000923_hash,
92496 + [36577] = &_000924_hash,
92497 + [60948] = &_000925_hash,
92498 + [21711] = &_000926_hash,
92499 + [54830] = &_000927_hash,
92500 + [1822] = &_000928_hash,
92501 + [44573] = &_000929_hash,
92502 + [23805] = &_000930_hash,
92503 + [46061] = &_000931_hash,
92504 + [33996] = &_000932_hash,
92505 + [40856] = &_000933_hash,
92506 + [16299] = &_000934_hash,
92507 + [63446] = &_000935_hash,
92508 + [31205] = &_000936_hash,
92509 + [33100] = &_000937_hash,
92510 + [40843] = &_000938_hash,
92511 + [23712] = &_000939_hash,
92512 + [36962] = &_000940_hash,
92513 + [9845] = &_000942_hash,
92514 + [13738] = &_000943_hash,
92515 + [58099] = &_000944_hash,
92516 + [31869] = &_000945_hash,
92517 + [63501] = &_000946_hash,
92518 + [58188] = &_000947_hash,
92519 + [51338] = &_000948_hash,
92520 + [54999] = &_000949_hash,
92521 + [2434] = &_000950_hash,
92522 + [34958] = &_000951_hash,
92523 + [41487] = &_000952_hash,
92524 + [11941] = &_000953_hash,
92525 + [56728] = &_000954_hash,
92526 + [48150] = &_000955_hash,
92527 + [13905] = &_000956_hash,
92528 + [9054] = &_000957_hash,
92529 + [10758] = &_000958_hash,
92530 + [48056] = &_000959_hash,
92531 + [24231] = &_000960_hash,
92532 + [43748] = &_000961_hash,
92533 + [24237] = &_000962_hash,
92534 + [14899] = &_000963_hash,
92535 + [38652] = &_000964_hash,
92536 + [65013] = &_000965_hash,
92537 + [16645] = &_000967_hash,
92538 + [55031] = &_000968_hash,
92539 + [23978] = &_000969_hash,
92540 + [24208] = &_000970_hash,
92541 + [18098] = &_000971_hash,
92542 + [2303] = &_000972_hash,
92543 + [3338] = &_000973_hash,
92544 + [39219] = &_000974_hash,
92545 + [18609] = &_000976_hash,
92546 + [64412] = &_000977_hash,
92547 + [16962] = &_000978_hash,
92548 + [26346] = &_000979_hash,
92549 + [39380] = &_000980_hash,
92550 + [33020] = &_000981_hash,
92551 + [22639] = &_000982_hash,
92552 + [6453] = &_000983_hash,
92553 + [58602] = &_000984_hash,
92554 + [50920] = &_000985_hash,
92555 + [56471] = &_000987_hash,
92556 + [15378] = &_000988_hash,
92557 + [3589] = &_000989_hash,
92558 + [12558] = &_000990_hash,
92559 + [3201] = &_000991_hash,
92560 + [28175] = &_000993_hash,
92561 + [43888] = &_000995_hash,
92562 + [56010] = &_000996_hash,
92563 + [32456] = &_000997_hash,
92564 + [29036] = &_000998_hash,
92565 + [32330] = &_000999_hash,
92566 + [25603] = &_001000_hash,
92567 + [17675] = &_001001_hash,
92568 + [36271] = &_001002_hash,
92569 + [49814] = &_001003_hash,
92570 + [5693] = &_001004_hash,
92571 + [51009] = &_001005_hash,
92572 + [62835] = &_001006_hash,
92573 + [27139] = &_001007_hash,
92574 + [45155] = &_001008_hash,
92575 + [17186] = &_001009_hash,
92576 + [46734] = &_001010_hash,
92577 + [61957] = &_001011_hash,
92578 + [51389] = &_001012_hash,
92579 + [23687] = &_001013_hash,
92580 + [46825] = &_001014_hash,
92581 + [52287] = &_001016_hash,
92582 + [31408] = &_001017_hash,
92583 + [5396] = &_001018_hash,
92584 + [62247] = &_001019_hash,
92585 + [7946] = &_001020_hash,
92586 + [58210] = &_001022_hash,
92587 + [15618] = &_001023_hash,
92588 + [61225] = &_001024_hash,
92589 + [13163] = &_001025_hash,
92590 + [36882] = &_001026_hash,
92591 + [8719] = &_001027_hash,
92592 + [8539] = &_001028_hash,
92593 + [27134] = &_001029_hash,
92594 + [53335] = &_001030_hash,
92595 + [30381] = &_001031_hash,
92596 + [32336] = &_001032_hash,
92597 + [32867] = &_001033_hash,
92598 + [1238] = &_001034_hash,
92599 + [8174] = &_001035_hash,
92600 + [6368] = &_001036_hash,
92601 + [29170] = &_001037_hash,
92602 + [9687] = &_001038_hash,
92603 + [61116] = &_001039_hash,
92604 + [31681] = &_001040_hash,
92605 + [22119] = &_001041_hash,
92606 + [59885] = &_001042_hash,
92607 + [47789] = &_001043_hash,
92608 + [5796] = &_001044_hash,
92609 + [43376] = &_001045_hash,
92610 + [36706] = &_001046_hash,
92611 + [47945] = &_001047_hash,
92612 + [33208] = &_001048_hash,
92613 + [55431] = &_001049_hash,
92614 + [25291] = &_001050_hash,
92615 + [58805] = &_001051_hash,
92616 + [23708] = &_001052_hash,
92617 + [29278] = &_001053_hash,
92618 + [1272] = &_001054_hash,
92619 + [10199] = &_001055_hash,
92620 + [34666] = &_001056_hash,
92621 + [49317] = &_001057_hash,
92622 + [18604] = &_001058_hash,
92623 + [42545] = &_001059_hash,
92624 + [33157] = &_001060_hash,
92625 + [53343] = &_001061_hash,
92626 + [64842] = &_001062_hash,
92627 + [61865] = &_001063_hash,
92628 + [54010] = &_001064_hash,
92629 + [64638] = &_001065_hash,
92630 + [20480] = &_001066_hash,
92631 + [23341] = &_001067_hash,
92632 + [10350] = &_001068_hash,
92633 + [30970] = &_001069_hash,
92634 + [62360] = &_001070_hash,
92635 + [52537] = &_001071_hash,
92636 + [51386] = &_001072_hash,
92637 + [48731] = &_001073_hash,
92638 + [58061] = &_001074_hash,
92639 + [40405] = &_001075_hash,
92640 + [57198] = &_001076_hash,
92641 + [19290] = &_001077_hash,
92642 + [60403] = &_001078_hash,
92643 + [2738] = &_001079_hash,
92644 + [59721] = &_001080_hash,
92645 + [24980] = &_001081_hash,
92646 + [55896] = &_001082_hash,
92647 + [57055] = &_001083_hash,
92648 + [46010] = &_001084_hash,
92649 + [712] = &_001085_hash,
92650 + [37747] = &_001086_hash,
92651 + [59996] = &_001087_hash,
92652 + [45219] = &_001088_hash,
92653 + [16917] = &_001089_hash,
92654 + [7415] = &_001090_hash,
92655 + [29576] = &_001091_hash,
92656 + [13584] = &_001092_hash,
92657 + [53364] = &_001093_hash,
92658 + [14813] = &_001094_hash,
92659 + [25543] = &_001095_hash,
92660 + [29240] = &_001096_hash,
92661 + [38748] = &_001097_hash,
92662 + [34848] = &_001099_hash,
92663 + [46226] = &_001100_hash,
92664 + [55526] = &_001101_hash,
92665 + [48271] = &_001102_hash,
92666 + [24658] = &_001104_hash,
92667 + [46964] = &_001105_hash,
92668 + [2637] = &_001106_hash,
92669 + [55601] = &_001107_hash,
92670 + [60275] = &_001108_hash,
92671 + [52645] = &_001109_hash,
92672 + [11712] = &_001110_hash,
92673 + [51364] = &_001111_hash,
92674 + [5106] = &_001112_hash,
92675 + [24710] = &_001113_hash,
92676 + [13101] = &_001114_hash,
92677 + [46963] = &_001115_hash,
92678 + [6779] = &_001116_hash,
92679 + [9237] = &_001117_hash,
92680 + [61524] = &_001118_hash,
92681 + [38247] = &_001119_hash,
92682 + [48715] = &_001120_hash,
92683 + [40797] = &_001121_hash,
92684 + [46780] = &_001122_hash,
92685 + [22071] = &_001123_hash,
92686 + [49735] = &_001125_hash,
92687 + [63925] = &_001126_hash,
92688 + [30902] = &_001127_hash,
92689 + [39828] = &_001128_hash,
92690 + [53089] = &_001129_hash,
92691 + [6394] = &_001130_hash,
92692 + [5116] = &_001131_hash,
92693 + [50702] = &_001132_hash,
92694 + [59565] = &_001133_hash,
92695 + [61042] = &_001134_hash,
92696 + [14533] = &_001135_hash,
92697 + [23807] = &_001136_hash,
92698 + [24296] = &_001137_hash,
92699 + [8808] = &_001138_hash,
92700 + [52383] = &_001139_hash,
92701 + [30487] = &_001140_hash,
92702 + [30125] = &_001141_hash,
92703 + [40665] = &_001142_hash,
92704 + [60809] = &_001143_hash,
92705 + [4842] = &_001144_hash,
92706 + [13955] = &_001145_hash,
92707 + [33237] = &_001146_hash,
92708 + [40673] = &_001147_hash,
92709 + [48026] = &_001148_hash,
92710 + [64033] = &_001149_hash,
92711 + [13879] = &_001150_hash,
92712 + [60114] = &_001151_hash,
92713 + [19472] = &_001152_hash,
92714 + [33552] = &_001153_hash,
92715 + [28575] = &_001154_hash,
92716 + [19696] = &_001155_hash,
92717 + [19742] = &_001156_hash,
92718 + [15286] = &_001157_hash,
92719 + [24629] = &_001158_hash,
92720 + [28382] = &_001159_hash,
92721 + [18962] = &_001160_hash,
92722 + [45796] = &_001161_hash,
92723 + [51632] = &_001162_hash,
92724 + [16907] = &_001163_hash,
92725 + [49336] = &_001164_hash,
92726 + [25316] = &_001165_hash,
92727 + [39978] = &_001166_hash,
92728 + [8091] = &_001167_hash,
92729 + [30680] = &_001168_hash,
92730 + [2066] = &_001169_hash,
92731 + [24271] = &_001170_hash,
92732 + [34934] = &_001171_hash,
92733 + [29208] = &_001172_hash,
92734 + [18591] = &_001173_hash,
92735 + [24373] = &_001174_hash,
92736 + [41485] = &_001175_hash,
92737 + [45487] = &_001176_hash,
92738 + [29299] = &_001177_hash,
92739 + [53920] = &_001178_hash,
92740 + [25407] = &_001179_hash,
92741 + [5525] = &_001180_hash,
92742 + [3531] = &_001181_hash,
92743 + [25143] = &_001182_hash,
92744 + [56046] = &_001183_hash,
92745 + [34693] = &_001184_hash,
92746 + [48644] = &_001185_hash,
92747 + [21226] = &_001186_hash,
92748 + [14051] = &_001187_hash,
92749 + [7715] = &_001188_hash,
92750 + [30413] = &_001189_hash,
92751 + [13681] = &_001190_hash,
92752 + [6554] = &_001191_hash,
92753 + [12228] = &_001192_hash,
92754 + [25497] = &_001193_hash,
92755 + [52228] = &_001194_hash,
92756 + [49069] = &_001195_hash,
92757 + [26961] = &_001196_hash,
92758 + [13768] = &_001197_hash,
92759 + [56185] = &_001198_hash,
92760 + [41838] = &_001199_hash,
92761 + [60119] = &_001200_hash,
92762 + [3112] = &_001201_hash,
92763 + [62001] = &_001202_hash,
92764 + [35888] = &_001203_hash,
92765 + [64177] = &_001207_hash,
92766 + [57222] = &_001208_hash,
92767 + [5260] = &_001209_hash,
92768 + [55517] = &_001210_hash,
92769 + [18186] = &_001211_hash,
92770 + [14257] = &_001212_hash,
92771 + [26846] = &_001213_hash,
92772 + [56097] = &_001214_hash,
92773 + [55151] = &_001215_hash,
92774 + [2999] = &_001216_hash,
92775 + [3602] = &_001217_hash,
92776 + [18460] = &_001218_hash,
92777 + [3507] = &_001219_hash,
92778 + [57847] = &_001220_hash,
92779 + [58077] = &_001221_hash,
92780 + [2659] = &_001222_hash,
92781 + [39846] = &_001223_hash,
92782 + [18629] = &_001224_hash,
92783 + [2723] = &_001225_hash,
92784 + [45230] = &_001226_hash,
92785 + [26941] = &_001227_hash,
92786 + [4344] = &_001228_hash,
92787 + [8487] = &_001229_hash,
92788 + [9901] = &_001230_hash,
92789 + [43061] = &_001231_hash,
92790 + [42551] = &_001232_hash,
92791 + [63272] = &_001233_hash,
92792 + [37771] = &_001234_hash,
92793 + [28261] = &_001235_hash,
92794 + [44694] = &_001236_hash,
92795 + [8573] = &_001237_hash,
92796 + [60174] = &_001238_hash,
92797 + [28040] = &_001239_hash,
92798 + [39423] = &_001240_hash,
92799 + [98] = &_001241_hash,
92800 + [62874] = &_001242_hash,
92801 + [38726] = &_001243_hash,
92802 + [55348] = &_001244_hash,
92803 + [10997] = &_001245_hash,
92804 + [88] = &_001246_hash,
92805 + [60639] = &_001247_hash,
92806 + [48159] = &_001248_hash,
92807 + [47899] = &_001249_hash,
92808 + [25367] = &_001250_hash,
92809 + [55681] = &_001251_hash,
92810 + [44716] = &_001252_hash,
92811 + [26161] = &_001253_hash,
92812 + [55347] = &_001254_hash,
92813 + [14518] = &_001255_hash,
92814 + [8887] = &_001256_hash,
92815 + [23009] = &_001257_hash,
92816 + [27962] = &_001258_hash,
92817 + [20004] = &_001259_hash,
92818 + [61750] = &_001260_hash,
92819 + [11661] = &_001261_hash,
92820 + [37118] = &_001262_hash,
92821 + [9370] = &_001263_hash,
92822 + [15099] = &_001264_hash,
92823 + [2404] = &_001265_hash,
92824 + [64074] = &_001266_hash,
92825 + [7538] = &_001267_hash,
92826 + [19736] = &_001268_hash,
92827 + [8199] = &_001269_hash,
92828 + [40711] = &_001270_hash,
92829 + [47859] = &_001271_hash,
92830 + [53925] = &_001272_hash,
92831 + [46888] = &_001273_hash,
92832 + [21783] = &_001274_hash,
92833 + [37305] = &_001275_hash,
92834 + [18414] = &_001276_hash,
92835 + [62423] = &_001277_hash,
92836 + [30371] = &_001278_hash,
92837 + [32617] = &_001279_hash,
92838 + [14530] = &_001281_hash,
92839 + [48623] = &_001282_hash,
92840 + [12845] = &_001283_hash,
92841 + [8895] = &_001284_hash,
92842 + [33661] = &_001285_hash,
92843 + [23178] = &_001286_hash,
92844 + [54706] = &_001287_hash,
92845 + [27133] = &_001288_hash,
92846 + [52745] = &_001289_hash,
92847 + [64420] = &_001290_hash,
92848 + [25617] = &_001291_hash,
92849 + [25414] = &_001292_hash,
92850 + [20445] = &_001293_hash,
92851 + [64006] = &_001294_hash,
92852 + [52646] = &_001295_hash,
92853 + [30281] = &_001296_hash,
92854 + [3761] = &_001297_hash,
92855 + [44345] = &_001298_hash,
92856 + [14713] = &_001299_hash,
92857 + [26043] = &_001300_hash,
92858 + [41679] = &_001301_hash,
92859 + [6267] = &_001302_hash,
92860 + [22247] = &_001304_hash,
92861 + [9440] = &_001305_hash,
92862 + [54676] = &_001306_hash,
92863 + [53982] = &_001308_hash,
92864 + [9467] = &_001309_hash,
92865 + [53419] = &_001310_hash,
92866 + [1424] = &_001311_hash,
92867 + [17561] = &_001312_hash,
92868 + [28161] = &_001313_hash,
92869 + [57262] = &_001314_hash,
92870 + [61071] = &_001315_hash,
92871 + [20067] = &_001316_hash,
92872 + [34321] = &_001317_hash,
92873 + [56199] = &_001318_hash,
92874 + [29070] = &_001319_hash,
92875 + [15698] = &_001320_hash,
92876 + [14173] = &_001321_hash,
92877 + [41224] = &_001322_hash,
92878 + [56438] = &_001323_hash,
92879 + [41894] = &_001324_hash,
92880 + [20885] = &_001325_hash,
92881 + [23275] = &_001326_hash,
92882 + [45043] = &_001327_hash,
92883 + [22143] = &_001328_hash,
92884 + [38029] = &_001329_hash,
92885 + [55343] = &_001330_hash,
92886 + [40624] = &_001331_hash,
92887 + [26476] = &_001332_hash,
92888 + [43128] = &_001333_hash,
92889 + [45115] = &_001334_hash,
92890 + [32958] = &_001335_hash,
92891 + [43091] = &_001336_hash,
92892 + [33299] = &_001337_hash,
92893 + [55021] = &_001338_hash,
92894 + [5509] = &_001339_hash,
92895 + [53012] = &_001340_hash,
92896 + [57849] = &_001341_hash,
92897 + [63282] = &_001342_hash,
92898 + [27883] = &_001343_hash,
92899 + [1670] = &_001344_hash,
92900 + [24095] = &_001345_hash,
92901 + [47810] = &_001346_hash,
92902 + [40759] = &_001347_hash,
92903 + [42139] = &_001348_hash,
92904 + [50484] = &_001349_hash,
92905 + [2305] = &_001350_hash,
92906 + [59832] = &_001351_hash,
92907 + [17662] = &_001352_hash,
92908 + [58943] = &_001353_hash,
92909 + [37417] = &_001356_hash,
92910 + [25127] = &_001357_hash,
92911 + [15006] = &_001358_hash,
92912 + [54292] = &_001359_hash,
92913 + [30642] = &_001360_hash,
92914 + [39939] = &_001361_hash,
92915 + [34818] = &_001362_hash,
92916 + [23378] = &_001363_hash,
92917 + [24090] = &_001364_hash,
92918 + [11111] = &_001365_hash,
92919 + [64141] = &_001366_hash,
92920 + [46457] = &_001367_hash,
92921 + [57927] = &_001368_hash,
92922 + [58877] = &_001371_hash,
92923 + [13880] = &_001372_hash,
92924 + [62888] = &_001373_hash,
92925 + [57962] = &_001374_hash,
92926 + [9117] = &_001375_hash,
92927 + [52012] = &_001376_hash,
92928 + [49246] = &_001377_hash,
92929 + [52701] = &_001378_hash,
92930 + [29857] = &_001379_hash,
92931 + [49420] = &_001380_hash,
92932 + [45897] = &_001381_hash,
92933 + [15141] = &_001382_hash,
92934 + [24177] = &_001383_hash,
92935 + [10325] = &_001384_hash,
92936 + [52861] = &_001385_hash,
92937 + [28922] = &_001386_hash,
92938 + [31089] = &_001387_hash,
92939 + [63084] = &_001388_hash,
92940 + [26245] = &_001389_hash,
92941 + [60000] = &_001390_hash,
92942 + [56935] = &_001391_hash,
92943 + [37569] = &_001392_hash,
92944 + [6446] = &_001394_hash,
92945 + [35883] = &_001395_hash,
92946 + [9123] = &_001396_hash,
92947 + [51457] = &_001397_hash,
92948 + [1787] = &_001398_hash,
92949 + [10135] = &_001399_hash,
92950 + [952] = &_001400_hash,
92951 + [53578] = &_001401_hash,
92952 + [9923] = &_001402_hash,
92953 + [45249] = &_001403_hash,
92954 + [52860] = &_001404_hash,
92955 + [29558] = &_001405_hash,
92956 + [40556] = &_001406_hash,
92957 + [53210] = &_001407_hash,
92958 + [2506] = &_001408_hash,
92959 + [48262] = &_001409_hash,
92960 + [46939] = &_001410_hash,
92961 + [17901] = &_001411_hash,
92962 + [27204] = &_001412_hash,
92963 + [52516] = &_001413_hash,
92964 + [55885] = &_001414_hash,
92965 + [6681] = &_001415_hash,
92966 + [42360] = &_001416_hash,
92967 + [20259] = &_001417_hash,
92968 + [8874] = &_001418_hash,
92969 + [53363] = &_001419_hash,
92970 + [17500] = &_001420_hash,
92971 + [63988] = &_001421_hash,
92972 + [26378] = &_001422_hash,
92973 + [7768] = &_001423_hash,
92974 + [12938] = &_001424_hash,
92975 + [6755] = &_001425_hash,
92976 + [43806] = &_001426_hash,
92977 + [15976] = &_001427_hash,
92978 + [2732] = &_001428_hash,
92979 + [2519] = &_001429_hash,
92980 + [14340] = &_001430_hash,
92981 + [34772] = &_001431_hash,
92982 + [36433] = &_001432_hash,
92983 + [16068] = &_001433_hash,
92984 + [22052] = &_001434_hash,
92985 + [8929] = &_001435_hash,
92986 + [63220] = &_001436_hash,
92987 + [18246] = &_001437_hash,
92988 + [37678] = &_001438_hash,
92989 + [4932] = &_001439_hash,
92990 + [46960] = &_001440_hash,
92991 + [16909] = &_001441_hash,
92992 + [44429] = &_001442_hash,
92993 + [59514] = &_001443_hash,
92994 + [62760] = &_001444_hash,
92995 + [41841] = &_001445_hash,
92996 + [25417] = &_001446_hash,
92997 + [63230] = &_001447_hash,
92998 + [39532] = &_001448_hash,
92999 + [24688] = &_001449_hash,
93000 + [18555] = &_001450_hash,
93001 + [54499] = &_001451_hash,
93002 + [10719] = &_001452_hash,
93003 + [1644] = &_001453_hash,
93004 + [15109] = &_001454_hash,
93005 + [15787] = &_001455_hash,
93006 + [57869] = &_001456_hash,
93007 + [54445] = &_001457_hash,
93008 + [19398] = &_001458_hash,
93009 + [9488] = &_001459_hash,
93010 + [12587] = &_001460_hash,
93011 + [17124] = &_001461_hash,
93012 + [53665] = &_001462_hash,
93013 + [40386] = &_001463_hash,
93014 + [39444] = &_001464_hash,
93015 + [28873] = &_001465_hash,
93016 + [11290] = &_001466_hash,
93017 + [51313] = &_001467_hash,
93018 + [23354] = &_001469_hash,
93019 + [49559] = &_001470_hash,
93020 + [49312] = &_001471_hash,
93021 + [36333] = &_001472_hash,
93022 + [59349] = &_001473_hash,
93023 + [60316] = &_001474_hash,
93024 + [2546] = &_001475_hash,
93025 + [57483] = &_001476_hash,
93026 + [14569] = &_001478_hash,
93027 + [61842] = &_001481_hash,
93028 + [32923] = &_001482_hash,
93029 + [57471] = &_001483_hash,
93030 + [83] = &_001484_hash,
93031 + [40242] = &_001485_hash,
93032 + [42578] = &_001486_hash,
93033 + [62037] = &_001487_hash,
93034 + [8131] = &_001488_hash,
93035 + [752] = &_001489_hash,
93036 + [56376] = &_001490_hash,
93037 + [22290] = &_001491_hash,
93038 + [46232] = &_001492_hash,
93039 + [35132] = &_001493_hash,
93040 + [23825] = &_001494_hash,
93041 + [43262] = &_001495_hash,
93042 + [8138] = &_001496_hash,
93043 + [31489] = &_001497_hash,
93044 + [57578] = &_001498_hash,
93045 + [28007] = &_001499_hash,
93046 + [28688] = &_001500_hash,
93047 + [19319] = &_001501_hash,
93048 + [12575] = &_001502_hash,
93049 + [62762] = &_001504_hash,
93050 + [47450] = &_001505_hash,
93051 + [1869] = &_001506_hash,
93052 + [51225] = &_001507_hash,
93053 + [19561] = &_001508_hash,
93054 + [64894] = &_001509_hash,
93055 + [6829] = &_001510_hash,
93056 + [30644] = &_001511_hash,
93057 + [63391] = &_001512_hash,
93058 + [11655] = &_001514_hash,
93059 + [28229] = &_001515_hash,
93060 + [22382] = &_001516_hash,
93061 + [22649] = &_001517_hash,
93062 + [42619] = &_001518_hash,
93063 + [19761] = &_001519_hash,
93064 + [56990] = &_001520_hash,
93065 + [19531] = &_001521_hash,
93066 + [26514] = &_001522_hash,
93067 + [56773] = &_001523_hash,
93068 + [15563] = &_001524_hash,
93069 + [26212] = &_001525_hash,
93070 + [29203] = &_001526_hash,
93071 + [32768] = &_001527_hash,
93072 + [15110] = &_001528_hash,
93073 + [3885] = &_001529_hash,
93074 + [13788] = &_001530_hash,
93075 + [27875] = &_001531_hash,
93076 + [54959] = &_001532_hash,
93077 + [20945] = &_001533_hash,
93078 + [59640] = &_001534_hash,
93079 + [4693] = &_001535_hash,
93080 + [13793] = &_001536_hash,
93081 + [25659] = &_001537_hash,
93082 + [18734] = &_001538_hash,
93083 + [17869] = &_001539_hash,
93084 + [26270] = &_001540_hash,
93085 + [18458] = &_001541_hash,
93086 + [58468] = &_001542_hash,
93087 + [61257] = &_001543_hash,
93088 + [39946] = &_001544_hash,
93089 + [52382] = &_001545_hash,
93090 + [18428] = &_001546_hash,
93091 + [31069] = &_001547_hash,
93092 + [61614] = &_001548_hash,
93093 + [60044] = &_001549_hash,
93094 + [36818] = &_001550_hash,
93095 + [54353] = &_001551_hash,
93096 + [55994] = &_001552_hash,
93097 + [65142] = &_001553_hash,
93098 + [1664] = &_001554_hash,
93099 + [32212] = &_001555_hash,
93100 + [63087] = &_001556_hash,
93101 + [29916] = &_001557_hash,
93102 + [54912] = &_001558_hash,
93103 + [10318] = &_001559_hash,
93104 + [44031] = &_001560_hash,
93105 + [50108] = &_001561_hash,
93106 + [57812] = &_001562_hash,
93107 + [63190] = &_001563_hash,
93108 + [48246] = &_001564_hash,
93109 + [3744] = &_001565_hash,
93110 + [56321] = &_001566_hash,
93111 + [42691] = &_001567_hash,
93112 + [62052] = &_001568_hash,
93113 + [21999] = &_001569_hash,
93114 + [13672] = &_001570_hash,
93115 + [20648] = &_001571_hash,
93116 + [42500] = &_001572_hash,
93117 + [22795] = &_001573_hash,
93118 + [19496] = &_001574_hash,
93119 + [35556] = &_001575_hash,
93120 + [57144] = &_001576_hash,
93121 + [1019] = &_001577_hash,
93122 + [28818] = &_001578_hash,
93123 + [52880] = &_001579_hash,
93124 + [6543] = &_001580_hash,
93125 + [18895] = &_001581_hash,
93126 + [857] = &_001582_hash,
93127 + [45966] = &_001583_hash,
93128 + [11785] = &_001584_hash,
93129 + [7736] = &_001585_hash,
93130 + [4308] = &_001586_hash,
93131 + [51095] = &_001587_hash,
93132 + [12101] = &_001588_hash,
93133 + [427] = &_001589_hash,
93134 + [4021] = &_001590_hash,
93135 + [54201] = &_001591_hash,
93136 + [5615] = &_001592_hash,
93137 + [16234] = &_001593_hash,
93138 + [51718] = &_001594_hash,
93139 + [42390] = &_001595_hash,
93140 + [55391] = &_001596_hash,
93141 + [28539] = &_001597_hash,
93142 + [943] = &_001598_hash,
93143 + [32683] = &_001599_hash,
93144 + [39182] = &_001600_hash,
93145 + [33198] = &_001601_hash,
93146 + [39446] = &_001602_hash,
93147 + [16394] = &_001603_hash,
93148 + [30791] = &_001604_hash,
93149 + [35530] = &_001605_hash,
93150 + [53193] = &_001607_hash,
93151 + [39401] = &_001608_hash,
93152 + [28624] = &_001609_hash,
93153 + [12066] = &_001610_hash,
93154 + [63492] = &_001611_hash,
93155 + [14897] = &_001612_hash,
93156 + [29641] = &_001613_hash,
93157 + [10165] = &_001614_hash,
93158 + [60046] = &_001615_hash,
93159 + [12429] = &_001616_hash,
93160 + [32788] = &_001617_hash,
93161 + [52698] = &_001618_hash,
93162 + [13130] = &_001620_hash,
93163 + [28643] = &_001621_hash,
93164 + [50666] = &_001622_hash,
93165 + [35126] = &_001623_hash,
93166 + [33593] = &_001624_hash,
93167 + [27547] = &_001625_hash,
93168 + [5484] = &_001626_hash,
93169 + [26642] = &_001627_hash,
93170 + [25586] = &_001628_hash,
93171 + [58757] = &_001629_hash,
93172 + [18701] = &_001630_hash,
93173 + [26271] = &_001631_hash,
93174 + [23829] = &_001632_hash,
93175 + [63659] = &_001634_hash,
93176 + [26603] = &_001635_hash,
93177 + [25704] = &_001636_hash,
93178 + [21149] = &_001637_hash,
93179 + [36900] = &_001638_hash,
93180 + [61577] = &_001640_hash,
93181 + [54095] = &_001641_hash,
93182 + [31650] = &_001642_hash,
93183 + [48970] = &_001643_hash,
93184 + [49357] = &_001644_hash,
93185 + [33835] = &_001645_hash,
93186 + [46861] = &_001646_hash,
93187 + [1428] = &_001647_hash,
93188 + [36247] = &_001648_hash,
93189 + [21600] = &_001649_hash,
93190 + [24747] = &_001650_hash,
93191 + [51012] = &_001651_hash,
93192 + [38974] = &_001653_hash,
93193 + [30549] = &_001655_hash,
93194 + [40146] = &_001656_hash,
93195 + [41756] = &_001657_hash,
93196 + [37010] = &_001658_hash,
93197 + [35072] = &_001660_hash,
93198 + [2114] = &_001661_hash,
93199 + [48424] = &_001662_hash,
93200 + [61522] = &_001663_hash,
93201 + [50633] = &_001664_hash,
93202 + [2283] = &_001665_hash,
93203 + [61763] = &_001666_hash,
93204 + [48195] = &_001667_hash,
93205 + [31000] = &_001668_hash,
93206 + [23856] = &_001669_hash,
93207 + [37421] = &_001670_hash,
93208 + [10019] = &_001672_hash,
93209 + [5148] = &_001673_hash,
93210 + [14363] = &_001674_hash,
93211 + [57354] = &_001675_hash,
93212 + [62460] = &_001676_hash,
93213 + [45174] = &_001677_hash,
93214 + [31054] = &_001678_hash,
93215 + [62195] = &_001679_hash,
93216 + [14976] = &_001680_hash,
93217 + [55676] = &_001681_hash,
93218 + [1025] = &_001682_hash,
93219 + [6921] = &_001683_hash,
93220 + [22158] = &_001684_hash,
93221 + [18050] = &_001685_hash,
93222 + [18612] = &_001686_hash,
93223 + [31107] = &_001687_hash,
93224 + [45212] = &_001688_hash,
93225 + [29599] = &_001689_hash,
93226 + [30827] = &_001690_hash,
93227 + [25086] = &_001691_hash,
93228 + [27146] = &_001692_hash,
93229 + [2467] = &_001693_hash,
93230 + [45786] = &_001694_hash,
93231 + [51909] = &_001695_hash,
93232 + [64604] = &_001696_hash,
93233 + [57819] = &_001697_hash,
93234 + [11001] = &_001698_hash,
93235 + [20326] = &_001699_hash,
93236 + [12682] = &_001700_hash,
93237 + [28932] = &_001701_hash,
93238 + [53491] = &_001702_hash,
93239 + [63894] = &_001703_hash,
93240 + [51191] = &_001704_hash,
93241 + [59759] = &_001705_hash,
93242 + [15691] = &_001706_hash,
93243 + [38786] = &_001707_hash,
93244 + [51546] = &_001708_hash,
93245 + [10121] = &_001709_hash,
93246 + [60786] = &_001710_hash,
93247 + [19952] = &_001712_hash,
93248 + [7271] = &_001715_hash,
93249 + [10729] = &_001716_hash,
93250 + [28883] = &_001717_hash,
93251 + [52042] = &_001718_hash,
93252 + [49606] = &_001719_hash,
93253 + [33243] = &_001720_hash,
93254 + [57341] = &_001721_hash,
93255 + [7978] = &_001722_hash,
93256 + [36330] = &_001723_hash,
93257 + [39035] = &_001724_hash,
93258 + [34498] = &_001725_hash,
93259 + [19789] = &_001726_hash,
93260 + [55685] = &_001727_hash,
93261 + [55419] = &_001728_hash,
93262 + [27798] = &_001729_hash,
93263 + [54599] = &_001730_hash,
93264 + [65522] = &_001731_hash,
93265 + [38111] = &_001732_hash,
93266 + [57077] = &_001733_hash,
93267 + [53053] = &_001734_hash,
93268 + [14190] = &_001735_hash,
93269 + [47037] = &_001736_hash,
93270 + [33296] = &_001737_hash,
93271 + [23803] = &_001738_hash,
93272 + [48773] = &_001739_hash,
93273 + [63014] = &_001740_hash,
93274 + [64392] = &_001741_hash,
93275 + [44203] = &_001742_hash,
93276 + [47717] = &_001743_hash,
93277 + [38399] = &_001744_hash,
93278 + [30385] = &_001745_hash,
93279 + [61693] = &_001746_hash,
93280 + [32049] = &_001747_hash,
93281 + [26133] = &_001748_hash,
93282 + [45038] = &_001749_hash,
93283 + [8582] = &_001751_hash,
93284 + [38182] = &_001753_hash,
93285 + [62457] = &_001754_hash,
93286 + [27937] = &_001755_hash,
93287 + [3795] = &_001756_hash,
93288 + [23228] = &_001757_hash,
93289 + [56511] = &_001758_hash,
93290 + [47807] = &_001759_hash,
93291 + [60528] = &_001760_hash,
93292 + [51858] = &_001761_hash,
93293 + [49183] = &_001762_hash,
93294 + [33807] = &_001763_hash,
93295 + [34791] = &_001764_hash,
93296 + [8150] = &_001765_hash,
93297 + [19691] = &_001767_hash,
93298 + [20519] = &_001770_hash,
93299 + [17144] = &_001771_hash,
93300 + [19394] = &_001772_hash,
93301 + [53730] = &_001773_hash,
93302 + [8447] = &_001774_hash,
93303 + [30004] = &_001775_hash,
93304 + [40939] = &_001776_hash,
93305 + [53674] = &_001777_hash,
93306 + [11820] = &_001778_hash,
93307 + [23401] = &_001779_hash,
93308 + [9641] = &_001780_hash,
93309 + [2721] = &_001781_hash,
93310 + [19700] = &_001782_hash,
93311 + [1619] = &_001783_hash,
93312 + [23272] = &_001784_hash,
93313 + [56424] = &_001785_hash,
93314 + [14483] = &_001786_hash,
93315 + [1599] = &_001787_hash,
93316 + [27604] = &_001788_hash,
93317 + [37219] = &_001789_hash,
93318 + [31958] = &_001790_hash,
93319 + [5273] = &_001791_hash,
93320 + [46712] = &_001792_hash,
93321 + [27259] = &_001794_hash,
93322 + [23674] = &_001797_hash,
93323 + [40509] = &_001798_hash,
93324 + [17549] = &_001799_hash,
93325 + [53992] = &_001800_hash,
93326 + [24062] = &_001801_hash,
93327 + [23371] = &_001802_hash,
93328 + [19115] = &_001803_hash,
93329 + [51532] = &_001804_hash,
93330 + [45193] = &_001805_hash,
93331 + [29340] = &_001806_hash,
93332 + [5048] = &_001807_hash,
93333 + [65040] = &_001808_hash,
93334 + [39155] = &_001809_hash,
93335 + [31406] = &_001810_hash,
93336 + [49182] = &_001811_hash,
93337 + [37695] = &_001812_hash,
93338 + [28432] = &_001813_hash,
93339 + [23482] = &_001814_hash,
93340 + [56550] = &_001815_hash,
93341 + [7374] = &_001816_hash,
93342 + [57050] = &_001817_hash,
93343 + [57011] = &_001818_hash,
93344 + [27529] = &_001819_hash,
93345 + [33662] = &_001820_hash,
93346 + [4314] = &_001821_hash,
93347 + [22812] = &_001822_hash,
93348 + [47555] = &_001823_hash,
93349 + [38737] = &_001824_hash,
93350 + [36101] = &_001826_hash,
93351 + [877] = &_001828_hash,
93352 + [2639] = &_001830_hash,
93353 + [64343] = &_001831_hash,
93354 + [11150] = &_001832_hash,
93355 + [46486] = &_001833_hash,
93356 + [18719] = &_001834_hash,
93357 + [49574] = &_001835_hash,
93358 + [37617] = &_001836_hash,
93359 + [3045] = &_001837_hash,
93360 + [39395] = &_001838_hash,
93361 + [15297] = &_001839_hash,
93362 + [50862] = &_001840_hash,
93363 + [28877] = &_001841_hash,
93364 + [57117] = &_001842_hash,
93365 + [62064] = &_001843_hash,
93366 + [64610] = &_001844_hash,
93367 + [24065] = &_001845_hash,
93368 + [24846] = &_001846_hash,
93369 + [8624] = &_001847_hash,
93370 + [14000] = &_001848_hash,
93371 + [31148] = &_001849_hash,
93372 + [62594] = &_001850_hash,
93373 + [39210] = &_001851_hash,
93374 + [2077] = &_001852_hash,
93375 + [23497] = &_001853_hash,
93376 + [34512] = &_001854_hash,
93377 + [16268] = &_001856_hash,
93378 + [14562] = &_001857_hash,
93379 + [17606] = &_001859_hash,
93380 + [25654] = &_001860_hash,
93381 + [56078] = &_001861_hash,
93382 + [61088] = &_001862_hash,
93383 + [53442] = &_001863_hash,
93384 + [54456] = &_001864_hash,
93385 + [22038] = &_001865_hash,
93386 + [58394] = &_001866_hash,
93387 + [38953] = &_001867_hash,
93388 + [16109] = &_001868_hash,
93389 + [3812] = &_001869_hash,
93390 + [5084] = &_001870_hash,
93391 + [41893] = &_001871_hash,
93392 + [45486] = &_001872_hash,
93393 + [50226] = &_001873_hash,
93394 + [63694] = &_001874_hash,
93395 + [56763] = &_001875_hash,
93396 + [20905] = &_001876_hash,
93397 + [13080] = &_001877_hash,
93398 + [54700] = &_001878_hash,
93399 + [40947] = &_001879_hash,
93400 + [32645] = &_001880_hash,
93401 + [57462] = &_001881_hash,
93402 + [33853] = &_001882_hash,
93403 + [57940] = &_001883_hash,
93404 + [45583] = &_001884_hash,
93405 + [49704] = &_001885_hash,
93406 + [39232] = &_001886_hash,
93407 + [5140] = &_001887_hash,
93408 + [45726] = &_001888_hash,
93409 + [35392] = &_001889_hash,
93410 + [44895] = &_001890_hash,
93411 + [17219] = &_001891_hash,
93412 + [50185] = &_001892_hash,
93413 + [3062] = &_001893_hash,
93414 + [9784] = &_001894_hash,
93415 + [52513] = &_001895_hash,
93416 + [52678] = &_001896_hash,
93417 + [36258] = &_001897_hash,
93418 + [2885] = &_001898_hash,
93419 + [11588] = &_001899_hash,
93420 + [65337] = &_001900_hash,
93421 + [19329] = &_001901_hash,
93422 + [23791] = &_001902_hash,
93423 + [38078] = &_001903_hash,
93424 + [42270] = &_001904_hash,
93425 + [30475] = &_001905_hash,
93426 + [25564] = &_001906_hash,
93427 + [33581] = &_001907_hash,
93428 + [59644] = &_001908_hash,
93429 + [5800] = &_001909_hash,
93430 + [42227] = &_001910_hash,
93431 + [54718] = &_001911_hash,
93432 + [41255] = &_001912_hash,
93433 + [31502] = &_001913_hash,
93434 + [44929] = &_001914_hash,
93435 + [47332] = &_001915_hash,
93436 + [10107] = &_001916_hash,
93437 + [47137] = &_001917_hash,
93438 + [26017] = &_001918_hash,
93439 + [41477] = &_001919_hash,
93440 + [6656] = &_001920_hash,
93441 + [50198] = &_001921_hash,
93442 + [48909] = &_001922_hash,
93443 + [9474] = &_001923_hash,
93444 + [58554] = &_001924_hash,
93445 + [45747] = &_001925_hash,
93446 + [43151] = &_001926_hash,
93447 + [15626] = &_001927_hash,
93448 + [17364] = &_001928_hash,
93449 + [15077] = &_001929_hash,
93450 + [31912] = &_001930_hash,
93451 + [2803] = &_001931_hash,
93452 + [42715] = &_001932_hash,
93453 + [12552] = &_001933_hash,
93454 + [13099] = &_001934_hash,
93455 + [40973] = &_001935_hash,
93456 + [20988] = &_001936_hash,
93457 + [16939] = &_001937_hash,
93458 + [48587] = &_001938_hash,
93459 + [52889] = &_001939_hash,
93460 + [38776] = &_001940_hash,
93461 + [58608] = &_001941_hash,
93462 + [4360] = &_001942_hash,
93463 + [53447] = &_001943_hash,
93464 + [25355] = &_001944_hash,
93465 + [14955] = &_001946_hash,
93466 + [5428] = &_001947_hash,
93467 + [11063] = &_001948_hash,
93468 + [59852] = &_001949_hash,
93469 + [45648] = &_001950_hash,
93470 + [21855] = &_001951_hash,
93471 + [54573] = &_001952_hash,
93472 + [56316] = &_001953_hash,
93473 +};
93474 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
93475 new file mode 100644
93476 index 0000000..92b8ee6
93477 --- /dev/null
93478 +++ b/tools/gcc/size_overflow_plugin.c
93479 @@ -0,0 +1,1188 @@
93480 +/*
93481 + * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
93482 + * Licensed under the GPL v2, or (at your option) v3
93483 + *
93484 + * Homepage:
93485 + * http://www.grsecurity.net/~ephox/overflow_plugin/
93486 + *
93487 + * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
93488 + * with double integer precision (DImode/TImode for 32/64 bit integer types).
93489 + * The recomputed argument is checked against INT_MAX and an event is logged on overflow and the triggering process is killed.
93490 + *
93491 + * Usage:
93492 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o size_overflow_plugin.so size_overflow_plugin.c
93493 + * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
93494 + */
93495 +
93496 +#include "gcc-plugin.h"
93497 +#include "config.h"
93498 +#include "system.h"
93499 +#include "coretypes.h"
93500 +#include "tree.h"
93501 +#include "tree-pass.h"
93502 +#include "intl.h"
93503 +#include "plugin-version.h"
93504 +#include "tm.h"
93505 +#include "toplev.h"
93506 +#include "function.h"
93507 +#include "tree-flow.h"
93508 +#include "plugin.h"
93509 +#include "gimple.h"
93510 +#include "c-common.h"
93511 +#include "diagnostic.h"
93512 +#include "cfgloop.h"
93513 +
93514 +struct size_overflow_hash {
93515 + struct size_overflow_hash *next;
93516 + const char *name;
93517 + const char *file;
93518 + unsigned short param1:1;
93519 + unsigned short param2:1;
93520 + unsigned short param3:1;
93521 + unsigned short param4:1;
93522 + unsigned short param5:1;
93523 + unsigned short param6:1;
93524 + unsigned short param7:1;
93525 + unsigned short param8:1;
93526 + unsigned short param9:1;
93527 +};
93528 +
93529 +#include "size_overflow_hash.h"
93530 +
93531 +#define __unused __attribute__((__unused__))
93532 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
93533 +#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
93534 +#define BEFORE_STMT true
93535 +#define AFTER_STMT false
93536 +#define CREATE_NEW_VAR NULL_TREE
93537 +
93538 +int plugin_is_GPL_compatible;
93539 +void debug_gimple_stmt(gimple gs);
93540 +
93541 +static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var);
93542 +static tree signed_size_overflow_type;
93543 +static tree unsigned_size_overflow_type;
93544 +static tree report_size_overflow_decl;
93545 +static tree const_char_ptr_type_node;
93546 +static unsigned int handle_function(void);
93547 +static bool file_match = true;
93548 +
93549 +static struct plugin_info size_overflow_plugin_info = {
93550 + .version = "20120521beta",
93551 + .help = "no-size_overflow\tturn off size overflow checking\n",
93552 +};
93553 +
93554 +static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
93555 +{
93556 + unsigned int arg_count = type_num_arguments(*node);
93557 +
93558 + for (; args; args = TREE_CHAIN(args)) {
93559 + tree position = TREE_VALUE(args);
93560 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) {
93561 + error("handle_size_overflow_attribute: overflow parameter outside range.");
93562 + *no_add_attrs = true;
93563 + }
93564 + }
93565 + return NULL_TREE;
93566 +}
93567 +
93568 +static struct attribute_spec no_size_overflow_attr = {
93569 + .name = "size_overflow",
93570 + .min_length = 1,
93571 + .max_length = -1,
93572 + .decl_required = false,
93573 + .type_required = true,
93574 + .function_type_required = true,
93575 + .handler = handle_size_overflow_attribute
93576 +};
93577 +
93578 +static void register_attributes(void __unused *event_data, void __unused *data)
93579 +{
93580 + register_attribute(&no_size_overflow_attr);
93581 +}
93582 +
93583 +// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
93584 +static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
93585 +{
93586 +#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
93587 +#define cwmixa( in ) { cwfold( in, m, k, h ); }
93588 +#define cwmixb( in ) { cwfold( in, n, h, k ); }
93589 +
93590 + const unsigned int m = 0x57559429;
93591 + const unsigned int n = 0x5052acdb;
93592 + const unsigned int *key4 = (const unsigned int *)key;
93593 + unsigned int h = len;
93594 + unsigned int k = len + seed + n;
93595 + unsigned long long p;
93596 +
93597 + while (len >= 8) {
93598 + cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
93599 + len -= 8;
93600 + }
93601 + if (len >= 4) {
93602 + cwmixb(key4[0]) key4 += 1;
93603 + len -= 4;
93604 + }
93605 + if (len)
93606 + cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
93607 + cwmixb(h ^ (k + n));
93608 + return k ^ h;
93609 +
93610 +#undef cwfold
93611 +#undef cwmixa
93612 +#undef cwmixb
93613 +}
93614 +
93615 +static inline unsigned int get_hash_num(const char *fndecl, const char *loc_file, unsigned int seed)
93616 +{
93617 + unsigned int fn = CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
93618 + unsigned int file = CrapWow(loc_file, strlen(loc_file), seed) & 0xffff;
93619 +
93620 + if (file_match)
93621 + return fn ^ file;
93622 + else
93623 + return fn;
93624 +}
93625 +
93626 +static inline tree get_original_function_decl(tree fndecl)
93627 +{
93628 + if (DECL_ABSTRACT_ORIGIN(fndecl))
93629 + return DECL_ABSTRACT_ORIGIN(fndecl);
93630 + return fndecl;
93631 +}
93632 +
93633 +static inline gimple get_def_stmt(tree node)
93634 +{
93635 + gcc_assert(TREE_CODE(node) == SSA_NAME);
93636 + return SSA_NAME_DEF_STMT(node);
93637 +}
93638 +
93639 +static struct size_overflow_hash *get_function_hash(tree fndecl, const char *loc_file)
93640 +{
93641 + unsigned int hash;
93642 + struct size_overflow_hash *entry;
93643 + const char *func_name = NAME(fndecl);
93644 +
93645 + hash = get_hash_num(NAME(fndecl), loc_file, 0);
93646 +
93647 + entry = size_overflow_hash[hash];
93648 + while (entry) {
93649 + if (!strcmp(entry->name, func_name) && (!file_match || !strcmp(entry->file, loc_file)))
93650 + return entry;
93651 + entry = entry->next;
93652 + }
93653 +
93654 + return NULL;
93655 +}
93656 +
93657 +static void check_arg_type(tree var)
93658 +{
93659 + tree type = TREE_TYPE(var);
93660 + enum tree_code code = TREE_CODE(type);
93661 +
93662 + gcc_assert(code == INTEGER_TYPE || code == ENUMERAL_TYPE ||
93663 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) ||
93664 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE));
93665 +}
93666 +
93667 +static int find_arg_number(tree arg, tree func)
93668 +{
93669 + tree var;
93670 + bool match = false;
93671 + unsigned int argnum = 1;
93672 +
93673 + if (TREE_CODE(arg) == SSA_NAME)
93674 + arg = SSA_NAME_VAR(arg);
93675 +
93676 + for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) {
93677 + if (strcmp(NAME(arg), NAME(var))) {
93678 + argnum++;
93679 + continue;
93680 + }
93681 + check_arg_type(var);
93682 +
93683 + match = true;
93684 + if (!TYPE_UNSIGNED(TREE_TYPE(var)))
93685 + return 0;
93686 + break;
93687 + }
93688 + if (!match) {
93689 + warning(0, "find_arg_number: cannot find the %s argument in %s", NAME(arg), NAME(func));
93690 + return 0;
93691 + }
93692 + return argnum;
93693 +}
93694 +
93695 +static void print_missing_msg(tree func, const char *filename, unsigned int argnum)
93696 +{
93697 + unsigned int new_hash;
93698 + location_t loc = DECL_SOURCE_LOCATION(func);
93699 + const char *curfunc = NAME(func);
93700 +
93701 + new_hash = get_hash_num(curfunc, filename, 0);
93702 +// inform(loc, "Function %s is missing from the size_overflow hash table +%s+%d+%u+%s+", curfunc, curfunc, argnum, new_hash, filename);
93703 +}
93704 +
93705 +static void check_missing_attribute(tree arg)
93706 +{
93707 + tree type, func = get_original_function_decl(current_function_decl);
93708 + unsigned int argnum;
93709 + struct size_overflow_hash *hash;
93710 + const char *filename;
93711 +
93712 + gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
93713 +
93714 + type = TREE_TYPE(arg);
93715 + // skip function pointers
93716 + if (TREE_CODE(type) == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == FUNCTION_TYPE)
93717 + return;
93718 +
93719 + if (lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(func))))
93720 + return;
93721 +
93722 + argnum = find_arg_number(arg, func);
93723 + if (argnum == 0)
93724 + return;
93725 +
93726 + filename = DECL_SOURCE_FILE(func);
93727 +
93728 + hash = get_function_hash(func, filename);
93729 + if (!hash) {
93730 + print_missing_msg(func, filename, argnum);
93731 + return;
93732 + }
93733 +
93734 +#define check_param(num) \
93735 + if (num == argnum && hash->param##num) \
93736 + return;
93737 + check_param(1);
93738 + check_param(2);
93739 + check_param(3);
93740 + check_param(4);
93741 + check_param(5);
93742 + check_param(6);
93743 + check_param(7);
93744 + check_param(8);
93745 + check_param(9);
93746 +#undef check_param
93747 +
93748 + print_missing_msg(func, filename, argnum);
93749 +}
93750 +
93751 +static tree create_new_var(tree type)
93752 +{
93753 + tree new_var = create_tmp_var(type, "cicus");
93754 +
93755 + add_referenced_var(new_var);
93756 + mark_sym_for_renaming(new_var);
93757 + return new_var;
93758 +}
93759 +
93760 +static bool is_bool(tree node)
93761 +{
93762 + tree type;
93763 +
93764 + if (node == NULL_TREE)
93765 + return false;
93766 +
93767 + type = TREE_TYPE(node);
93768 + if (!INTEGRAL_TYPE_P(type))
93769 + return false;
93770 + if (TREE_CODE(type) == BOOLEAN_TYPE)
93771 + return true;
93772 + if (TYPE_PRECISION(type) == 1)
93773 + return true;
93774 + return false;
93775 +}
93776 +
93777 +static tree cast_a_tree(tree type, tree var)
93778 +{
93779 + gcc_assert(fold_convertible_p(type, var));
93780 +
93781 + return fold_convert(type, var);
93782 +}
93783 +
93784 +static gimple build_cast_stmt(tree type, tree var, tree new_var, location_t loc)
93785 +{
93786 + gimple assign;
93787 +
93788 + if (new_var == CREATE_NEW_VAR)
93789 + new_var = create_new_var(type);
93790 +
93791 + assign = gimple_build_assign(new_var, cast_a_tree(type, var));
93792 + gimple_set_location(assign, loc);
93793 + gimple_set_lhs(assign, make_ssa_name(new_var, assign));
93794 +
93795 + return assign;
93796 +}
93797 +
93798 +static tree create_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, bool before)
93799 +{
93800 + tree oldstmt_rhs1;
93801 + enum tree_code code;
93802 + gimple stmt;
93803 + gimple_stmt_iterator gsi;
93804 +
93805 + if (!*potentionally_overflowed)
93806 + return NULL_TREE;
93807 +
93808 + if (rhs1 == NULL_TREE) {
93809 + debug_gimple_stmt(oldstmt);
93810 + error("create_assign: rhs1 is NULL_TREE");
93811 + gcc_unreachable();
93812 + }
93813 +
93814 + oldstmt_rhs1 = gimple_assign_rhs1(oldstmt);
93815 + code = TREE_CODE(oldstmt_rhs1);
93816 + if (code == PARM_DECL || (code == SSA_NAME && gimple_code(get_def_stmt(oldstmt_rhs1)) == GIMPLE_NOP))
93817 + check_missing_attribute(oldstmt_rhs1);
93818 +
93819 + stmt = build_cast_stmt(signed_size_overflow_type, rhs1, CREATE_NEW_VAR, gimple_location(oldstmt));
93820 + gsi = gsi_for_stmt(oldstmt);
93821 + if (lookup_stmt_eh_lp(oldstmt) != 0) {
93822 + basic_block next_bb, cur_bb;
93823 + edge e;
93824 +
93825 + gcc_assert(before == false);
93826 + gcc_assert(stmt_can_throw_internal(oldstmt));
93827 + gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
93828 + gcc_assert(!gsi_end_p(gsi));
93829 +
93830 + cur_bb = gimple_bb(oldstmt);
93831 + next_bb = cur_bb->next_bb;
93832 + e = find_edge(cur_bb, next_bb);
93833 + gcc_assert(e != NULL);
93834 + gcc_assert(e->flags & EDGE_FALLTHRU);
93835 +
93836 + gsi = gsi_after_labels(next_bb);
93837 + gcc_assert(!gsi_end_p(gsi));
93838 + before = true;
93839 + }
93840 + if (before)
93841 + gsi_insert_before(&gsi, stmt, GSI_NEW_STMT);
93842 + else
93843 + gsi_insert_after(&gsi, stmt, GSI_NEW_STMT);
93844 + update_stmt(stmt);
93845 + pointer_set_insert(visited, oldstmt);
93846 + return gimple_get_lhs(stmt);
93847 +}
93848 +
93849 +static tree dup_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, tree rhs2, tree __unused rhs3)
93850 +{
93851 + tree new_var, lhs = gimple_get_lhs(oldstmt);
93852 + gimple stmt;
93853 + gimple_stmt_iterator gsi;
93854 +
93855 + if (!*potentionally_overflowed)
93856 + return NULL_TREE;
93857 +
93858 + if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
93859 + rhs1 = gimple_assign_rhs1(oldstmt);
93860 + rhs1 = create_assign(visited, potentionally_overflowed, oldstmt, rhs1, BEFORE_STMT);
93861 + }
93862 + if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
93863 + rhs2 = gimple_assign_rhs2(oldstmt);
93864 + rhs2 = create_assign(visited, potentionally_overflowed, oldstmt, rhs2, BEFORE_STMT);
93865 + }
93866 +
93867 + stmt = gimple_copy(oldstmt);
93868 + gimple_set_location(stmt, gimple_location(oldstmt));
93869 +
93870 + if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
93871 + gimple_assign_set_rhs_code(stmt, MULT_EXPR);
93872 +
93873 + if (is_bool(lhs))
93874 + new_var = SSA_NAME_VAR(lhs);
93875 + else
93876 + new_var = create_new_var(signed_size_overflow_type);
93877 + new_var = make_ssa_name(new_var, stmt);
93878 + gimple_set_lhs(stmt, new_var);
93879 +
93880 + if (rhs1 != NULL_TREE) {
93881 + if (!gimple_assign_cast_p(oldstmt))
93882 + rhs1 = cast_a_tree(signed_size_overflow_type, rhs1);
93883 + gimple_assign_set_rhs1(stmt, rhs1);
93884 + }
93885 +
93886 + if (rhs2 != NULL_TREE)
93887 + gimple_assign_set_rhs2(stmt, rhs2);
93888 +#if BUILDING_GCC_VERSION >= 4007
93889 + if (rhs3 != NULL_TREE)
93890 + gimple_assign_set_rhs3(stmt, rhs3);
93891 +#endif
93892 + gimple_set_vuse(stmt, gimple_vuse(oldstmt));
93893 + gimple_set_vdef(stmt, gimple_vdef(oldstmt));
93894 +
93895 + gsi = gsi_for_stmt(oldstmt);
93896 + gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
93897 + update_stmt(stmt);
93898 + pointer_set_insert(visited, oldstmt);
93899 + return gimple_get_lhs(stmt);
93900 +}
93901 +
93902 +static gimple overflow_create_phi_node(gimple oldstmt, tree var)
93903 +{
93904 + basic_block bb;
93905 + gimple phi;
93906 + gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
93907 +
93908 + bb = gsi_bb(gsi);
93909 +
93910 + phi = create_phi_node(var, bb);
93911 + gsi = gsi_last(phi_nodes(bb));
93912 + gsi_remove(&gsi, false);
93913 +
93914 + gsi = gsi_for_stmt(oldstmt);
93915 + gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
93916 + gimple_set_bb(phi, bb);
93917 + return phi;
93918 +}
93919 +
93920 +static tree signed_cast_constant(tree node)
93921 +{
93922 + gcc_assert(is_gimple_constant(node));
93923 +
93924 + return cast_a_tree(signed_size_overflow_type, node);
93925 +}
93926 +
93927 +static basic_block create_a_first_bb(void)
93928 +{
93929 + basic_block first_bb;
93930 +
93931 + first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
93932 + if (dom_info_available_p(CDI_DOMINATORS))
93933 + set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
93934 + return first_bb;
93935 +}
93936 +
93937 +static gimple cast_old_phi_arg(gimple oldstmt, tree arg, tree new_var, unsigned int i)
93938 +{
93939 + basic_block bb;
93940 + gimple newstmt, def_stmt;
93941 + gimple_stmt_iterator gsi;
93942 +
93943 + newstmt = build_cast_stmt(signed_size_overflow_type, arg, new_var, gimple_location(oldstmt));
93944 + if (TREE_CODE(arg) == SSA_NAME) {
93945 + def_stmt = get_def_stmt(arg);
93946 + if (gimple_code(def_stmt) != GIMPLE_NOP) {
93947 + gsi = gsi_for_stmt(def_stmt);
93948 + gsi_insert_after(&gsi, newstmt, GSI_NEW_STMT);
93949 + return newstmt;
93950 + }
93951 + }
93952 +
93953 + bb = gimple_phi_arg_edge(oldstmt, i)->src;
93954 + if (bb->index == 0)
93955 + bb = create_a_first_bb();
93956 + gsi = gsi_after_labels(bb);
93957 + gsi_insert_before(&gsi, newstmt, GSI_NEW_STMT);
93958 + return newstmt;
93959 +}
93960 +
93961 +static gimple handle_new_phi_arg(tree arg, tree new_var, tree new_rhs)
93962 +{
93963 + gimple newstmt;
93964 + gimple_stmt_iterator gsi;
93965 + void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update);
93966 + gimple def_newstmt = get_def_stmt(new_rhs);
93967 +
93968 + gsi_insert = gsi_insert_after;
93969 + gsi = gsi_for_stmt(def_newstmt);
93970 +
93971 + switch (gimple_code(get_def_stmt(arg))) {
93972 + case GIMPLE_PHI:
93973 + newstmt = gimple_build_assign(new_var, new_rhs);
93974 + gsi = gsi_after_labels(gimple_bb(def_newstmt));
93975 + gsi_insert = gsi_insert_before;
93976 + break;
93977 + case GIMPLE_ASM:
93978 + case GIMPLE_CALL:
93979 + newstmt = gimple_build_assign(new_var, new_rhs);
93980 + break;
93981 + case GIMPLE_ASSIGN:
93982 + newstmt = gimple_build_assign(new_var, gimple_get_lhs(def_newstmt));
93983 + break;
93984 + default:
93985 + /* unknown gimple_code (handle_build_new_phi_arg) */
93986 + gcc_unreachable();
93987 + }
93988 +
93989 + gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
93990 + gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
93991 + update_stmt(newstmt);
93992 + return newstmt;
93993 +}
93994 +
93995 +static tree build_new_phi_arg(struct pointer_set_t *visited, bool *potentionally_overflowed, tree arg, tree new_var)
93996 +{
93997 + gimple newstmt;
93998 + tree new_rhs;
93999 +
94000 + new_rhs = expand(visited, potentionally_overflowed, arg);
94001 +
94002 + if (new_rhs == NULL_TREE)
94003 + return NULL_TREE;
94004 +
94005 + newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
94006 + return gimple_get_lhs(newstmt);
94007 +}
94008 +
94009 +static tree build_new_phi(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt)
94010 +{
94011 + gimple phi;
94012 + tree new_var = create_new_var(signed_size_overflow_type);
94013 + unsigned int i, n = gimple_phi_num_args(oldstmt);
94014 +
94015 + pointer_set_insert(visited, oldstmt);
94016 + phi = overflow_create_phi_node(oldstmt, new_var);
94017 + for (i = 0; i < n; i++) {
94018 + tree arg, lhs;
94019 +
94020 + arg = gimple_phi_arg_def(oldstmt, i);
94021 + if (is_gimple_constant(arg))
94022 + arg = signed_cast_constant(arg);
94023 + lhs = build_new_phi_arg(visited, potentionally_overflowed, arg, new_var);
94024 + if (lhs == NULL_TREE)
94025 + lhs = gimple_get_lhs(cast_old_phi_arg(oldstmt, arg, new_var, i));
94026 + add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
94027 + }
94028 +
94029 + update_stmt(phi);
94030 + return gimple_phi_result(phi);
94031 +}
94032 +
94033 +static tree handle_unary_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
94034 +{
94035 + gimple def_stmt = get_def_stmt(var);
94036 + tree new_rhs1, rhs1 = gimple_assign_rhs1(def_stmt);
94037 +
94038 + *potentionally_overflowed = true;
94039 + new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
94040 + if (new_rhs1 == NULL_TREE) {
94041 + if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
94042 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
94043 + else
94044 + return create_assign(visited, potentionally_overflowed, def_stmt, rhs1, AFTER_STMT);
94045 + }
94046 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, NULL_TREE, NULL_TREE);
94047 +}
94048 +
94049 +static tree handle_unary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
94050 +{
94051 + gimple def_stmt = get_def_stmt(var);
94052 + tree rhs1 = gimple_assign_rhs1(def_stmt);
94053 +
94054 + if (is_gimple_constant(rhs1))
94055 + return dup_assign(visited, potentionally_overflowed, def_stmt, signed_cast_constant(rhs1), NULL_TREE, NULL_TREE);
94056 +
94057 + gcc_assert(TREE_CODE(rhs1) != COND_EXPR);
94058 + switch (TREE_CODE(rhs1)) {
94059 + case SSA_NAME:
94060 + return handle_unary_rhs(visited, potentionally_overflowed, var);
94061 +
94062 + case ARRAY_REF:
94063 + case BIT_FIELD_REF:
94064 + case ADDR_EXPR:
94065 + case COMPONENT_REF:
94066 + case INDIRECT_REF:
94067 +#if BUILDING_GCC_VERSION >= 4006
94068 + case MEM_REF:
94069 +#endif
94070 + case PARM_DECL:
94071 + case TARGET_MEM_REF:
94072 + case VAR_DECL:
94073 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
94074 +
94075 + default:
94076 + debug_gimple_stmt(def_stmt);
94077 + debug_tree(rhs1);
94078 + gcc_unreachable();
94079 + }
94080 +}
94081 +
94082 +static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
94083 +{
94084 + gimple cond_stmt;
94085 + gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
94086 +
94087 + cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
94088 + gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
94089 + update_stmt(cond_stmt);
94090 +}
94091 +
94092 +static tree create_string_param(tree string)
94093 +{
94094 + tree i_type, a_type;
94095 + int length = TREE_STRING_LENGTH(string);
94096 +
94097 + gcc_assert(length > 0);
94098 +
94099 + i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
94100 + a_type = build_array_type(char_type_node, i_type);
94101 +
94102 + TREE_TYPE(string) = a_type;
94103 + TREE_CONSTANT(string) = 1;
94104 + TREE_READONLY(string) = 1;
94105 +
94106 + return build1(ADDR_EXPR, ptr_type_node, string);
94107 +}
94108 +
94109 +static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg)
94110 +{
94111 + gimple func_stmt, def_stmt;
94112 + tree current_func, loc_file, loc_line;
94113 + expanded_location xloc;
94114 + gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
94115 +
94116 + def_stmt = get_def_stmt(arg);
94117 + xloc = expand_location(gimple_location(def_stmt));
94118 +
94119 + if (!gimple_has_location(def_stmt)) {
94120 + xloc = expand_location(gimple_location(stmt));
94121 + if (!gimple_has_location(stmt))
94122 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
94123 + }
94124 +
94125 + loc_line = build_int_cstu(unsigned_type_node, xloc.line);
94126 +
94127 + loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
94128 + loc_file = create_string_param(loc_file);
94129 +
94130 + current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
94131 + current_func = create_string_param(current_func);
94132 +
94133 + // void report_size_overflow(const char *file, unsigned int line, const char *func)
94134 + func_stmt = gimple_build_call(report_size_overflow_decl, 3, loc_file, loc_line, current_func);
94135 +
94136 + gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
94137 +}
94138 +
94139 +static void __unused print_the_code_insertions(gimple stmt)
94140 +{
94141 + location_t loc = gimple_location(stmt);
94142 +
94143 + inform(loc, "Integer size_overflow check applied here.");
94144 +}
94145 +
94146 +static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value)
94147 +{
94148 + basic_block cond_bb, join_bb, bb_true;
94149 + edge e;
94150 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
94151 +
94152 + cond_bb = gimple_bb(stmt);
94153 + gsi_prev(&gsi);
94154 + if (gsi_end_p(gsi))
94155 + e = split_block_after_labels(cond_bb);
94156 + else
94157 + e = split_block(cond_bb, gsi_stmt(gsi));
94158 + cond_bb = e->src;
94159 + join_bb = e->dest;
94160 + e->flags = EDGE_FALSE_VALUE;
94161 + e->probability = REG_BR_PROB_BASE;
94162 +
94163 + bb_true = create_empty_bb(cond_bb);
94164 + make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
94165 + make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
94166 + make_edge(bb_true, join_bb, EDGE_FALLTHRU);
94167 +
94168 + if (dom_info_available_p(CDI_DOMINATORS)) {
94169 + set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
94170 + set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
94171 + }
94172 +
94173 + if (current_loops != NULL) {
94174 + gcc_assert(cond_bb->loop_father == join_bb->loop_father);
94175 + add_bb_to_loop(bb_true, cond_bb->loop_father);
94176 + }
94177 +
94178 + insert_cond(cond_bb, arg, cond_code, type_value);
94179 + insert_cond_result(bb_true, stmt, arg);
94180 +
94181 +// print_the_code_insertions(stmt);
94182 +}
94183 +
94184 +static tree get_type_for_check(tree rhs)
94185 +{
94186 + tree def_rhs;
94187 + gimple def_stmt = get_def_stmt(rhs);
94188 +
94189 + if (!gimple_assign_cast_p(def_stmt))
94190 + return TREE_TYPE(rhs);
94191 + def_rhs = gimple_assign_rhs1(def_stmt);
94192 + if (TREE_CODE(TREE_TYPE(def_rhs)) == INTEGER_TYPE)
94193 + return TREE_TYPE(def_rhs);
94194 + return TREE_TYPE(rhs);
94195 +}
94196 +
94197 +static gimple cast_to_unsigned_size_overflow_type(gimple stmt, tree cast_rhs)
94198 +{
94199 + gimple ucast_stmt;
94200 + gimple_stmt_iterator gsi;
94201 + location_t loc = gimple_location(stmt);
94202 +
94203 + ucast_stmt = build_cast_stmt(unsigned_size_overflow_type, cast_rhs, CREATE_NEW_VAR, loc);
94204 + gsi = gsi_for_stmt(stmt);
94205 + gsi_insert_before(&gsi, ucast_stmt, GSI_SAME_STMT);
94206 + return ucast_stmt;
94207 +}
94208 +
94209 +static void check_size_overflow(gimple stmt, tree cast_rhs, tree rhs, bool *potentionally_overflowed)
94210 +{
94211 + tree type_max, type_min, rhs_type;
94212 + gimple ucast_stmt;
94213 +
94214 + if (!*potentionally_overflowed)
94215 + return;
94216 +
94217 + rhs_type = get_type_for_check(rhs);
94218 +
94219 + if (TYPE_UNSIGNED(rhs_type)) {
94220 + ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, cast_rhs);
94221 + type_max = cast_a_tree(unsigned_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
94222 + insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
94223 + } else {
94224 + type_max = cast_a_tree(signed_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
94225 + insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max);
94226 +
94227 + type_min = cast_a_tree(signed_size_overflow_type, TYPE_MIN_VALUE(rhs_type));
94228 + insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min);
94229 + }
94230 +}
94231 +
94232 +static tree change_assign_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple stmt, tree orig_rhs)
94233 +{
94234 + gimple assign;
94235 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
94236 + tree new_rhs, origtype = TREE_TYPE(orig_rhs);
94237 +
94238 + gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
94239 +
94240 + new_rhs = expand(visited, potentionally_overflowed, orig_rhs);
94241 + if (new_rhs == NULL_TREE)
94242 + return NULL_TREE;
94243 +
94244 + assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, gimple_location(stmt));
94245 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
94246 + update_stmt(assign);
94247 + return gimple_get_lhs(assign);
94248 +}
94249 +
94250 +static tree handle_const_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple def_stmt, tree var, tree rhs, tree new_rhs1, tree new_rhs2, void (*gimple_assign_set_rhs)(gimple, tree))
94251 +{
94252 + tree new_rhs, cast_rhs;
94253 +
94254 + if (gimple_assign_rhs_code(def_stmt) == MIN_EXPR)
94255 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
94256 +
94257 + new_rhs = change_assign_rhs(visited, potentionally_overflowed, def_stmt, rhs);
94258 + if (new_rhs != NULL_TREE) {
94259 + gimple_assign_set_rhs(def_stmt, new_rhs);
94260 + update_stmt(def_stmt);
94261 +
94262 + cast_rhs = gimple_assign_rhs1(get_def_stmt(new_rhs));
94263 +
94264 + check_size_overflow(def_stmt, cast_rhs, rhs, potentionally_overflowed);
94265 + }
94266 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
94267 +}
94268 +
94269 +static tree handle_binary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
94270 +{
94271 + tree rhs1, rhs2;
94272 + gimple def_stmt = get_def_stmt(var);
94273 + tree new_rhs1 = NULL_TREE;
94274 + tree new_rhs2 = NULL_TREE;
94275 +
94276 + rhs1 = gimple_assign_rhs1(def_stmt);
94277 + rhs2 = gimple_assign_rhs2(def_stmt);
94278 +
94279 + /* no DImode/TImode division in the 32/64 bit kernel */
94280 + switch (gimple_assign_rhs_code(def_stmt)) {
94281 + case RDIV_EXPR:
94282 + case TRUNC_DIV_EXPR:
94283 + case CEIL_DIV_EXPR:
94284 + case FLOOR_DIV_EXPR:
94285 + case ROUND_DIV_EXPR:
94286 + case TRUNC_MOD_EXPR:
94287 + case CEIL_MOD_EXPR:
94288 + case FLOOR_MOD_EXPR:
94289 + case ROUND_MOD_EXPR:
94290 + case EXACT_DIV_EXPR:
94291 + case POINTER_PLUS_EXPR:
94292 + case BIT_AND_EXPR:
94293 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
94294 + default:
94295 + break;
94296 + }
94297 +
94298 + *potentionally_overflowed = true;
94299 +
94300 + if (TREE_CODE(rhs1) == SSA_NAME)
94301 + new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
94302 + if (TREE_CODE(rhs2) == SSA_NAME)
94303 + new_rhs2 = expand(visited, potentionally_overflowed, rhs2);
94304 +
94305 + if (is_gimple_constant(rhs2))
94306 + return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs1, new_rhs1, signed_cast_constant(rhs2), &gimple_assign_set_rhs1);
94307 +
94308 + if (is_gimple_constant(rhs1))
94309 + return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs2, signed_cast_constant(rhs1), new_rhs2, &gimple_assign_set_rhs2);
94310 +
94311 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
94312 +}
94313 +
94314 +#if BUILDING_GCC_VERSION >= 4007
94315 +static tree get_new_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree rhs)
94316 +{
94317 + if (is_gimple_constant(rhs))
94318 + return signed_cast_constant(rhs);
94319 + if (TREE_CODE(rhs) != SSA_NAME)
94320 + return NULL_TREE;
94321 + return expand(visited, potentionally_overflowed, rhs);
94322 +}
94323 +
94324 +static tree handle_ternary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
94325 +{
94326 + tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3;
94327 + gimple def_stmt = get_def_stmt(var);
94328 +
94329 + *potentionally_overflowed = true;
94330 +
94331 + rhs1 = gimple_assign_rhs1(def_stmt);
94332 + rhs2 = gimple_assign_rhs2(def_stmt);
94333 + rhs3 = gimple_assign_rhs3(def_stmt);
94334 + new_rhs1 = get_new_rhs(visited, potentionally_overflowed, rhs1);
94335 + new_rhs2 = get_new_rhs(visited, potentionally_overflowed, rhs2);
94336 + new_rhs3 = get_new_rhs(visited, potentionally_overflowed, rhs3);
94337 +
94338 + if (new_rhs1 == NULL_TREE && new_rhs2 != NULL_TREE && new_rhs3 != NULL_TREE)
94339 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, new_rhs3);
94340 + error("handle_ternary_ops: unknown rhs");
94341 + gcc_unreachable();
94342 +}
94343 +#endif
94344 +
94345 +static void set_size_overflow_type(tree node)
94346 +{
94347 + switch (TYPE_MODE(TREE_TYPE(node))) {
94348 + case SImode:
94349 + signed_size_overflow_type = intDI_type_node;
94350 + unsigned_size_overflow_type = unsigned_intDI_type_node;
94351 + break;
94352 + case DImode:
94353 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) {
94354 + signed_size_overflow_type = intDI_type_node;
94355 + unsigned_size_overflow_type = unsigned_intDI_type_node;
94356 + } else {
94357 + signed_size_overflow_type = intTI_type_node;
94358 + unsigned_size_overflow_type = unsigned_intTI_type_node;
94359 + }
94360 + break;
94361 + default:
94362 + error("set_size_overflow_type: unsupported gcc configuration.");
94363 + gcc_unreachable();
94364 + }
94365 +}
94366 +
94367 +static tree expand_visited(gimple def_stmt)
94368 +{
94369 + gimple tmp;
94370 + gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
94371 +
94372 + gsi_next(&gsi);
94373 + tmp = gsi_stmt(gsi);
94374 + switch (gimple_code(tmp)) {
94375 + case GIMPLE_ASSIGN:
94376 + return gimple_get_lhs(tmp);
94377 + case GIMPLE_PHI:
94378 + return gimple_phi_result(tmp);
94379 + case GIMPLE_CALL:
94380 + return gimple_call_lhs(tmp);
94381 + default:
94382 + return NULL_TREE;
94383 + }
94384 +}
94385 +
94386 +static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
94387 +{
94388 + gimple def_stmt;
94389 + enum tree_code code = TREE_CODE(TREE_TYPE(var));
94390 +
94391 + if (is_gimple_constant(var))
94392 + return NULL_TREE;
94393 +
94394 + if (TREE_CODE(var) == ADDR_EXPR)
94395 + return NULL_TREE;
94396 +
94397 + gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE);
94398 + if (code != INTEGER_TYPE)
94399 + return NULL_TREE;
94400 +
94401 + if (SSA_NAME_IS_DEFAULT_DEF(var)) {
94402 + check_missing_attribute(var);
94403 + return NULL_TREE;
94404 + }
94405 +
94406 + def_stmt = get_def_stmt(var);
94407 +
94408 + if (!def_stmt)
94409 + return NULL_TREE;
94410 +
94411 + if (pointer_set_contains(visited, def_stmt))
94412 + return expand_visited(def_stmt);
94413 +
94414 + switch (gimple_code(def_stmt)) {
94415 + case GIMPLE_NOP:
94416 + check_missing_attribute(var);
94417 + return NULL_TREE;
94418 + case GIMPLE_PHI:
94419 + return build_new_phi(visited, potentionally_overflowed, def_stmt);
94420 + case GIMPLE_CALL:
94421 + case GIMPLE_ASM:
94422 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
94423 + case GIMPLE_ASSIGN:
94424 + switch (gimple_num_ops(def_stmt)) {
94425 + case 2:
94426 + return handle_unary_ops(visited, potentionally_overflowed, var);
94427 + case 3:
94428 + return handle_binary_ops(visited, potentionally_overflowed, var);
94429 +#if BUILDING_GCC_VERSION >= 4007
94430 + case 4:
94431 + return handle_ternary_ops(visited, potentionally_overflowed, var);
94432 +#endif
94433 + }
94434 + default:
94435 + debug_gimple_stmt(def_stmt);
94436 + error("expand: unknown gimple code");
94437 + gcc_unreachable();
94438 + }
94439 +}
94440 +
94441 +static void change_function_arg(gimple stmt, tree origarg, unsigned int argnum, tree newarg)
94442 +{
94443 + gimple assign;
94444 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
94445 + tree origtype = TREE_TYPE(origarg);
94446 +
94447 + gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
94448 +
94449 + assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, gimple_location(stmt));
94450 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
94451 + update_stmt(assign);
94452 +
94453 + gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
94454 + update_stmt(stmt);
94455 +}
94456 +
94457 +static tree get_function_arg(unsigned int argnum, gimple stmt, tree fndecl)
94458 +{
94459 + const char *origid;
94460 + tree arg, origarg;
94461 +
94462 + if (!DECL_ABSTRACT_ORIGIN(fndecl)) {
94463 + gcc_assert(gimple_call_num_args(stmt) > argnum);
94464 + return gimple_call_arg(stmt, argnum);
94465 + }
94466 +
94467 + origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl));
94468 + while (origarg && argnum) {
94469 + argnum--;
94470 + origarg = TREE_CHAIN(origarg);
94471 + }
94472 +
94473 + gcc_assert(argnum == 0);
94474 +
94475 + gcc_assert(origarg != NULL_TREE);
94476 + origid = NAME(origarg);
94477 + for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) {
94478 + if (!strcmp(origid, NAME(arg)))
94479 + return arg;
94480 + }
94481 + return NULL_TREE;
94482 +}
94483 +
94484 +static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum)
94485 +{
94486 + struct pointer_set_t *visited;
94487 + tree arg, newarg, type_max;
94488 + gimple ucast_stmt;
94489 + bool potentionally_overflowed;
94490 +
94491 + arg = get_function_arg(argnum, stmt, fndecl);
94492 + if (arg == NULL_TREE)
94493 + return;
94494 +
94495 + if (is_gimple_constant(arg))
94496 + return;
94497 + if (TREE_CODE(arg) != SSA_NAME)
94498 + return;
94499 +
94500 + check_arg_type(arg);
94501 +
94502 + set_size_overflow_type(arg);
94503 +
94504 + visited = pointer_set_create();
94505 + potentionally_overflowed = false;
94506 + newarg = expand(visited, &potentionally_overflowed, arg);
94507 + pointer_set_destroy(visited);
94508 +
94509 + if (newarg == NULL_TREE || !potentionally_overflowed)
94510 + return;
94511 +
94512 + change_function_arg(stmt, arg, argnum, newarg);
94513 +
94514 + ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, newarg);
94515 +
94516 + type_max = build_int_cstu(unsigned_size_overflow_type, 0x7fffffff);
94517 + insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
94518 +}
94519 +
94520 +static void handle_function_by_attribute(gimple stmt, tree attr, tree fndecl)
94521 +{
94522 + tree p = TREE_VALUE(attr);
94523 + do {
94524 + handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1);
94525 + p = TREE_CHAIN(p);
94526 + } while (p);
94527 +}
94528 +
94529 +static void handle_function_by_hash(gimple stmt, tree fndecl)
94530 +{
94531 + tree orig_fndecl;
94532 + struct size_overflow_hash *hash;
94533 + const char *filename = DECL_SOURCE_FILE(fndecl);
94534 +
94535 + orig_fndecl = get_original_function_decl(fndecl);
94536 + hash = get_function_hash(orig_fndecl, filename);
94537 + if (!hash)
94538 + return;
94539 +
94540 +#define search_param(argnum) \
94541 + if (hash->param##argnum) \
94542 + handle_function_arg(stmt, fndecl, argnum - 1);
94543 +
94544 + search_param(1);
94545 + search_param(2);
94546 + search_param(3);
94547 + search_param(4);
94548 + search_param(5);
94549 + search_param(6);
94550 + search_param(7);
94551 + search_param(8);
94552 + search_param(9);
94553 +#undef search_param
94554 +}
94555 +
94556 +static unsigned int handle_function(void)
94557 +{
94558 + basic_block bb = ENTRY_BLOCK_PTR->next_bb;
94559 + int saved_last_basic_block = last_basic_block;
94560 +
94561 + do {
94562 + gimple_stmt_iterator gsi;
94563 + basic_block next = bb->next_bb;
94564 +
94565 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
94566 + tree fndecl, attr;
94567 + gimple stmt = gsi_stmt(gsi);
94568 +
94569 + if (!(is_gimple_call(stmt)))
94570 + continue;
94571 + fndecl = gimple_call_fndecl(stmt);
94572 + if (fndecl == NULL_TREE)
94573 + continue;
94574 + if (gimple_call_num_args(stmt) == 0)
94575 + continue;
94576 + attr = lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(fndecl)));
94577 + if (!attr || !TREE_VALUE(attr))
94578 + handle_function_by_hash(stmt, fndecl);
94579 + else
94580 + handle_function_by_attribute(stmt, attr, fndecl);
94581 + gsi = gsi_for_stmt(stmt);
94582 + }
94583 + bb = next;
94584 + } while (bb && bb->index <= saved_last_basic_block);
94585 + return 0;
94586 +}
94587 +
94588 +static struct gimple_opt_pass size_overflow_pass = {
94589 + .pass = {
94590 + .type = GIMPLE_PASS,
94591 + .name = "size_overflow",
94592 + .gate = NULL,
94593 + .execute = handle_function,
94594 + .sub = NULL,
94595 + .next = NULL,
94596 + .static_pass_number = 0,
94597 + .tv_id = TV_NONE,
94598 + .properties_required = PROP_cfg | PROP_referenced_vars,
94599 + .properties_provided = 0,
94600 + .properties_destroyed = 0,
94601 + .todo_flags_start = 0,
94602 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
94603 + }
94604 +};
94605 +
94606 +static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
94607 +{
94608 + tree fntype;
94609 +
94610 + const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
94611 +
94612 + // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func)
94613 + fntype = build_function_type_list(void_type_node,
94614 + const_char_ptr_type_node,
94615 + unsigned_type_node,
94616 + const_char_ptr_type_node,
94617 + NULL_TREE);
94618 + report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
94619 +
94620 + DECL_ASSEMBLER_NAME(report_size_overflow_decl);
94621 + TREE_PUBLIC(report_size_overflow_decl) = 1;
94622 + DECL_EXTERNAL(report_size_overflow_decl) = 1;
94623 + DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
94624 +}
94625 +
94626 +extern struct gimple_opt_pass pass_dce;
94627 +
94628 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
94629 +{
94630 + int i;
94631 + const char * const plugin_name = plugin_info->base_name;
94632 + const int argc = plugin_info->argc;
94633 + const struct plugin_argument * const argv = plugin_info->argv;
94634 + bool enable = true;
94635 +
94636 + struct register_pass_info size_overflow_pass_info = {
94637 + .pass = &size_overflow_pass.pass,
94638 + .reference_pass_name = "ssa",
94639 + .ref_pass_instance_number = 1,
94640 + .pos_op = PASS_POS_INSERT_AFTER
94641 + };
94642 +
94643 + if (!plugin_default_version_check(version, &gcc_version)) {
94644 + error(G_("incompatible gcc/plugin versions"));
94645 + return 1;
94646 + }
94647 +
94648 + for (i = 0; i < argc; ++i) {
94649 + if (!strcmp(argv[i].key, "no-size-overflow")) {
94650 + enable = false;
94651 + continue;
94652 + } else if (!(strcmp(argv[i].key, "no-file-match"))) {
94653 + file_match = false;
94654 + continue;
94655 + }
94656 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
94657 + }
94658 +
94659 + register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
94660 + if (enable) {
94661 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
94662 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
94663 + }
94664 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
94665 +
94666 + return 0;
94667 +}
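A call site can be matched in two ways: through a "size_overflow" attribute on the callee's type, or through the generated hash table keyed by the function name and, unless no-file-match is given, its source file. A hypothetical annotation illustrating the attribute path; the wrapper macro name is an assumption, not something defined in this hunk:

/*
 *   #define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
 *   void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);   // argument numbers are 1-based;
 *                                                                  // handle_function_by_attribute() subtracts 1
 */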
94668 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
94669 new file mode 100644
94670 index 0000000..b87ec9d
94671 --- /dev/null
94672 +++ b/tools/gcc/stackleak_plugin.c
94673 @@ -0,0 +1,313 @@
94674 +/*
94675 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
94676 + * Licensed under the GPL v2
94677 + *
94678 + * Note: the choice of the license means that the compilation process is
94679 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
94680 + * but for the kernel it doesn't matter since it doesn't link against
94681 + * any of the gcc libraries
94682 + *
94683 + * gcc plugin to help implement various PaX features
94684 + *
94685 + * - track lowest stack pointer
94686 + *
94687 + * TODO:
94688 + * - initialize all local variables
94689 + *
94690 + * BUGS:
94691 + * - none known
94692 + */
94693 +#include "gcc-plugin.h"
94694 +#include "config.h"
94695 +#include "system.h"
94696 +#include "coretypes.h"
94697 +#include "tree.h"
94698 +#include "tree-pass.h"
94699 +#include "flags.h"
94700 +#include "intl.h"
94701 +#include "toplev.h"
94702 +#include "plugin.h"
94703 +//#include "expr.h" where are you...
94704 +#include "diagnostic.h"
94705 +#include "plugin-version.h"
94706 +#include "tm.h"
94707 +#include "function.h"
94708 +#include "basic-block.h"
94709 +#include "gimple.h"
94710 +#include "rtl.h"
94711 +#include "emit-rtl.h"
94712 +
94713 +extern void print_gimple_stmt(FILE *, gimple, int, int);
94714 +
94715 +int plugin_is_GPL_compatible;
94716 +
94717 +static int track_frame_size = -1;
94718 +static const char track_function[] = "pax_track_stack";
94719 +static const char check_function[] = "pax_check_alloca";
94720 +static bool init_locals;
94721 +
94722 +static struct plugin_info stackleak_plugin_info = {
94723 + .version = "201203140940",
94724 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
94725 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
94726 +};
94727 +
94728 +static bool gate_stackleak_track_stack(void);
94729 +static unsigned int execute_stackleak_tree_instrument(void);
94730 +static unsigned int execute_stackleak_final(void);
94731 +
94732 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
94733 + .pass = {
94734 + .type = GIMPLE_PASS,
94735 + .name = "stackleak_tree_instrument",
94736 + .gate = gate_stackleak_track_stack,
94737 + .execute = execute_stackleak_tree_instrument,
94738 + .sub = NULL,
94739 + .next = NULL,
94740 + .static_pass_number = 0,
94741 + .tv_id = TV_NONE,
94742 + .properties_required = PROP_gimple_leh | PROP_cfg,
94743 + .properties_provided = 0,
94744 + .properties_destroyed = 0,
94745 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
94746 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
94747 + }
94748 +};
94749 +
94750 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
94751 + .pass = {
94752 + .type = RTL_PASS,
94753 + .name = "stackleak_final",
94754 + .gate = gate_stackleak_track_stack,
94755 + .execute = execute_stackleak_final,
94756 + .sub = NULL,
94757 + .next = NULL,
94758 + .static_pass_number = 0,
94759 + .tv_id = TV_NONE,
94760 + .properties_required = 0,
94761 + .properties_provided = 0,
94762 + .properties_destroyed = 0,
94763 + .todo_flags_start = 0,
94764 + .todo_flags_finish = TODO_dump_func
94765 + }
94766 +};
94767 +
94768 +static bool gate_stackleak_track_stack(void)
94769 +{
94770 + return track_frame_size >= 0;
94771 +}
94772 +
94773 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
94774 +{
94775 + gimple check_alloca;
94776 + tree fntype, fndecl, alloca_size;
94777 +
94778 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
94779 + fndecl = build_fn_decl(check_function, fntype);
94780 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
94781 +
94782 + // insert call to void pax_check_alloca(unsigned long size)
94783 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
94784 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
94785 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
94786 +}
94787 +
94788 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
94789 +{
94790 + gimple track_stack;
94791 + tree fntype, fndecl;
94792 +
94793 + fntype = build_function_type_list(void_type_node, NULL_TREE);
94794 + fndecl = build_fn_decl(track_function, fntype);
94795 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
94796 +
94797 + // insert call to void pax_track_stack(void)
94798 + track_stack = gimple_build_call(fndecl, 0);
94799 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
94800 +}
94801 +
94802 +#if BUILDING_GCC_VERSION == 4005
94803 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
94804 +{
94805 + tree fndecl;
94806 +
94807 + if (!is_gimple_call(stmt))
94808 + return false;
94809 + fndecl = gimple_call_fndecl(stmt);
94810 + if (!fndecl)
94811 + return false;
94812 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
94813 + return false;
94814 +// print_node(stderr, "pax", fndecl, 4);
94815 + return DECL_FUNCTION_CODE(fndecl) == code;
94816 +}
94817 +#endif
94818 +
94819 +static bool is_alloca(gimple stmt)
94820 +{
94821 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
94822 + return true;
94823 +
94824 +#if BUILDING_GCC_VERSION >= 4007
94825 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
94826 + return true;
94827 +#endif
94828 +
94829 + return false;
94830 +}
94831 +
94832 +static unsigned int execute_stackleak_tree_instrument(void)
94833 +{
94834 + basic_block bb, entry_bb;
94835 + bool prologue_instrumented = false, is_leaf = true;
94836 +
94837 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
94838 +
94839 + // 1. loop through BBs and GIMPLE statements
94840 + FOR_EACH_BB(bb) {
94841 + gimple_stmt_iterator gsi;
94842 +
94843 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
94844 + gimple stmt;
94845 +
94846 + stmt = gsi_stmt(gsi);
94847 +
94848 + if (is_gimple_call(stmt))
94849 + is_leaf = false;
94850 +
94851 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
94852 + if (!is_alloca(stmt))
94853 + continue;
94854 +
94855 + // 2. insert stack overflow check before each __builtin_alloca call
94856 + stackleak_check_alloca(&gsi);
94857 +
94858 + // 3. insert track call after each __builtin_alloca call
94859 + stackleak_add_instrumentation(&gsi);
94860 + if (bb == entry_bb)
94861 + prologue_instrumented = true;
94862 + }
94863 + }
94864 +
94865 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
94866 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
94867 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
94868 + // case in point: native_save_fl on amd64, when optimized for size, would clobber rdx if it were instrumented here.
94869 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
94870 + return 0;
94871 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
94872 + return 0;
94873 +
94874 + // 4. insert track call at the beginning
94875 + if (!prologue_instrumented) {
94876 + gimple_stmt_iterator gsi;
94877 +
94878 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
94879 + if (dom_info_available_p(CDI_DOMINATORS))
94880 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
94881 + gsi = gsi_start_bb(bb);
94882 + stackleak_add_instrumentation(&gsi);
94883 + }
94884 +
94885 + return 0;
94886 +}
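Taken together, steps 1-4 above yield an instrumented function that behaves roughly like this pseudo-C (a sketch; the real insertion happens on GIMPLE, and the prologue call is only added when no alloca sits in the entry block):

/*
 *   void foo(unsigned long n)
 *   {
 *       pax_track_stack();             // step 4: prologue instrumentation, if not already covered
 *       ...
 *       pax_check_alloca(n);           // step 2: before every __builtin_alloca
 *       p = __builtin_alloca(n);
 *       pax_track_stack();             // step 3: after every __builtin_alloca
 *       ...
 *   }
 */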
94887 +
94888 +static unsigned int execute_stackleak_final(void)
94889 +{
94890 + rtx insn;
94891 +
94892 + if (cfun->calls_alloca)
94893 + return 0;
94894 +
94895 + // keep calls only if function frame is big enough
94896 + if (get_frame_size() >= track_frame_size)
94897 + return 0;
94898 +
94899 + // 1. find pax_track_stack calls
94900 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
94901 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
94902 + rtx body;
94903 +
94904 + if (!CALL_P(insn))
94905 + continue;
94906 + body = PATTERN(insn);
94907 + if (GET_CODE(body) != CALL)
94908 + continue;
94909 + body = XEXP(body, 0);
94910 + if (GET_CODE(body) != MEM)
94911 + continue;
94912 + body = XEXP(body, 0);
94913 + if (GET_CODE(body) != SYMBOL_REF)
94914 + continue;
94915 + if (strcmp(XSTR(body, 0), track_function))
94916 + continue;
94917 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
94918 + // 2. delete call
94919 + insn = delete_insn_and_edges(insn);
94920 +#if BUILDING_GCC_VERSION >= 4007
94921 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
94922 + insn = delete_insn_and_edges(insn);
94923 +#endif
94924 + }
94925 +
94926 +// print_simple_rtl(stderr, get_insns());
94927 +// print_rtl(stderr, get_insns());
94928 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
94929 +
94930 + return 0;
94931 +}
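The reason for a second, RTL-level pass: the final frame size is only known this late, so the GIMPLE pass instruments every eligible function and this pass strips the pax_track_stack calls again whenever the function neither calls alloca nor ends up with a frame of at least track-lowest-sp bytes.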
94932 +
94933 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
94934 +{
94935 + const char * const plugin_name = plugin_info->base_name;
94936 + const int argc = plugin_info->argc;
94937 + const struct plugin_argument * const argv = plugin_info->argv;
94938 + int i;
94939 + struct register_pass_info stackleak_tree_instrument_pass_info = {
94940 + .pass = &stackleak_tree_instrument_pass.pass,
94941 +// .reference_pass_name = "tree_profile",
94942 + .reference_pass_name = "optimized",
94943 + .ref_pass_instance_number = 0,
94944 + .pos_op = PASS_POS_INSERT_BEFORE
94945 + };
94946 + struct register_pass_info stackleak_final_pass_info = {
94947 + .pass = &stackleak_final_rtl_opt_pass.pass,
94948 + .reference_pass_name = "final",
94949 + .ref_pass_instance_number = 0,
94950 + .pos_op = PASS_POS_INSERT_BEFORE
94951 + };
94952 +
94953 + if (!plugin_default_version_check(version, &gcc_version)) {
94954 + error(G_("incompatible gcc/plugin versions"));
94955 + return 1;
94956 + }
94957 +
94958 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
94959 +
94960 + for (i = 0; i < argc; ++i) {
94961 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
94962 + if (!argv[i].value) {
94963 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
94964 + continue;
94965 + }
94966 + track_frame_size = atoi(argv[i].value);
94967 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
94968 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
94969 + continue;
94970 + }
94971 + if (!strcmp(argv[i].key, "initialize-locals")) {
94972 + if (argv[i].value) {
94973 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
94974 + continue;
94975 + }
94976 + init_locals = true;
94977 + continue;
94978 + }
94979 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
94980 + }
94981 +
94982 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
94983 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
94984 +
94985 + return 0;
94986 +}
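Both plugins follow the usual GCC plugin argument convention, so a stand-alone invocation would look roughly like the comment below (file and source names are assumptions; the kernel build passes the equivalent flags through its own plugin CFLAGS):

/*
 *   gcc -fplugin=./stackleak_plugin.so \
 *       -fplugin-arg-stackleak_plugin-track-lowest-sp=100 -c file.c
 */

Each key=value pair after -fplugin-arg-<name>- arrives in plugin_info->argv and is matched in the loop above; anything else hits the "unknown option" diagnostic.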
94987 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
94988 index 6789d78..4afd019 100644
94989 --- a/tools/perf/util/include/asm/alternative-asm.h
94990 +++ b/tools/perf/util/include/asm/alternative-asm.h
94991 @@ -5,4 +5,7 @@
94992
94993 #define altinstruction_entry #
94994
94995 + .macro pax_force_retaddr rip=0, reload=0
94996 + .endm
94997 +
94998 #endif
94999 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
95000 index af0f22f..9a7d479 100644
95001 --- a/usr/gen_init_cpio.c
95002 +++ b/usr/gen_init_cpio.c
95003 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
95004 int retval;
95005 int rc = -1;
95006 int namesize;
95007 - int i;
95008 + unsigned int i;
95009
95010 mode |= S_IFREG;
95011
95012 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
95013 *env_var = *expanded = '\0';
95014 strncat(env_var, start + 2, end - start - 2);
95015 strncat(expanded, new_location, start - new_location);
95016 - strncat(expanded, getenv(env_var), PATH_MAX);
95017 - strncat(expanded, end + 1, PATH_MAX);
95018 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
95019 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
95020 strncpy(new_location, expanded, PATH_MAX);
95021 + new_location[PATH_MAX] = 0;
95022 } else
95023 break;
95024 }
95025 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
95026 index 9739b53..6d457e3 100644
95027 --- a/virt/kvm/kvm_main.c
95028 +++ b/virt/kvm/kvm_main.c
95029 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
95030
95031 static cpumask_var_t cpus_hardware_enabled;
95032 static int kvm_usage_count = 0;
95033 -static atomic_t hardware_enable_failed;
95034 +static atomic_unchecked_t hardware_enable_failed;
95035
95036 struct kmem_cache *kvm_vcpu_cache;
95037 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
95038 @@ -2247,7 +2247,7 @@ static void hardware_enable_nolock(void *junk)
95039
95040 if (r) {
95041 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
95042 - atomic_inc(&hardware_enable_failed);
95043 + atomic_inc_unchecked(&hardware_enable_failed);
95044 printk(KERN_INFO "kvm: enabling virtualization on "
95045 "CPU%d failed\n", cpu);
95046 }
95047 @@ -2301,10 +2301,10 @@ static int hardware_enable_all(void)
95048
95049 kvm_usage_count++;
95050 if (kvm_usage_count == 1) {
95051 - atomic_set(&hardware_enable_failed, 0);
95052 + atomic_set_unchecked(&hardware_enable_failed, 0);
95053 on_each_cpu(hardware_enable_nolock, NULL, 1);
95054
95055 - if (atomic_read(&hardware_enable_failed)) {
95056 + if (atomic_read_unchecked(&hardware_enable_failed)) {
95057 hardware_disable_all_nolock();
95058 r = -EBUSY;
95059 }
95060 @@ -2667,7 +2667,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
95061 kvm_arch_vcpu_put(vcpu);
95062 }
95063
95064 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
95065 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
95066 struct module *module)
95067 {
95068 int r;
95069 @@ -2730,7 +2730,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
95070 if (!vcpu_align)
95071 vcpu_align = __alignof__(struct kvm_vcpu);
95072 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
95073 - 0, NULL);
95074 + SLAB_USERCOPY, NULL);
95075 if (!kvm_vcpu_cache) {
95076 r = -ENOMEM;
95077 goto out_free_3;
95078 @@ -2740,9 +2740,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
95079 if (r)
95080 goto out_free;
95081
95082 - kvm_chardev_ops.owner = module;
95083 - kvm_vm_fops.owner = module;
95084 - kvm_vcpu_fops.owner = module;
95085 + pax_open_kernel();
95086 + *(void **)&kvm_chardev_ops.owner = module;
95087 + *(void **)&kvm_vm_fops.owner = module;
95088 + *(void **)&kvm_vcpu_fops.owner = module;
95089 + pax_close_kernel();
95090
95091 r = misc_register(&kvm_dev);
95092 if (r) {